def test_uniform(self):
    """Run a 5-sample uniform DOE on the Paraboloid and read back the cases."""
    from openmdao.api import Problem, IndepVarComp
    from openmdao.test_suite.components.paraboloid import Paraboloid
    from openmdao.api import DOEDriver, UniformGenerator, SqliteRecorder, CaseReader

    prob = Problem()
    model = prob.model

    model.add_subsystem('p1', IndepVarComp('x', 0.), promotes=['*'])
    model.add_subsystem('p2', IndepVarComp('y', 0.), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])

    model.add_design_var('x', lower=-10, upper=10)
    model.add_design_var('y', lower=-10, upper=10)
    model.add_objective('f_xy')

    prob.driver = DOEDriver(UniformGenerator(num_samples=5))
    prob.driver.add_recorder(SqliteRecorder("CASES.db"))

    prob.setup()
    prob.run_driver()
    prob.cleanup()

    cases = CaseReader("CASES.db").driver_cases
    self.assertEqual(cases.num_cases, 5)

    # collect the sampled design points and the corresponding objective values
    values = []
    for i in range(cases.num_cases):
        outs = cases.get_case(i).outputs
        values.append((outs['x'], outs['y'], outs['f_xy']))
def test_plackett_burman(self):
    """Check the recorded cases of a Plackett-Burman DOE on the Paraboloid."""
    prob = Problem()
    model = prob.model

    model.add_subsystem('p1', IndepVarComp('x', 0.0), promotes=['x'])
    model.add_subsystem('p2', IndepVarComp('y', 0.0), promotes=['y'])
    model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])

    model.add_design_var('x', lower=0.0, upper=1.0)
    model.add_design_var('y', lower=0.0, upper=1.0)
    model.add_objective('f_xy')

    prob.driver = DOEDriver(PlackettBurmanGenerator())
    prob.driver.add_recorder(SqliteRecorder("CASES.db"))

    prob.setup(check=False)
    prob.run_driver()
    prob.cleanup()

    # for two factors the design reduces to the four corner points
    expected = {
        0: {'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},
        1: {'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},
        2: {'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},
        3: {'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},
    }

    cases = CaseReader("CASES.db").driver_cases
    self.assertEqual(cases.num_cases, 4)

    for i in range(cases.num_cases):
        outputs = cases.get_case(i).outputs
        for name in ('x', 'y', 'f_xy'):
            self.assertEqual(outputs[name], expected[i][name])
def _check_recorder_file(self, pb, cstr, filename):
    """Run `pb` under a SEGOMOE driver with a case recorder attached, then
    verify the recorder file exists and its cases can be read back.

    Parameters
    ----------
    pb : Problem
        the problem to configure and run.
    cstr : bool
        if True, also configure a surrogate model for the constraints.
    filename : str
        name of the expected recorder file (recording actually goes to
        ``self.case_recorder_filename``) — NOTE(review): parameter appears
        unused; confirm against callers.
    """
    pb.driver = OneraSegoDriver()
    pb.driver.options["optimizer"] = "SEGOMOE"
    pb.driver.opt_settings["maxiter"] = 10

    # default Kriging model settings, shared by the objective and (optionally)
    # the constraints
    n_var = 2
    mod_obj = {
        "type": "Krig",
        "corr": "squared_exponential",
        "regr": "constant",
        "theta0": [1.0] * n_var,
        "thetaL": [0.1] * n_var,
        "thetaU": [10.0] * n_var,
        "normalize": True,
    }
    model_type = {"obj": mod_obj}
    if cstr:
        model_type["con"] = mod_obj
    pb.driver.opt_settings["model_type"] = model_type

    recorder = SqliteRecorder(self.case_recorder_filename)
    pb.model.add_recorder(recorder)
    pb.setup()

    # bug fix: run the problem passed in as `pb`, not a possibly-stale
    # `self.pb` reference set up by a different test
    pb.run_driver()

    self.assertTrue(os.path.exists(self.case_recorder_filename))
    reader = CaseReader(self.case_recorder_filename)
    for case_id in reader.list_cases():
        case = reader.get_case(case_id)
        print(case.outputs["obj"])
def test_box_behnken(self):
    """Check the recorded cases of a 3-factor Box-Behnken DOE."""
    upper = 10.
    center = 1

    prob = Problem()
    model = prob.model

    indep = model.add_subsystem('indep', IndepVarComp(), promotes=['*'])
    indep.add_output('x', 0.0)
    indep.add_output('y', 0.0)
    indep.add_output('z', 0.0)

    model.add_subsystem('comp', ExecComp('a = x**2 + y - z'), promotes=['*'])

    for name in ('x', 'y', 'z'):
        model.add_design_var(name, lower=0., upper=upper)
    model.add_objective('a')

    prob.driver = DOEDriver(BoxBehnkenGenerator(center=center))
    prob.driver.add_recorder(SqliteRecorder("CASES.db"))

    prob.setup(check=False)
    prob.run_driver()
    prob.cleanup()

    cases = CaseReader("CASES.db").driver_cases

    # The Box-Behnken design for 3 factors involves three blocks, in each of
    # which 2 factors are varied thru the 4 possible combinations of high & low.
    # It also includes centre points (all factors at their central values).
    # ref: https://en.wikipedia.org/wiki/Box-Behnken_design
    self.assertEqual(cases.num_cases, (3 * 4) + center)

    expected = {
        0:  {'x': np.array([0.]),  'y': np.array([0.]),  'z': np.array([5.])},
        1:  {'x': np.array([10.]), 'y': np.array([0.]),  'z': np.array([5.])},
        2:  {'x': np.array([0.]),  'y': np.array([10.]), 'z': np.array([5.])},
        3:  {'x': np.array([10.]), 'y': np.array([10.]), 'z': np.array([5.])},
        4:  {'x': np.array([0.]),  'y': np.array([5.]),  'z': np.array([0.])},
        5:  {'x': np.array([10.]), 'y': np.array([5.]),  'z': np.array([0.])},
        6:  {'x': np.array([0.]),  'y': np.array([5.]),  'z': np.array([10.])},
        7:  {'x': np.array([10.]), 'y': np.array([5.]),  'z': np.array([10.])},
        8:  {'x': np.array([5.]),  'y': np.array([0.]),  'z': np.array([0.])},
        9:  {'x': np.array([5.]),  'y': np.array([10.]), 'z': np.array([0.])},
        10: {'x': np.array([5.]),  'y': np.array([0.]),  'z': np.array([10.])},
        11: {'x': np.array([5.]),  'y': np.array([10.]), 'z': np.array([10.])},
        12: {'x': np.array([5.]),  'y': np.array([5.]),  'z': np.array([5.])},
    }

    for i in range(cases.num_cases):
        outputs = cases.get_case(i).outputs
        for name in ('x', 'y', 'z'):
            self.assertEqual(outputs[name], expected[i][name])
def test_full_factorial(self):
    """Run a parallel full-factorial DOE and check the per-rank case files."""
    prob = Problem()
    model = prob.model

    model.add_subsystem('p1', IndepVarComp('x', 0.0), promotes=['x'])
    model.add_subsystem('p2', IndepVarComp('y', 0.0), promotes=['y'])
    model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])

    model.add_design_var('x', lower=0.0, upper=1.0)
    model.add_design_var('y', lower=0.0, upper=1.0)
    model.add_objective('f_xy')

    prob.driver = DOEDriver(FullFactorialGenerator(levels=3), parallel=True)
    prob.driver.add_recorder(SqliteRecorder("CASES.db"))

    prob.setup()

    failed, output = run_driver(prob)
    self.assertFalse(failed)

    prob.cleanup()

    expected = {
        0: {'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},
        1: {'x': np.array([.5]), 'y': np.array([0.]), 'f_xy': np.array([19.25])},
        2: {'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},
        3: {'x': np.array([0.]), 'y': np.array([.5]), 'f_xy': np.array([26.25])},
        4: {'x': np.array([.5]), 'y': np.array([.5]), 'f_xy': np.array([23.75])},
        5: {'x': np.array([1.]), 'y': np.array([.5]), 'f_xy': np.array([21.75])},
        6: {'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},
        7: {'x': np.array([.5]), 'y': np.array([1.]), 'f_xy': np.array([28.75])},
        8: {'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},
    }

    size = prob.comm.size
    rank = prob.comm.rank

    # cases will be split across files for each proc
    filename = "CASES.db_%d" % rank
    expect_msg = "Cases from rank %d are being written to %s." % (rank, filename)
    self.assertTrue(expect_msg in output)

    # cases recorded on this proc
    cases = CaseReader(filename).driver_cases
    num_cases = cases.num_cases
    self.assertEqual(num_cases,
                     len(expected) // size + (rank < len(expected) % size))

    for i in range(num_cases):
        outputs = cases.get_case(i).outputs
        idx = i * size + rank  # index of expected case for this rank's file
        for name in ('x', 'y', 'f_xy'):
            self.assertEqual(outputs[name], expected[idx][name])
def test_uniform(self):
    """Check the exact samples of a seeded uniform DOE on the Paraboloid."""
    prob = Problem()
    model = prob.model

    model.add_subsystem('p1', IndepVarComp('x', 0.), promotes=['*'])
    model.add_subsystem('p2', IndepVarComp('y', 0.), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])

    model.add_design_var('x', lower=-10, upper=10)
    model.add_design_var('y', lower=-10, upper=10)
    model.add_objective('f_xy')

    prob.driver = DOEDriver(UniformGenerator(num_samples=5, seed=0))
    prob.driver.add_recorder(SqliteRecorder("CASES.db"))

    prob.setup(check=False)
    prob.run_driver()
    prob.cleanup()

    # all values should be between -10 and 10; check expected values for seed = 0
    expected = {
        0: {'x': np.array([0.97627008]),  'y': np.array([4.30378733])},
        1: {'x': np.array([2.05526752]),  'y': np.array([0.89766366])},
        2: {'x': np.array([-1.52690401]), 'y': np.array([2.91788226])},
        3: {'x': np.array([-1.24825577]), 'y': np.array([7.83546002])},
        4: {'x': np.array([9.27325521]),  'y': np.array([-2.33116962])},
    }

    cases = CaseReader("CASES.db").driver_cases
    self.assertEqual(cases.num_cases, 5)

    for i in range(cases.num_cases):
        outputs = cases.get_case(i).outputs
        for name in ('x', 'y'):
            assert_rel_error(self, outputs[name], expected[i][name], 1e-4)
def compute(self, inputs, outputs):
    """Plot the convergence trend of every recorded response and design
    variable found in the optimization log, one PDF figure per quantity."""
    folder_output = self.options['folder_output']
    optimization_log = folder_output + self.options['optimization_log']

    if os.path.exists(optimization_log):
        cr = CaseReader(optimization_log)
        cases = cr.list_cases()

        # gather the per-iteration history of each recorded quantity
        rec_data = {}
        iterations = []
        for i, case_id in enumerate(cases):
            iterations.append(i)
            case = cr.get_case(case_id)
            for parameters in (case.get_responses(), case.get_design_vars()):
                for param in parameters.keys():
                    if i == 0:
                        rec_data[param] = []
                    rec_data[param].append(parameters[param])

        # write one convergence-trend figure per quantity
        for param in rec_data.keys():
            fig, ax = plt.subplots(1, 1, figsize=(5.3, 4))
            ax.plot(iterations, rec_data[param])
            ax.set(xlabel='Number of Iterations', ylabel=param)
            fig_name = 'Convergence_trend_' + param + '.pdf'
            fig.savefig(folder_output + fig_name)
            plt.close(fig)
def test_full_factorial_array(self):
    """Check a full-factorial DOE over an array design variable."""
    prob = Problem()
    model = prob.model

    model.add_subsystem('p1', IndepVarComp('xy', np.array([0., 0.])), promotes=['*'])
    model.add_subsystem('comp', ParaboloidArray(), promotes=['*'])

    model.add_design_var('xy', lower=np.array([-50., -50.]),
                         upper=np.array([50., 50.]))
    model.add_objective('f_xy')

    prob.driver = DOEDriver(FullFactorialGenerator(levels=3))
    prob.driver.add_recorder(SqliteRecorder("cases.sql"))

    prob.setup(check=False)
    prob.run_driver()
    prob.cleanup()

    # 3 levels per array entry -> 9 combinations
    expected = {
        0: {'xy': np.array([-50., -50.])},
        1: {'xy': np.array([0., -50.])},
        2: {'xy': np.array([50., -50.])},
        3: {'xy': np.array([-50., 0.])},
        4: {'xy': np.array([0., 0.])},
        5: {'xy': np.array([50., 0.])},
        6: {'xy': np.array([-50., 50.])},
        7: {'xy': np.array([0., 50.])},
        8: {'xy': np.array([50., 50.])},
    }

    cases = CaseReader("cases.sql").driver_cases
    self.assertEqual(cases.num_cases, 9)

    for i in range(cases.num_cases):
        xy = cases.get_case(i).outputs['xy']
        self.assertEqual(xy[0], expected[i]['xy'][0])
        self.assertEqual(xy[1], expected[i]['xy'][1])
class Reader:
    """`Reader` class loads and displays stored simulations using an OpenMDAO
    `CaseReader` class instance.

    Parameters
    ----------
    db : str
        Full path of the database where the solution is stored
    case_id : str, optional
        Case identifier, ``initial`` to load the first iteration, ``final``
        to load the final solution. Default is ``final``
    db_exp : str or ``None``, optional
        Full path of the database where the explicit simulation is stored
        or ``None``. Default is ``None``

    Attributes
    ----------
    case_reader : CaseReader
        OpenMDAO `CaseReader` class instance for the implicit solution
    case_id : str
        Case identifier, ``initial`` to load the first iteration, ``final``
        to load the final solution.
    case : Case
        OpenMDAO `Case` class instance identified by `case_id` within the
        `case_reader` object
    case_reader_exp : CaseReader or ``None``
        OpenMDAO `CaseReader` class instance for the explicit simulation
        or ``None``
    case_exp : Case or ``None``
        OpenMDAO `Case` class instance holding the last recorded case of
        the `case_reader_exp` object or ``None``
    """

    def __init__(self, db, case_id='final', db_exp=None):
        """Init Reader class.

        Loads the implicit solution case identified by `case_id` and,
        when `db_exp` is given, the last case of the explicit simulation.

        Raises
        ------
        ValueError
            If `case_id` is neither ``'initial'`` nor ``'final'``.
        """
        self.case_reader = CaseReader(db)
        if case_id in ['initial', 'final']:
            self.case_id = case_id
        else:
            raise ValueError("Case must be either 'initial' or 'final'")
        self.case = self.case_reader.get_case(self.case_id)

        # explicit simulation: always load the most recent case (-1)
        if db_exp is not None:
            self.case_reader_exp = CaseReader(db_exp)
            self.case_exp = self.case_reader_exp.get_case(-1)
        else:
            self.case_reader_exp = self.case_exp = None
def test_sellar(self):
    """Run the Sellar MDA under the SEGOMOE driver and check that the
    recorded cases can be read back from the sqlite file."""
    self.pb = pb = Problem(SellarMDA())

    pb.model.add_design_var("x", lower=0, upper=10)
    pb.model.add_design_var("z", lower=0, upper=10)
    pb.model.add_objective("obj")
    pb.model.add_constraint("con1", upper=0)
    pb.model.add_constraint("con2", upper=0)

    pb.driver = OneraSegoDriver(optimizer="SEGOMOE")
    pb.driver.opt_settings["maxiter"] = 10

    self.case_recorder_filename = "test_segomoe_driver_sellar.sqlite"
    pb.model.add_recorder(SqliteRecorder(self.case_recorder_filename))

    pb.setup()
    self.pb.run_driver()

    self.assertTrue(os.path.exists(self.case_recorder_filename))
    reader = CaseReader(self.case_recorder_filename)
    for case_id in reader.list_cases():
        print(reader.get_case(case_id).outputs["obj"])
def test_full_factorial(self):
    """Run a full-factorial DOE in parallel under MPI and check that each
    rank records its share of the 9 cases to its own file."""
    from openmdao.api import Problem, IndepVarComp, PETScVector
    from openmdao.test_suite.components.paraboloid import Paraboloid
    from openmdao.api import DOEDriver, FullFactorialGenerator
    from openmdao.api import SqliteRecorder, CaseReader
    from mpi4py import MPI

    prob = Problem()
    model = prob.model

    model.add_subsystem('p1', IndepVarComp('x', 0.0), promotes=['x'])
    model.add_subsystem('p2', IndepVarComp('y', 0.0), promotes=['y'])
    model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])

    model.add_design_var('x', lower=0.0, upper=1.0)
    model.add_design_var('y', lower=0.0, upper=1.0)
    model.add_objective('f_xy')

    prob.driver = DOEDriver(FullFactorialGenerator(levels=3))
    prob.driver.options['parallel'] = True
    prob.driver.add_recorder(SqliteRecorder("CASES.db"))

    prob.setup()
    prob.run_driver()
    prob.cleanup()

    # this test is meant to be run on exactly 2 processors
    self.assertEqual(MPI.COMM_WORLD.size, 2)

    # check recorded cases from each rank's case file; the 9 cases are
    # split 5/4 across the two ranks
    # (removed a tautological assertion that compared `filename` against
    #  its own defining expression — it could never fail)
    rank = MPI.COMM_WORLD.rank
    filename = "CASES.db_%d" % rank

    cases = CaseReader(filename).driver_cases
    self.assertEqual(cases.num_cases, 5 if rank == 0 else 4)

    values = []
    for n in range(cases.num_cases):
        case = cases.get_case(n)
        values.append((case.outputs['x'], case.outputs['y'], case.outputs['f_xy']))
def test_latin_hypercube_center(self):
    """Check bucket coverage and centering of a Latin-hypercube DOE."""
    samples = 4
    upper = 10.

    prob = Problem()
    model = prob.model

    indep = model.add_subsystem('indep', IndepVarComp())
    indep.add_output('x', 0.0)
    indep.add_output('y', 0.0)

    model.add_subsystem('comp', Paraboloid())
    model.connect('indep.x', 'comp.x')
    model.connect('indep.y', 'comp.y')

    model.add_design_var('indep.x', lower=0., upper=upper)
    model.add_design_var('indep.y', lower=0., upper=upper)
    model.add_objective('comp.f_xy')

    prob.driver = DOEDriver(LatinHypercubeGenerator(samples=samples, criterion='c'))
    prob.driver.add_recorder(SqliteRecorder("CASES.db"))

    prob.setup(check=False)
    prob.run_driver()
    prob.cleanup()

    cases = CaseReader("CASES.db").driver_cases
    self.assertEqual(cases.num_cases, samples)

    # the sample space for each variable (0 to upper) should be divided into
    # equal size buckets and each variable should have a value in each bucket
    bucket_size = upper / samples
    all_buckets = set(range(samples))

    # with criterion of 'center', each value should be in the center of its bucket
    valid_values = [round(bucket_size * (b + 1 / 2), 3) for b in all_buckets]

    x_filled = set()
    y_filled = set()

    for i in range(cases.num_cases):
        outputs = cases.get_case(i).outputs
        x = float(outputs['indep.x'])
        y = float(outputs['indep.y'])

        x_filled.add(int(x / bucket_size))
        y_filled.add(int(y / bucket_size))

        self.assertTrue(round(x, 3) in valid_values, '%f not in %s' % (x, valid_values))
        self.assertTrue(round(y, 3) in valid_values, '%f not in %s' % (y, valid_values))

    self.assertEqual(x_filled, all_buckets)
    self.assertEqual(y_filled, all_buckets)
def test_full_factorial(self):
    """Check the 9 recorded cases of a 3-level full-factorial DOE."""
    prob = Problem()
    model = prob.model

    model.add_subsystem('p1', IndepVarComp('x', 0.0), promotes=['x'])
    model.add_subsystem('p2', IndepVarComp('y', 0.0), promotes=['y'])
    model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])

    model.add_design_var('x', lower=0.0, upper=1.0)
    model.add_design_var('y', lower=0.0, upper=1.0)
    model.add_objective('f_xy')

    prob.driver = DOEDriver(generator=FullFactorialGenerator(levels=3))
    prob.driver.add_recorder(SqliteRecorder("CASES.db"))

    prob.setup(check=False)
    prob.run_driver()
    prob.cleanup()

    expected = {
        0: {'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},
        1: {'x': np.array([.5]), 'y': np.array([0.]), 'f_xy': np.array([19.25])},
        2: {'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},
        3: {'x': np.array([0.]), 'y': np.array([.5]), 'f_xy': np.array([26.25])},
        4: {'x': np.array([.5]), 'y': np.array([.5]), 'f_xy': np.array([23.75])},
        5: {'x': np.array([1.]), 'y': np.array([.5]), 'f_xy': np.array([21.75])},
        6: {'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},
        7: {'x': np.array([.5]), 'y': np.array([1.]), 'f_xy': np.array([28.75])},
        8: {'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},
    }

    cases = CaseReader("CASES.db").driver_cases
    self.assertEqual(cases.num_cases, 9)

    for i in range(cases.num_cases):
        outputs = cases.get_case(i).outputs
        for name in ('x', 'y', 'f_xy'):
            self.assertEqual(outputs[name], expected[i][name])
def test_fan_in_grouped(self):
    """Run a DOE on FanInGrouped with 2-way parallelism and compare the
    formatted case listing of each rank against the expected text."""
    from openmdao.api import Problem, PETScVector
    from openmdao.test_suite.groups.parallel_groups import FanInGrouped
    from openmdao.api import DOEDriver, FullFactorialGenerator
    from openmdao.api import SqliteRecorder, CaseReader
    from mpi4py import MPI

    prob = Problem(FanInGrouped())
    model = prob.model

    model.add_design_var('iv.x1', lower=0.0, upper=1.0)
    model.add_design_var('iv.x2', lower=0.0, upper=1.0)
    model.add_objective('c3.y')

    prob.driver = DOEDriver(FullFactorialGenerator(levels=3))
    prob.driver.add_recorder(SqliteRecorder("CASES.db"))

    doe_parallel = 2
    prob.driver.options['parallel'] = doe_parallel

    prob.setup()
    prob.run_driver()
    prob.cleanup()

    rank = MPI.COMM_WORLD.rank

    # check recorded cases from each case file
    if rank < doe_parallel:
        filename = "CASES.db_%d" % rank
        cases = CaseReader(filename).driver_cases

        values = []
        for i in range(cases.num_cases):
            case = cases.get_case(i)
            values.append((case.outputs['iv.x1'], case.outputs['iv.x2'],
                           case.outputs['c3.y']))

        text = "\n".join("iv.x1: %5.2f, iv.x2: %5.2f, c3.y: %6.2f" % (x1, x2, y)
                         for x1, x2, y in values)
        self.assertEqual("\n" + text, self.expect_text)
def test_unknowns(self):
    """Tests that the reader returns unknowns correctly."""
    cr = CaseReader(self.filename)
    last_case = cr.get_case(-1)
    last_case_id = cr.list_cases()[-1]

    # compare every recorded Unknown against the raw value in the HDF5 file
    with h5py.File(self.filename, 'r') as f:
        unknowns = f[last_case_id]['Unknowns']
        for key in unknowns.keys():
            val = unknowns[key][()]
            np.testing.assert_almost_equal(last_case[key], val,
                                           err_msg='Case reader gives '
                                                   'incorrect Unknown value'
                                                   ' for {0}'.format(key))
def test_resids(self):
    """Tests that the reader returns resids correctly."""
    cr = CaseReader(self.filename)
    last_case = cr.get_case(-1)
    last_case_id = cr.list_cases()[-1]

    # compare every recorded residual against the raw value stored in the db
    with SqliteDict(self.filename, 'iterations', flag='r') as db:
        for key in db[last_case_id]['Residuals'].keys():
            val = db[last_case_id]['Residuals'][key][()]
            # err_msg fixed: this test checks Residuals, not Unknowns
            np.testing.assert_almost_equal(last_case.resids[key], val,
                                           err_msg='Case reader gives '
                                                   'incorrect Residual value'
                                                   ' for {0}'.format(key))
def test_params(self):
    """Tests that the reader returns params correctly."""
    cr = CaseReader(self.filename)
    last_case = cr.get_case(-1)
    last_case_id = cr.list_cases()[-1]

    # (removed an unused `n = cr.num_cases` lookup)
    # compare every recorded parameter against the raw value stored in the db
    with SqliteDict(self.filename, 'iterations', flag='r') as db:
        for key in db[last_case_id]['Parameters'].keys():
            val = db[last_case_id]['Parameters'][key]
            np.testing.assert_almost_equal(last_case.parameters[key], val,
                                           err_msg='Case reader gives '
                                                   'incorrect Parameter value'
                                                   ' for {0}'.format(key))
def test_resids(self):
    """Tests that the reader returns resids correctly."""
    cr = CaseReader(self.filename)
    last_case = cr.get_case(-1)
    last_case_id = cr.list_cases()[-1]

    # (removed an unused `n = cr.num_cases` lookup)
    # compare every recorded residual against the raw value in the HDF5 file
    with h5py.File(self.filename, 'r') as f:
        for key in f[last_case_id]['Residuals'].keys():
            val = f[last_case_id]['Residuals'][key][()]
            # err_msg fixed: this test checks Residuals, not Unknowns
            np.testing.assert_almost_equal(last_case.resids[key], val,
                                           err_msg='Case reader gives'
                                                   ' incorrect Residual'
                                                   ' value for'
                                                   ' {0}'.format(key))
def test_brachistochrone_recording(self):
    """Solve the brachistochrone optimal-control problem with dymos, record
    driver/model/phase data to a single sqlite file, and verify that the
    recorded control values can be read back via a CaseReader."""
    # use a non-interactive backend so the test can run headless
    import matplotlib
    matplotlib.use('Agg')

    from openmdao.api import Problem, Group, ScipyOptimizeDriver, DirectSolver, \
        SqliteRecorder, CaseReader
    from openmdao.utils.assert_utils import assert_rel_error
    from dymos import Phase, GaussLobatto
    from dymos.examples.brachistochrone.brachistochrone_ode import BrachistochroneODE

    p = Problem(model=Group())
    p.driver = ScipyOptimizeDriver()

    # 10-segment Gauss-Lobatto transcription of the brachistochrone ODE
    phase = Phase(ode_class=BrachistochroneODE,
                  transcription=GaussLobatto(num_segments=10))

    p.model.add_subsystem('phase0', phase)

    phase.set_time_options(initial_bounds=(0, 0), duration_bounds=(.5, 10))

    phase.set_state_options('x', fix_initial=True, fix_final=True)
    phase.set_state_options('y', fix_initial=True, fix_final=True)
    phase.set_state_options('v', fix_initial=True)

    phase.add_control('theta', units='deg', rate_continuity=False,
                      lower=0.01, upper=179.9)

    phase.add_design_parameter('g', units='m/s**2', opt=False, val=9.80665)

    # Minimize time at the end of the phase
    phase.add_objective('time', loc='final', scaler=10)

    p.model.linear_solver = DirectSolver()

    # Recording: one recorder attached at driver, model and phase level
    rec = SqliteRecorder('brachistochrone_solution.db')

    p.driver.recording_options['record_desvars'] = True
    p.driver.recording_options['record_responses'] = True
    p.driver.recording_options['record_objectives'] = True
    p.driver.recording_options['record_constraints'] = True
    p.model.recording_options['record_metadata'] = True

    p.driver.add_recorder(rec)
    p.model.add_recorder(rec)
    phase.add_recorder(rec)

    p.setup()

    # initial guesses: linear interpolation between boundary values
    p['phase0.t_initial'] = 0.0
    p['phase0.t_duration'] = 2.0

    p['phase0.states:x'] = phase.interpolate(ys=[0, 10], nodes='state_input')
    p['phase0.states:y'] = phase.interpolate(ys=[10, 5], nodes='state_input')
    p['phase0.states:v'] = phase.interpolate(ys=[0, 9.9], nodes='state_input')
    p['phase0.controls:theta'] = phase.interpolate(ys=[5, 100.5], nodes='control_input')

    # Solve for the optimal trajectory
    p.run_driver()

    # Test the results: known optimal time for this boundary-value problem
    assert_rel_error(self,
                     p.get_val('phase0.timeseries.time')[-1],
                     1.8016,
                     tolerance=1.0E-3)

    # read the recorded data back and check the control against the problem
    cr = CaseReader('brachistochrone_solution.db')
    system_cases = cr.list_cases('root')
    case = cr.get_case(system_cases[-1])

    outputs = dict([(o[0], o[1]) for o in case.list_outputs(units=True,
                                                            shape=True,
                                                            out_stream=None)])

    assert_rel_error(self,
                     p['phase0.controls:theta'],
                     outputs['phase0.control_group.indep_controls.controls:theta']
                     ['value'])
prob.set_val("traj.phase0.controls:beta", phase0.interpolate(ys=[-75*np.pi/180, 0*np.pi/180], nodes="control_input"), units="rad") recorder = SqliteRecorder("reentry.sql") prob.driver.add_recorder(recorder) prob.run_driver() # prob.run_model() sim_out = traj.simulate() prob.cleanup() case_reader = CaseReader("reentry.sql") driver_cases = case_reader.list_cases("driver") last_case = case_reader.get_case(driver_cases[-1]) final_constraints = last_case.get_constraints() final_q = final_constraints["traj.phase0.path_constraints.path:q"] plt.figure(0) plt.plot(prob.get_val("traj.phase0.timeseries.time", units="s"), prob.get_val("traj.phase0.timeseries.controls:alpha", units="deg"), "ro", label="Solution") plt.plot(sim_out.get_val("traj.phase0.timeseries.time", units="s"), sim_out.get_val("traj.phase0.timeseries.controls:alpha", units="deg"), "b-", label="Simulation") plt.title("Angle of Attack over Time") plt.xlabel("Time (s)") plt.ylabel("Angle of Attack (degrees)") plt.legend() plt.figure(1) plt.plot(prob.get_val("traj.phase0.timeseries.time", units="s"), prob.get_val("traj.phase0.timeseries.controls:beta", units="deg"), "ro", label="Solution") plt.plot(sim_out.get_val("traj.phase0.timeseries.time", units="s"), sim_out.get_val("traj.phase0.timeseries.controls:beta", units="deg"), "b-", label="Simulation") plt.title("Bank Angle over Time")
def test_fan_in_grouped_serial(self):
    """Run a full-factorial DOE on FanInGrouped with cases distributed over
    all procs (the parallel model itself runs on a single proc)."""
    # run cases on all procs (parallel model will run on single proc)
    doe_parallel = True

    prob = Problem(FanInGrouped())
    model = prob.model

    model.add_design_var('iv.x1', lower=0.0, upper=1.0)
    model.add_design_var('iv.x2', lower=0.0, upper=1.0)
    model.add_objective('c3.y')

    prob.driver = DOEDriver(FullFactorialGenerator(levels=3))
    prob.driver.add_recorder(SqliteRecorder("cases.sql"))
    prob.driver.options['parallel'] = doe_parallel

    prob.setup(check=False)

    failed, output = run_driver(prob)
    self.assertFalse(failed)

    prob.cleanup()

    expected = {
        0: {'iv.x1': np.array([0.]), 'iv.x2': np.array([0.]), 'c3.y': np.array([0.0])},
        1: {'iv.x1': np.array([.5]), 'iv.x2': np.array([0.]), 'c3.y': np.array([-3.0])},
        2: {'iv.x1': np.array([1.]), 'iv.x2': np.array([0.]), 'c3.y': np.array([-6.0])},
        3: {'iv.x1': np.array([0.]), 'iv.x2': np.array([.5]), 'c3.y': np.array([17.5])},
        4: {'iv.x1': np.array([.5]), 'iv.x2': np.array([.5]), 'c3.y': np.array([14.5])},
        5: {'iv.x1': np.array([1.]), 'iv.x2': np.array([.5]), 'c3.y': np.array([11.5])},
        6: {'iv.x1': np.array([0.]), 'iv.x2': np.array([1.]), 'c3.y': np.array([35.0])},
        7: {'iv.x1': np.array([.5]), 'iv.x2': np.array([1.]), 'c3.y': np.array([32.0])},
        8: {'iv.x1': np.array([1.]), 'iv.x2': np.array([1.]), 'c3.y': np.array([29.0])},
    }

    rank = prob.comm.rank
    size = prob.comm.size // doe_parallel
    num_cases = 0

    # cases will be split across files for each proc up to the number requested
    filename = "cases.sql_%d" % rank
    expect_msg = "Cases from rank %d are being written to %s." % (rank, filename)
    self.assertTrue(expect_msg in output)

    # cases recorded on this proc
    cases = CaseReader(filename).driver_cases
    num_cases = cases.num_cases
    self.assertEqual(num_cases,
                     len(expected) // size + (rank < len(expected) % size))

    for i in range(num_cases):
        idx = i * size + rank  # index of expected case
        outputs = cases.get_case(i).outputs
        for name in ('iv.x1', 'iv.x2', 'c3.y'):
            self.assertEqual(outputs[name], expected[idx][name])

    # total number of cases recorded across all requested procs
    num_cases = prob.comm.allgather(num_cases)
    self.assertEqual(sum(num_cases), len(expected))
twistcp = [] mesh = [] chordcp = [] forces = [] loads = [] vm = [] sparThickness = [] skinThickness = [] tbs = [] widths = [] sparThincknesscp = [] skinThincknesscp = [] for i in range(iterations): #for i in range(350,380): case = cr.get_case(driver_cases[i]) design_vars = case.get_design_vars() objective = case.get_objectives() constraints = case.get_constraints() mrho.append(design_vars['mrho']) masse.append(case.outputs['wing.structural_mass'][0]) co2.append(objective['emitted_co2'][0]) taper.append(case.inputs['wing.geometry.mesh.taper.taper'][0]) span.append(case.inputs['wing.geometry.mesh.stretch.span'][0]) chord.append(case.inputs['wing.geometry.mesh.scale_x.chord'][0]) chordTip.append(case.inputs['wing.geometry.mesh.scale_x.chord'][-1]) surface0.append(case.outputs['AS_point_0.coupled.wing.S_ref'][0]) # surface1.append(case.outputs['AS_point_1.coupled.wing.S_ref'][0]) sparThicknessRoot.append(design_vars['wing.spar_thickness_cp'][-1]) sparThicknessTip.append(design_vars['wing.spar_thickness_cp'][0]) skinThicknessRoot.append(design_vars['wing.skin_thickness_cp'][-1])
if options.batch or options.parallel: exit(0) reader = CaseReader(case_recorder_filename) cases = reader.list_cases('driver') n = len(cases) data = {'inputs': {}, 'outputs': {}} data['inputs']['LpA'] = np.zeros((n, ) + (1, )) data['inputs']['Ns'] = np.zeros((n, ) + (1, )) data['inputs']['p'] = np.zeros((n, ) + (1, )) data['inputs']['PayCap'] = np.zeros((n, ) + (1, )) data['outputs']['C_TOTAL'] = np.zeros((n, ) + (1, )) for i in range(len(cases)): case = reader.get_case(cases[i]) data['inputs']['LpA'][i, :] = case.outputs['LpA'] data['inputs']['Ns'][i, :] = case.outputs['Ns'] data['inputs']['p'][i, :] = case.outputs['p'] data['inputs']['PayCap'][i, :] = case.outputs['PayCap'] data['outputs']['C_TOTAL'][i, :] = case.outputs['C_TOTAL'] output = data['outputs']['C_TOTAL'].reshape(-1) input = data['inputs']['LpA'].reshape(-1) plt.subplot(1, 4, 1) plt.plot(input[0::1], output[0::1], '.') plt.ylabel('C_TOTAL') plt.xlabel('LpA') input = data['inputs']['Ns'].reshape(-1)
def setup_prob(self):
    """
    Short method to select the optimizer. Uses pyOptSparse if available,
    or Scipy's SLSQP otherwise. Also registers design variables,
    constraints and the objective, optionally attaches a recorder and
    profiler, sets up the problem, and warm-starts from a previous
    case database when one is configured.
    """
    try:
        # Use pyOptSparse optimizer if installed
        from openmdao.api import pyOptSparseDriver
        self.prob.driver = pyOptSparseDriver()
        if self.prob_dict['optimizer'] == 'SNOPT':
            self.prob.driver.options['optimizer'] = "SNOPT"
            self.prob.driver.opt_settings = {'Major optimality tolerance': 1.0e-8,
                                             'Major feasibility tolerance': 1.0e-8,
                                             'Major iterations limit': 400,
                                             'Minor iterations limit': 2000,
                                             'Iterations limit': 1000
                                             }
        elif self.prob_dict['optimizer'] == 'ALPSO':
            self.prob.driver.options['optimizer'] = 'ALPSO'
            self.prob.driver.opt_settings = {'SwarmSize': 40,
                                             'maxOuterIter': 200,
                                             'maxInnerIter': 6,
                                             'rtol': 1e-5,
                                             'atol': 1e-5,
                                             'dtol': 1e-5,
                                             'printOuterIters': 1
                                             }
        elif self.prob_dict['optimizer'] == 'NOMAD':
            self.prob.driver.options['optimizer'] = 'NOMAD'
            self.prob.driver.opt_settings = {'maxiter': 1000,
                                             'minmeshsize': 1e-12,
                                             'minpollsize': 1e-12,
                                             'displaydegree': 0,
                                             'printfile': 1
                                             }
        elif self.prob_dict['optimizer'] == 'SLSQP':
            self.prob.driver.options['optimizer'] = 'SLSQP'
            self.prob.driver.opt_settings = {'ACC': 1e-10
                                             }
    except ImportError:
        # bug fix: was a bare `except:`, which silently fell back to Scipy
        # on ANY error (e.g. a missing 'optimizer' key) rather than only
        # when pyOptSparse is not installed
        # Use Scipy SLSQP optimizer if pyOptSparse not installed
        self.prob.driver = ScipyOptimizer()
        self.prob.driver.options['optimizer'] = 'SLSQP'
        self.prob.driver.options['disp'] = True
        self.prob.driver.options['tol'] = 1.0e-10

    # Actually call the OpenMDAO functions to add the design variables,
    # constraints, and objective.
    for desvar_name, desvar_data in iteritems(self.desvars):
        self.prob.driver.add_desvar(desvar_name, **desvar_data)
    for con_name, con_data in iteritems(self.constraints):
        self.prob.driver.add_constraint(con_name, **con_data)
    for obj_name, obj_data in iteritems(self.objective):
        self.prob.driver.add_objective(obj_name, **obj_data)

    # Use finite differences over the entire model if user selected it
    if self.prob_dict['force_fd']:
        self.prob.root.deriv_options['type'] = 'fd'

    # Record optimization history to a database.
    # Data saved here can be examined using `plot_all.py` or `OptView.py`
    if self.prob_dict['record_db']:
        recorder = SqliteRecorder(self.prob_dict['prob_name'] + ".db")
        recorder.options['record_params'] = True
        recorder.options['record_derivs'] = True
        self.prob.driver.add_recorder(recorder)

    # Profile (time) the problem
    if self.prob_dict['profile']:
        profile.setup(self.prob)
        profile.start()

    # Set up the problem
    self.prob.setup()

    # Use warm start from previous db file if desired.
    # Note that we only have access to the unknowns, not the gradient history.
    if self.prob_dict['previous_case_db'] is not None:
        # Open the previous case and start from the last iteration.
        # Change the -1 value in get_case() if you want to select a different iteration.
        cr = CaseReader(self.prob_dict['previous_case_db'])
        case = cr.get_case(-1)

        # Loop through the unknowns and set them for this problem.
        for param_name, param_data in iteritems(case.unknowns):
            self.prob[param_name] = param_data
def test_derivs(self):
    """Test that derivs is None if not provided in the recording."""
    reader = CaseReader(self.filename)
    final_case = reader.get_case(-1)
    self.assertIsNone(final_case.derivs, "Case erroneously contains derivs.")
def test_resids(self):
    """ Test that resids is None if not provided in the recording. """
    # (docstring corrected: it previously said "params" although the
    #  assertion below checks `resids`)
    cr = CaseReader(self.filename)
    last_case = cr.get_case(-1)
    self.assertIsNone(last_case.resids, "Case erroneously contains resids.")
def compute(self, inputs, outputs):
    # type: (Vector, Vector) -> None
    """Computation performed by the component.

    Pushes the component's global inputs down into the sub-problem,
    runs the sub-driver, and pulls results (scalar outputs or DOE
    output vectors) back up into ``outputs``.

    Parameters
    ----------
    inputs : all inputs coming from outside the component in the group
    outputs : all outputs provided outside the component in the group"""
    # Define problem of subdriver
    p = self.prob
    m = p.model

    # Push global inputs down
    for input_name in m.model_constants:
        p[input_name] = inputs[input_name]

    failed_experiments = {}
    # sort to have outputs first
    sorted_model_super_inputs = sorted(m.model_super_inputs.keys(), reverse=True)
    for input_name in sorted_model_super_inputs:
        if input_name in m.sm_of_training_params.keys():  # Add these inputs as training data
            # Route this input to the surrogate model that trains on it.
            sm_uid = m.sm_of_training_params[input_name]
            pred_param = m.find_mapped_parameter(input_name,
                                                 m.sm_prediction_inputs[sm_uid] |
                                                 m.sm_prediction_outputs[sm_uid])
            sm_comp = getattr(m, str_to_valid_sys_name(sm_uid))
            # Lazily initialize the failed-experiment bookkeeping per surrogate.
            if sm_uid not in failed_experiments.keys():
                failed_experiments[sm_uid] = (None, None)
            sm_comp.options['train:'+pred_param], failed_experiments[sm_uid]\
                = p.postprocess_experiments(inputs[input_name], input_name,
                                            failed_experiments[sm_uid])
        else:
            p[input_name] = inputs[input_name]

    # Provide message on failed experiments
    warn_about_failed_experiments(failed_experiments)

    # Set initial values of design variables back to original ones (to avoid using values of
    # last run)
    for des_var, attrbs in m.design_vars.items():
        p[des_var] = attrbs['initial']

    # Run the driver
    print('Running subdriver {}'.format(self.options['driver_uid']))
    if 'Sub-Optimizer' not in p.case_reader_path:
        # Attach a fresh per-loop SQLite recorder so each run of this
        # sub-driver gets its own case database.
        p.driver.cleanup()
        basename, extension = os.path.splitext(p.case_reader_path)
        case_reader_filename = basename + '_loop' + str(self._run_count) + extension
        p.driver.add_recorder(SqliteRecorder(case_reader_filename))
        p.driver.recording_options['includes'] = ['*']
        p.driver.recording_options['record_model_metadata'] = True
        p.driver._setup_recording()
    p.run_driver()
    self._add_run_count()

    # Pull the value back up to the output array
    doe_out_vecs = {}
    for output_name in m.model_super_outputs:
        # Add these outputs as vectors based on DOE driver
        if output_name in m.doe_parameters.keys():
            doe_out_vecs[output_name] = []
        else:
            # Scalar output: NaN signals a failed driver run to the caller.
            if not p.driver.fail:
                outputs[output_name] = p[output_name]
            else:
                outputs[output_name] = float('nan')

    # If the driver failed (hence, optimization failed), then send message and clean
    if p.driver.fail:
        print('Driver run failed!')
        p.clean_driver_after_failure()

    # Provide DOE output vectors as output of the component, if this is expected
    if doe_out_vecs:
        # First read out the case reader
        cr = CaseReader(case_reader_filename)
        cases = cr.list_cases('driver')
        for n in range(len(cases)):
            cr_outputs = cr.get_case(n).outputs
            doe_param_matches = {}
            for output_name in doe_out_vecs.keys():
                # NOTE(review): appends element [0] of each recorded value —
                # presumably these are 1-element arrays; confirm for
                # multi-dimensional DOE parameters.
                doe_param_matches[output_name] = doe_param_match \
                    = m.find_mapped_parameter(output_name, cr_outputs.keys())
                doe_out_vecs[output_name].append(cr_outputs[doe_param_match][0])

        # Then write the final vectors to the global output array
        for output_name in doe_out_vecs.keys():
            if output_name in p.doe_samples[p.driver_uid]['inputs']:
                # Design-variable samples were recorded scaled; map them
                # back to physical values using ref0/ref.
                des_var_match = m.find_mapped_parameter(output_name, m._design_vars.keys())
                doe_out_vecs[output_name] = unscale_value(doe_out_vecs[output_name],
                                                          m._design_vars[des_var_match]['ref0'],
                                                          m._design_vars[des_var_match]['ref'])
            outputs[output_name] = np.array(doe_out_vecs[output_name])
def test_latin_hypercube(self):
    """
    LHS DOE on two scalar design variables.

    Checks the recorded cases against known seed-0 values and verifies
    the Latin-hypercube property: each variable's range is split into
    ``samples`` equal buckets and every bucket is hit exactly once.
    """
    samples = 4

    bounds = np.array([
        [-1, -10],  # lower bounds for x and y
        [1, 10]     # upper bounds for x and y
    ])

    # Unpack the bounds once; the original code recomputed these
    # identical values a second time further down.
    xlb, xub = bounds[0][0], bounds[1][0]
    ylb, yub = bounds[0][1], bounds[1][1]

    prob = Problem()
    model = prob.model

    model.add_subsystem('p1', IndepVarComp('x', 0.0), promotes=['x'])
    model.add_subsystem('p2', IndepVarComp('y', 0.0), promotes=['y'])
    model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])

    model.add_design_var('x', lower=xlb, upper=xub)
    model.add_design_var('y', lower=ylb, upper=yub)
    model.add_objective('f_xy')

    prob.driver = DOEDriver()
    prob.driver.options['generator'] = LatinHypercubeGenerator(samples=4, seed=0)

    prob.driver.add_recorder(SqliteRecorder("cases.sql"))

    prob.setup(check=False)
    prob.run_driver()
    prob.cleanup()

    # the sample space for each variable should be divided into equal
    # size buckets and each variable should have a value in each bucket
    all_buckets = set(range(samples))

    x_offset = 0 - xlb
    x_bucket_size = xub - xlb
    x_buckets_filled = set()

    y_offset = 0 - ylb
    y_bucket_size = yub - ylb
    y_buckets_filled = set()

    # expected values for seed = 0
    expected = {
        0: {'x': np.array([-0.19861831]), 'y': np.array([-6.42405317])},
        1: {'x': np.array([0.2118274]),   'y': np.array([9.458865])},
        2: {'x': np.array([0.71879361]),  'y': np.array([3.22947057])},
        3: {'x': np.array([-0.72559325]), 'y': np.array([-2.27558409])},
    }

    cases = CaseReader("cases.sql").driver_cases

    self.assertEqual(cases.num_cases, 4)

    for n in range(cases.num_cases):
        outputs = cases.get_case(n).outputs
        x = outputs['x']
        y = outputs['y']

        bucket = int((x + x_offset) / (x_bucket_size / samples))
        x_buckets_filled.add(bucket)

        bucket = int((y + y_offset) / (y_bucket_size / samples))
        y_buckets_filled.add(bucket)

        assert_rel_error(self, x, expected[n]['x'], 1e-4)
        assert_rel_error(self, y, expected[n]['y'], 1e-4)

    self.assertEqual(x_buckets_filled, all_buckets)
    self.assertEqual(y_buckets_filled, all_buckets)
def test_latin_hypercube_array(self):
    """
    LHS DOE on an array design variable 'xy'.

    Verifies the recorded samples against known seed-0 values and that
    each component of 'xy' fills every equal-size bucket of its range.
    """
    num_samples = 4

    bounds = np.array([
        [-10, -50],  # lower bounds for x and y
        [10, 50]     # upper bounds for x and y
    ])

    prob = Problem()
    prob.model.add_subsystem('p1', IndepVarComp('xy', np.array([50., 50.])),
                             promotes=['*'])
    prob.model.add_subsystem('comp', ParaboloidArray(), promotes=['*'])

    prob.model.add_design_var('xy', lower=bounds[0], upper=bounds[1])
    prob.model.add_objective('f_xy')

    prob.driver = DOEDriver(LatinHypercubeGenerator(samples=4, seed=0))
    prob.driver.add_recorder(SqliteRecorder("cases.sql"))

    prob.setup(check=False)
    prob.run_driver()
    prob.cleanup()

    # A valid Latin hypercube divides each variable's range into
    # `num_samples` equal buckets and places one sample in each.
    all_buckets = set(range(num_samples))

    x_low, x_high = bounds[0][0], bounds[1][0]
    y_low, y_high = bounds[0][1], bounds[1][1]

    x_span = x_high - x_low
    y_span = y_high - y_low

    x_buckets_filled = set()
    y_buckets_filled = set()

    # expected values for seed = 0
    expected = {
        0: {'xy': np.array([-1.98618312, -32.12026584])},
        1: {'xy': np.array([2.118274, 47.29432502])},
        2: {'xy': np.array([7.18793606, 16.14735283])},
        3: {'xy': np.array([-7.25593248, -11.37792043])},
    }

    cases = CaseReader("cases.sql").driver_cases
    self.assertEqual(cases.num_cases, 4)

    for n in range(cases.num_cases):
        xy = cases.get_case(n).outputs['xy']
        x, y = xy[0], xy[1]

        # (value - lower bound) / bucket width gives the bucket index.
        x_buckets_filled.add(int((x - x_low) / (x_span / num_samples)))
        y_buckets_filled.add(int((y - y_low) / (y_span / num_samples)))

        assert_rel_error(self, x, expected[n]['xy'][0], 1e-4)
        assert_rel_error(self, y, expected[n]['xy'][1], 1e-4)

    self.assertEqual(x_buckets_filled, all_buckets)
    self.assertEqual(y_buckets_filled, all_buckets)
Plots objective and constraint histories from the recorded data in 'ru_mdp.sql'.
"""
from __future__ import print_function
from six.moves import range
import numpy as np
from matplotlib import pylab
from openmdao.api import CaseReader

# load cases from recording database
cr = CaseReader('ru_mdp.sql')
#cases = cr.get_cases('driver')
cases = cr.list_cases('driver')
# Inspect the first case to discover the problem structure (constraints).
case = cr.get_case(cases[0])

num_cases = len(cases)
if num_cases == 0:
    print('No data yet...')
    quit()
else:
    print('# cases:', num_cases)

# determine # of constraints
constraints = list(case.get_constraints())
n_con = len(constraints)

# collect data into arrays for plotting
X = np.zeros(num_cases)  # obj.val
# model.add_design_var('eta_sa', lower = 0.01, upper=1)
model.add_design_var('M_ps', lower=0.01, upper=100)
model.add_constraint('con1',)
model.add_objective('t_tot')

# Case Recorder Setting
recorder = SqliteRecorder('test.sql')
p.driver.add_recorder(recorder)
p.add_recorder(recorder)

# Set-up and Run
p.setup()
p.set_solver_print(2)
p.run_driver()
# view_model(p)
p.record_iteration('final')
p.cleanup()

# Read the recorded cases back and report the key results.
cr = CaseReader('test.sql')
drivercases = cr.list_cases('driver')
case = cr.get_case(drivercases[0])
print(sorted(case.outputs.keys()))

# BUG FIX: the original printed the literal list ['P_req'] instead of the
# problem value p['P_req'].
print('P_req', p['P_req'])
print('A_sa', p['A_sa'])
print('M_u', p['M_u'])
print('M_ps', p['M_ps'])
print(p['t_tot'])
print(p['cycles'])