def test_full_factorial(self):
    """Run a 3-level full-factorial DOE on the Paraboloid and check all 9 recorded cases."""
    prob = Problem()
    model = prob.model

    model.add_subsystem('p1', IndepVarComp('x', 0.0), promotes=['x'])
    model.add_subsystem('p2', IndepVarComp('y', 0.0), promotes=['y'])
    model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])

    model.add_design_var('x', lower=0.0, upper=1.0)
    model.add_design_var('y', lower=0.0, upper=1.0)
    model.add_objective('f_xy')

    prob.driver = DOEDriver(generator=FullFactorialGenerator(levels=3))
    prob.driver.add_recorder(SqliteRecorder("CASES.db"))

    prob.setup(check=False)
    prob.run_driver()
    prob.cleanup()

    # 3 levels x 2 design vars -> 9 cases; x varies fastest
    expected = {
        0: {'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},
        1: {'x': np.array([.5]), 'y': np.array([0.]), 'f_xy': np.array([19.25])},
        2: {'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},
        3: {'x': np.array([0.]), 'y': np.array([.5]), 'f_xy': np.array([26.25])},
        4: {'x': np.array([.5]), 'y': np.array([.5]), 'f_xy': np.array([23.75])},
        5: {'x': np.array([1.]), 'y': np.array([.5]), 'f_xy': np.array([21.75])},
        6: {'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},
        7: {'x': np.array([.5]), 'y': np.array([1.]), 'f_xy': np.array([28.75])},
        8: {'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},
    }

    cases = CaseReader("CASES.db").driver_cases
    self.assertEqual(cases.num_cases, 9)

    for n in range(cases.num_cases):
        # fetch each case once instead of three times
        outputs = cases.get_case(n).outputs
        for name in ('x', 'y', 'f_xy'):
            self.assertEqual(outputs[name], expected[n][name])
def test_fan_in_grouped(self):
    """Run a parallel DOE on FanInGrouped and verify the cases recorded by each rank."""
    from openmdao.api import Problem, PETScVector
    from openmdao.test_suite.groups.parallel_groups import FanInGrouped
    from openmdao.api import DOEDriver, FullFactorialGenerator
    from openmdao.api import SqliteRecorder, CaseReader

    from mpi4py import MPI

    prob = Problem(FanInGrouped())
    model = prob.model

    model.add_design_var('iv.x1', lower=0.0, upper=1.0)
    model.add_design_var('iv.x2', lower=0.0, upper=1.0)
    model.add_objective('c3.y')

    prob.driver = DOEDriver(FullFactorialGenerator(levels=3))
    prob.driver.add_recorder(SqliteRecorder("CASES.db"))

    # run the DOE on two processors; each gets its own case file
    doe_parallel = prob.driver.options['parallel'] = 2

    prob.setup()
    prob.run_driver()
    prob.cleanup()

    rank = MPI.COMM_WORLD.rank

    # check recorded cases from each case file
    if rank < doe_parallel:
        filename = "CASES.db_%d" % rank
        cases = CaseReader(filename).driver_cases

        recorded = []
        for n in range(cases.num_cases):
            case = cases.get_case(n)
            recorded.append((case.outputs['iv.x1'],
                             case.outputs['iv.x2'],
                             case.outputs['c3.y']))

        lines = ["iv.x1: %5.2f, iv.x2: %5.2f, c3.y: %6.2f" % row
                 for row in recorded]
        self.assertEqual("\n" + "\n".join(lines), self.expect_text)
def test_no_generator(self):
    """A DOEDriver constructed without a generator should record zero cases."""
    prob = Problem()
    model = prob.model

    model.add_subsystem('p1', IndepVarComp('x', 0.), promotes=['*'])
    model.add_subsystem('p2', IndepVarComp('y', 0.), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])

    model.add_design_var('x', lower=-10, upper=10)
    model.add_design_var('y', lower=-10, upper=10)
    model.add_objective('f_xy')

    # no generator supplied -> driver has nothing to iterate over
    prob.driver = DOEDriver()
    prob.driver.add_recorder(SqliteRecorder("cases.sql"))

    prob.setup(check=False)
    prob.run_driver()
    prob.cleanup()

    cases = CaseReader("cases.sql").driver_cases
    self.assertEqual(cases.num_cases, 0)
def test_uniform(self):
    """Run a 5-sample uniform DOE on the Paraboloid and print the sampled points."""
    from openmdao.api import Problem, IndepVarComp
    from openmdao.test_suite.components.paraboloid import Paraboloid

    from openmdao.api import DOEDriver, UniformGenerator, SqliteRecorder, CaseReader

    prob = Problem()
    model = prob.model

    model.add_subsystem('p1', IndepVarComp('x', 0.), promotes=['*'])
    model.add_subsystem('p2', IndepVarComp('y', 0.), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])

    model.add_design_var('x', lower=-10, upper=10)
    model.add_design_var('y', lower=-10, upper=10)
    model.add_objective('f_xy')

    prob.driver = DOEDriver(UniformGenerator(num_samples=5))
    prob.driver.add_recorder(SqliteRecorder("cases.sql"))

    prob.setup()
    prob.run_driver()
    prob.cleanup()

    cases = CaseReader("cases.sql").driver_cases
    self.assertEqual(cases.num_cases, 5)

    # samples are random, so just echo them rather than assert exact values
    sampled = []
    for n in range(cases.num_cases):
        outputs = cases.get_case(n).outputs
        sampled.append((outputs['x'], outputs['y'], outputs['f_xy']))

    print("\n".join("x: %5.2f, y: %5.2f, f_xy: %6.2f" % row for row in sampled))
def test_full_factorial_array(self):
    """Full-factorial DOE over an array design variable; both elements get 3 levels each."""
    prob = Problem()
    model = prob.model

    model.add_subsystem('p1', IndepVarComp('xy', np.array([0., 0.])), promotes=['*'])
    model.add_subsystem('comp', ParaboloidArray(), promotes=['*'])

    model.add_design_var('xy', lower=np.array([-50., -50.]), upper=np.array([50., 50.]))
    model.add_objective('f_xy')

    prob.driver = DOEDriver(FullFactorialGenerator(levels=3))
    prob.driver.add_recorder(SqliteRecorder("CASES.db"))

    prob.setup(check=False)
    prob.run_driver()
    prob.cleanup()

    # 3 levels per array element -> 9 cases; first element varies fastest
    expected = {
        0: {'xy': np.array([-50., -50.])},
        1: {'xy': np.array([0., -50.])},
        2: {'xy': np.array([50., -50.])},
        3: {'xy': np.array([-50., 0.])},
        4: {'xy': np.array([0., 0.])},
        5: {'xy': np.array([50., 0.])},
        6: {'xy': np.array([-50., 50.])},
        7: {'xy': np.array([0., 50.])},
        8: {'xy': np.array([50., 50.])},
    }

    cases = CaseReader("CASES.db").driver_cases
    self.assertEqual(cases.num_cases, 9)

    for n in range(cases.num_cases):
        xy = cases.get_case(n).outputs['xy']
        self.assertEqual(xy[0], expected[n]['xy'][0])
        self.assertEqual(xy[1], expected[n]['xy'][1])
def test_fan_in_grouped_serial(self):
    """DOE with parallel=True: cases are distributed round-robin across all procs."""
    # run cases on all procs (parallel model will run on single proc)
    doe_parallel = True

    prob = Problem(FanInGrouped())
    model = prob.model

    model.add_design_var('iv.x1', lower=0.0, upper=1.0)
    model.add_design_var('iv.x2', lower=0.0, upper=1.0)
    model.add_objective('c3.y')

    prob.driver = DOEDriver(FullFactorialGenerator(levels=3))
    prob.driver.add_recorder(SqliteRecorder("cases.sql"))
    prob.driver.options['parallel'] = doe_parallel

    prob.setup(check=False)

    failed, output = run_driver(prob)
    self.assertFalse(failed)

    prob.cleanup()

    expected = {
        0: {'iv.x1': np.array([0.]), 'iv.x2': np.array([0.]), 'c3.y': np.array([0.0])},
        1: {'iv.x1': np.array([.5]), 'iv.x2': np.array([0.]), 'c3.y': np.array([-3.0])},
        2: {'iv.x1': np.array([1.]), 'iv.x2': np.array([0.]), 'c3.y': np.array([-6.0])},
        3: {'iv.x1': np.array([0.]), 'iv.x2': np.array([.5]), 'c3.y': np.array([17.5])},
        4: {'iv.x1': np.array([.5]), 'iv.x2': np.array([.5]), 'c3.y': np.array([14.5])},
        5: {'iv.x1': np.array([1.]), 'iv.x2': np.array([.5]), 'c3.y': np.array([11.5])},
        6: {'iv.x1': np.array([0.]), 'iv.x2': np.array([1.]), 'c3.y': np.array([35.0])},
        7: {'iv.x1': np.array([.5]), 'iv.x2': np.array([1.]), 'c3.y': np.array([32.0])},
        8: {'iv.x1': np.array([1.]), 'iv.x2': np.array([1.]), 'c3.y': np.array([29.0])},
    }

    rank = prob.comm.rank
    # doe_parallel is True (== 1) here, so every proc runs cases
    size = prob.comm.size // doe_parallel

    # cases will be split across files for each proc up to the number requested
    filename = "cases.sql_%d" % rank

    expect_msg = "Cases from rank %d are being written to %s." % (rank, filename)
    self.assertTrue(expect_msg in output)

    # cases recorded on this proc
    cases = CaseReader(filename).driver_cases
    num_cases = cases.num_cases
    self.assertEqual(num_cases, len(expected) // size + (rank < len(expected) % size))

    for n in range(num_cases):
        idx = n * size + rank  # index of expected case
        outputs = cases.get_case(n).outputs
        for name in ('iv.x1', 'iv.x2', 'c3.y'):
            self.assertEqual(outputs[name], expected[idx][name])

    # total number of cases recorded across all requested procs
    num_cases = prob.comm.allgather(num_cases)
    self.assertEqual(sum(num_cases), len(expected))
def test_full_factorial(self):
    """Parallel full-factorial DOE: each rank records its own slice of the 9 cases."""
    prob = Problem()
    model = prob.model

    model.add_subsystem('p1', IndepVarComp('x', 0.0), promotes=['x'])
    model.add_subsystem('p2', IndepVarComp('y', 0.0), promotes=['y'])
    model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])

    model.add_design_var('x', lower=0.0, upper=1.0)
    model.add_design_var('y', lower=0.0, upper=1.0)
    model.add_objective('f_xy')

    prob.driver = DOEDriver(FullFactorialGenerator(levels=3), parallel=True)
    prob.driver.add_recorder(SqliteRecorder("cases.sql"))

    prob.setup()

    failed, output = run_driver(prob)
    self.assertFalse(failed)

    prob.cleanup()

    expected = {
        0: {'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},
        1: {'x': np.array([.5]), 'y': np.array([0.]), 'f_xy': np.array([19.25])},
        2: {'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},
        3: {'x': np.array([0.]), 'y': np.array([.5]), 'f_xy': np.array([26.25])},
        4: {'x': np.array([.5]), 'y': np.array([.5]), 'f_xy': np.array([23.75])},
        5: {'x': np.array([1.]), 'y': np.array([.5]), 'f_xy': np.array([21.75])},
        6: {'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},
        7: {'x': np.array([.5]), 'y': np.array([1.]), 'f_xy': np.array([28.75])},
        8: {'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},
    }

    size = prob.comm.size
    rank = prob.comm.rank

    # cases will be split across files for each proc
    filename = "cases.sql_%d" % rank

    expect_msg = "Cases from rank %d are being written to %s." % (rank, filename)
    self.assertTrue(expect_msg in output)

    # cases recorded on this proc
    cases = CaseReader(filename).driver_cases
    num_cases = cases.num_cases
    self.assertEqual(num_cases, len(expected) // size + (rank < len(expected) % size))

    for n in range(num_cases):
        idx = n * size + rank  # index of expected case
        outputs = cases.get_case(n).outputs
        for name in ('x', 'y', 'f_xy'):
            self.assertEqual(outputs[name], expected[idx][name])

    # total number of cases recorded across all procs
    num_cases = prob.comm.allgather(num_cases)
    self.assertEqual(sum(num_cases), len(expected))
def collect_results(self, cases_to_collect='default', print_in_log=True):
    # type: (Union[str, list], bool) -> Dict[dict]
    """Collect (and optionally print) the results that were stored in the case reader.

    Parameters
    ----------
    cases_to_collect : str or list
        Setting on which cases should be collected (e.g. 'last', 'all', 'default', [2, 3, 5])

    print_in_log : bool
        Setting on whether the results should also be printed in the log

    Returns
    -------
    results : dict of dicts
        Dictionary containing the results that were collected
    """
    # Validate inputs before touching the case reader
    if not os.path.isfile(self.case_reader_path):
        raise AssertionError('Could not find the case reader file {}.'.format(self.case_reader_path))
    if not isinstance(cases_to_collect, (str, list)):
        raise AssertionError('cases_to_print must be of type str or list.')
    if isinstance(cases_to_collect, str):
        if cases_to_collect not in ['default', 'all', 'last']:
            raise AssertionError('Invalid cases_to_print string value provided.')
        if cases_to_collect == 'default':
            # a DOE records every sample; other driver types only the final case
            if self.driver_type == 'doe':
                cases_to_collect = 'all'
            elif self.driver_type == 'optimizer':
                cases_to_collect = 'last'
            else:
                cases_to_collect = 'last'

    results = dict()

    # Get all cases from the case reader and determine amount of cases
    cases = CaseReader(self.case_reader_path).driver_cases
    num_cases = cases.num_cases
    if num_cases == 0:
        raise AssertionError('No cases were recorded and therefore no results can be collected. Note that '
                             'collect_results only works after the driver has been run.')

    # Change cases_to_print to a list of integers with case numbers
    if isinstance(cases_to_collect, str):
        if cases_to_collect == 'all':
            cases_to_collect = range(num_cases)
        elif cases_to_collect == 'last':
            cases_to_collect = [num_cases - 1]

    # Print results
    print_optional('\nPrinting results from case reader: {}.'.format(self.case_reader_path),
                   print_in_log)
    if self.driver.fail:
        if self.driver_type == 'optimizer':
            print_optional('Optimum not found in driver execution!', print_in_log)
        else:
            print_optional('Driver failed for some reason!', print_in_log)
    else:
        if self.driver_type == 'optimizer':
            print_optional('Optimum found!', print_in_log)
        else:
            print_optional('Driver finished!', print_in_log)
    print_optional('\nPrinting case numbers: {}'.format(cases_to_collect), print_in_log)

    for num_case in cases_to_collect:
        case = cases.get_case(num_case)
        print_optional('\n\n Case {}/{} ({})'.format(num_case, num_cases-1,
                                                     case.iteration_coordinate), print_in_log)
        recorded_objectives = case.get_objectives()
        recorded_desvars = case.get_desvars()
        recorded_constraints = case.get_constraints()

        # TODO: Temporary fix due to issue with OpenMDAO 2.4.0, remove with new release of OpenMDAO
        try:
            var_objectives = sorted(list(recorded_objectives.keys()))
            var_desvars = sorted(list(recorded_desvars.keys()))
            var_constraints = sorted(list(recorded_constraints.keys()))
        except TypeError:
            # some OpenMDAO versions expose .keys as an attribute instead of a method
            var_objectives = sorted(list(recorded_objectives.keys))
            var_desvars = sorted(list(recorded_desvars.keys))
            var_constraints = sorted(list(recorded_constraints.keys))

        # DOE output samples and convergence (state) variables declared in the XML definitions
        var_does = sorted([elem.text for elem in self.elem_arch_elems
                          .findall('parameters/doeOutputSampleLists/doeOutputSampleList/relatedParameterUID')])
        var_convs = sorted([elem.text for elem in self.elem_problem_def
                           .findall('problemRoles/parameters/stateVariables/stateVariable/parameterUID')])

        # Print objective
        if var_objectives:
            print_optional(' Objectives', print_in_log)
            for var_objective in var_objectives:
                value = recorded_objectives[var_objective]
                print_optional(' {}: {}'.format(var_objective, value), print_in_log)
                results = add_or_append_dict_entry(results, 'objectives', var_objective, value)

        # Print design variables
        # TODO: Add bounds (currently a bug in OpenMDAO recorders)
        if var_desvars:
            print_optional('\n Design variables', print_in_log)
            for var_desvar in var_desvars:
                value = recorded_desvars[var_desvar]
                print_optional(' {}: {}'.format(var_desvar, value), print_in_log)
                results = add_or_append_dict_entry(results, 'desvars', var_desvar, value)

        # Print constraint values
        # TODO: Add bounds (currently a bug in OpenMDAO recorders)
        if var_constraints:
            print_optional('\n Constraints', print_in_log)
            for var_constraint in var_constraints:
                value = recorded_constraints[var_constraint]
                print_optional(' {}: {}'.format(var_constraint, value), print_in_log)
                results = add_or_append_dict_entry(results, 'constraints', var_constraint, value)

        # Print DOE quantities of interest
        # BUGFIX: header typo corrected ("Quantifies" -> "Quantities")
        if var_does:
            print_optional('\n Quantities of interest', print_in_log)
            for var_qoi in var_does:
                value = case.outputs[var_qoi]
                print_optional(' {}: {}'.format(var_qoi, value), print_in_log)
                results = add_or_append_dict_entry(results, 'qois', var_qoi, value)

        # Print other quantities of interest (state variables not already covered above)
        title_not_printed = True
        if var_convs:
            for var_qoi in var_convs:
                if var_qoi not in var_objectives + var_constraints + var_does:
                    if title_not_printed:
                        print_optional('\n Quantities of interest', print_in_log)
                        title_not_printed = False
                    value = case.outputs[var_qoi]
                    print_optional(' {}: {}'.format(var_qoi, value), print_in_log)
                    results = add_or_append_dict_entry(results, 'qois', var_qoi, value)

    # BUGFIX: the docstring promises the collected results, but the original
    # implementation fell off the end of the function and implicitly returned None
    return results
upper=sys.float_info.max) pb.model.add_design_var('p', lower=-sys.float_info.max, upper=sys.float_info.max) pb.model.add_design_var('PayCap', lower=-sys.float_info.max, upper=sys.float_info.max) pb.model.add_objective('C_TOTAL') pb.setup() pb.run_driver() if options.batch or options.parallel: exit(0) reader = CaseReader(case_recorder_filename) cases = reader.list_cases('driver') n = len(cases) data = {'inputs': {}, 'outputs': {}} data['inputs']['LpA'] = np.zeros((n, ) + (1, )) data['inputs']['Ns'] = np.zeros((n, ) + (1, )) data['inputs']['p'] = np.zeros((n, ) + (1, )) data['inputs']['PayCap'] = np.zeros((n, ) + (1, )) data['outputs']['C_TOTAL'] = np.zeros((n, ) + (1, )) for i in range(len(cases)): case = reader.get_case(cases[i]) data['inputs']['LpA'][i, :] = case.outputs['LpA'] data['inputs']['Ns'][i, :] = case.outputs['Ns']
def test_resids(self):
    """Test that resids is None if not provided in the recording."""
    cr = CaseReader(self.filename)
    final_case = cr.get_case(-1)
    self.assertIsNone(final_case.resids, "Case erroneously contains resids.")
def test_format_version(self):
    """The reader must report the format version the recording was written with."""
    reader = CaseReader(self.filename)
    self.assertEqual(reader.format_version, format_version,
                     msg='format version not read correctly')
if __name__ == "__main__": scalers = init_ssbj_mda() # Pick up values from recorder cr_file_folder_name = 'files' cr_file_key_word = 'bliss_newrun4' n_loop = 13 des_vars_list = pickle.load( open( os.path.join( cr_file_folder_name, 'ssbj_des_vars_{}_system_loops.p'.format(cr_file_key_word)), 'rb')) cr_sys = CaseReader( os.path.join( cr_file_folder_name, 'ssbj_cr_{}_system_loop{:02d}.sql'.format(cr_file_key_word, n_loop))) n_loops = len(des_vars_list) # Get last case case = cr_sys.driver_cases.get_case(-1) des_vars_sh = case.outputs['z_sh'] prob = Problem() prob.model = SSBJ_MDA(scalers) prob.setup() prob['z'] = des_vars_sh prob['z'][0] = 1.2 prob['x_str'] = np.array([1.6, 0.75]) prob['x_aer'] = np.array([0.75]) prob['x_pro'] = np.array([0.3125])
def _load_results(self, filename):
    """
    Load PhaseSimulationResults from the given file.

    Parameters
    ----------
    filename : str
        The path of the file from which to load the simulation results.
    """
    cr = CaseReader(filename)

    # only the final system iteration holds the complete simulation record
    case = cr.system_cases.get_case(-1)

    loaded_outputs = cr.list_outputs(case=case, explicit=True, implicit=True,
                                     values=True, units=True, shape=True,
                                     out_stream=None)

    # NOTE(review): 'input_parameters' is initialized here but no branch below
    # ever assigns var_type = 'input_parameters' — confirm whether that is intended.
    self.outputs = {'indep': {}, 'states': {}, 'controls': {},
                    'control_rates': {}, 'design_parameters': {},
                    'input_parameters': {}, 'ode': {}}

    for output_name, options in loaded_outputs:
        if output_name.startswith('inputs.'):
            # strip the 'inputs.' prefix, then dispatch on the remaining prefix
            output_name = output_name.replace('inputs.', '')

            if output_name == 'time':
                var_type = 'indep'
                var_name = 'time'
            # deliberately a separate `if` (not `elif`): 'time' matches none of
            # the prefixes below, so it simply falls through the chain
            if output_name.startswith('states:'):
                var_type = 'states'
                var_name = output_name.replace('states:', '', 1)
            elif output_name.startswith('controls:'):
                var_type = 'controls'
                var_name = output_name.replace('controls:', '', 1)
            elif output_name.startswith('control_rates:'):
                var_type = 'control_rates'
                var_name = output_name.replace('control_rates:', '', 1)
            elif output_name.startswith('design_parameters:'):
                var_type = 'design_parameters'
                var_name = output_name.replace('design_parameters:', '', 1)
            # elif output_name.startswith('traj_design_parameters:'):
            #     var_type = 'traj_design_parameters'
            #     var_name = output_name.replace('traj_design_parameters:', '', 1)

            val = options['value']
        elif output_name.startswith('ode.'):
            var_type = 'ode'
            var_name = output_name.replace('ode.', '')
            # promote 1-D ODE outputs to column vectors so all values are 2-D
            if len(options['value'].shape) == 1:
                val = options['value'][:, np.newaxis]
            else:
                val = options['value']
        else:
            raise RuntimeError('unexpected output in file {1}: {0}'.format(
                output_name, filename))

        # store value, units (converted to ascii), and the per-point shape
        # (first axis is the time/sample dimension, hence shape[1:])
        self.outputs[var_type][var_name] = {}
        self.outputs[var_type][var_name]['value'] = val
        self.outputs[var_type][var_name]['units'] = convert_to_ascii(
            options['units'])
        self.outputs[var_type][var_name]['shape'] = tuple(val.shape[1:])
resuTime = [] resuMrho = [] resuCO2 = [] resuCases = [] resuEngine = [] #VMGM resuTaper = [] resuEngineW = [] resuBuckling = [] resuRho = [] resuSpan = [] for casenbr in range(0, len(cases), 1): # cr = CaseReader("articleMats/aerostructMrhoi"+str(cases[casenbr][1])+"sk"+str(cases[casenbr][0])+"sr"+str(cases[casenbr][4])+"sn"+str(cases[casenbr][2])+"tc"+str(cases[casenbr][3])+".db") cr = CaseReader("aerostructMrhoi" + str(cases[casenbr][4]) + "sk" + str(cases[casenbr][0]) + "sr" + str(cases[casenbr][1]) + "sn" + str(cases[casenbr][2]) + "tc" + str(cases[casenbr][3]) + ".db") driver_cases = cr.list_cases('driver') iterations = len(driver_cases) case = cr.get_case(driver_cases[-1]) design_vars = case.get_design_vars() objective = case.get_objectives() constraints = case.get_constraints() rhorho = design_vars['mrho'] weight = case.outputs['wing.structural_mass'][0] co2 = objective['emitted_co2'][0] engine_location = case.outputs['engine_location'][0] #VMGM taper = case.inputs['wing.geometry.mesh.taper.taper'][0] span = case.inputs['wing.geometry.mesh.stretch.span'][0] engineW = case.outputs['point_masses'][0]
def compute(self, inputs, outputs):
    # type: (Vector, Vector) -> None
    """Computation performed by the component.

    Runs the wrapped subdriver: pushes the group-level inputs down into the
    sub-problem, executes the driver (recording to a per-run case file), and
    pulls the results (including any DOE sample vectors) back up into outputs.

    Parameters
    ----------
    inputs : all inputs coming from outside the component in the group

    outputs : all outputs provided outside the component in the group"""
    # Define problem of subdriver
    p = self.prob
    m = p.model

    # Push global inputs down
    for input_name in m.model_constants:
        p[input_name] = inputs[input_name]

    failed_experiments = {}
    # sort to have outputs first
    sorted_model_super_inputs = sorted(m.model_super_inputs.keys(), reverse=True)
    for input_name in sorted_model_super_inputs:
        if input_name in m.sm_of_training_params.keys():  # Add these inputs as training data
            # route the value into the surrogate model's training option instead
            # of setting it directly on the problem
            sm_uid = m.sm_of_training_params[input_name]
            pred_param = m.find_mapped_parameter(input_name,
                                                 m.sm_prediction_inputs[sm_uid] |
                                                 m.sm_prediction_outputs[sm_uid])
            sm_comp = getattr(m, str_to_valid_sys_name(sm_uid))
            if sm_uid not in failed_experiments.keys():
                failed_experiments[sm_uid] = (None, None)
            sm_comp.options['train:' + pred_param], failed_experiments[sm_uid] \
                = p.postprocess_experiments(inputs[input_name], input_name,
                                            failed_experiments[sm_uid])
        else:
            p[input_name] = inputs[input_name]

    # Provide message on failed experiments
    warn_about_failed_experiments(failed_experiments)

    # Set initial values of design variables back to original ones (to avoid using values of
    # last run)
    for des_var, attrbs in m.design_vars.items():
        p[des_var] = attrbs['initial']

    # Run the driver
    print('Running subdriver {}'.format(self.options['driver_uid']))
    if 'Sub-Optimizer' not in p.case_reader_path:
        # attach a fresh recorder per run so each loop gets its own case file
        p.driver.cleanup()
        basename, extension = os.path.splitext(p.case_reader_path)
        case_reader_filename = basename + '_loop' + str(self._run_count) + extension
        p.driver.add_recorder(SqliteRecorder(case_reader_filename))
        p.driver.recording_options['includes'] = ['*']
        p.driver.recording_options['record_model_metadata'] = True
        p.driver._setup_recording()
    p.run_driver()
    self._add_run_count()

    # Pull the value back up to the output array
    doe_out_vecs = {}
    for output_name in m.model_super_outputs:
        # Add these outputs as vectors based on DOE driver
        if output_name in m.doe_parameters.keys():
            doe_out_vecs[output_name] = []
        else:
            if not p.driver.fail:
                outputs[output_name] = p[output_name]
            else:
                outputs[output_name] = float('nan')

    # If the driver failed (hence, optimization failed), then send message and clean
    if p.driver.fail:
        print('Driver run failed!')
        p.clean_driver_after_failure()

    # Provide DOE output vectors as output of the component, if this is expected
    if doe_out_vecs:
        # NOTE(review): case_reader_filename is only bound when 'Sub-Optimizer' is
        # not in p.case_reader_path — confirm DOE outputs cannot occur otherwise.
        # First read out the case reader
        cr = CaseReader(case_reader_filename)
        cases = cr.list_cases('driver')
        for n in range(len(cases)):
            cr_outputs = cr.get_case(n).outputs
            doe_param_matches = {}
            for output_name in doe_out_vecs.keys():
                doe_param_matches[output_name] = doe_param_match \
                    = m.find_mapped_parameter(output_name, cr_outputs.keys())
                doe_out_vecs[output_name].append(cr_outputs[doe_param_match][0])

        # Then write the final vectors to the global output array
        for output_name in doe_out_vecs.keys():
            # design-variable samples are stored scaled; unscale them first
            if output_name in p.doe_samples[p.driver_uid]['inputs']:
                des_var_match = m.find_mapped_parameter(output_name, m._design_vars.keys())
                doe_out_vecs[output_name] = unscale_value(doe_out_vecs[output_name],
                                                          m._design_vars[des_var_match]['ref0'],
                                                          m._design_vars[des_var_match]['ref'])
            outputs[output_name] = np.array(doe_out_vecs[output_name])
def _load_results(self, filename):
    """
    Load PhaseSimulationResults from the given file.

    Parameters
    ----------
    filename : str
        The path of the file from which to load the simulation results.
    """
    cr = CaseReader(filename)

    # only the final system iteration holds the complete simulation record
    case = cr.system_cases.get_case(-1)

    loaded_outputs = cr.list_outputs(case=case, explicit=True, implicit=True,
                                     values=True, units=True, shape=True,
                                     out_stream=None)

    # collect the phase names from outputs pathed as 'phases.<name>.…'
    phase_names = set([s[0].split('.')[1] for s in loaded_outputs
                       if s[0].startswith('phases.')])

    # pre-build an empty per-phase result structure
    for phase_name in phase_names:
        self.outputs['phases'][phase_name] = {}
        self.outputs['phases'][phase_name]['indep'] = {}
        self.outputs['phases'][phase_name]['states'] = {}
        self.outputs['phases'][phase_name]['controls'] = {}
        self.outputs['phases'][phase_name]['control_rates'] = {}
        self.outputs['phases'][phase_name]['design_parameters'] = {}
        self.outputs['phases'][phase_name]['input_parameters'] = {}
        self.outputs['phases'][phase_name]['ode'] = {}

    for name, options in loaded_outputs:
        if name.startswith('phases'):
            phase_name = name.split('.')[1]
            output_name = name.replace('phases.{0}.'.format(phase_name), '')

            if output_name.startswith('inputs.'):
                # strip the 'inputs.' prefix, then dispatch on the remaining prefix
                output_name = output_name.replace('inputs.', '')

                if output_name == 'time':
                    var_type = 'indep'
                    var_name = 'time'
                # deliberately a separate `if` (not `elif`): 'time' matches none
                # of the prefixes below, so it simply falls through the chain
                if output_name.startswith('states:'):
                    var_type = 'states'
                    var_name = output_name.split(':')[-1]
                elif output_name.startswith('controls:'):
                    var_type = 'controls'
                    var_name = output_name.split(':')[-1]
                elif output_name.startswith('control_rates:'):
                    var_type = 'control_rates'
                    var_name = output_name.split(':')[-1]
                elif output_name.startswith('design_parameters:'):
                    var_type = 'design_parameters'
                    var_name = output_name.split(':')[-1]
                elif output_name.startswith('input_parameters:'):
                    var_type = 'input_parameters'
                    var_name = output_name.split(':')[-1]
            elif output_name.startswith('ode.'):
                var_type = 'ode'
                var_name = output_name.replace('ode.', '')
            else:
                raise RuntimeError(
                    'unexpected output in phase {1}: {0}'.format(
                        name, phase_name))

            # store value, ascii units, and recorded shape for this variable
            self.outputs['phases'][phase_name][var_type][var_name] = {}
            self.outputs['phases'][phase_name][var_type][var_name][
                'value'] = options['value']
            self.outputs['phases'][phase_name][var_type][var_name]['units'] = \
                convert_to_ascii(options['units'])
            self.outputs['phases'][phase_name][var_type][var_name][
                'shape'] = options['shape']
# model.add_design_var('eta_sa', lower = 0.01, upper=1) model.add_design_var('M_ps', lower=0.01, upper=100) model.add_constraint('con1',) model.add_objective('t_tot') #Case Recorder Setting recorder = SqliteRecorder('test.sql') p.driver.add_recorder(recorder) p.add_recorder(recorder) #Set-up and Run p.setup() p.set_solver_print(2) p.run_driver() # view_model(p) p.record_iteration('final') p.cleanup() cr = CaseReader('test.sql') drivercases = cr.list_cases('driver') case = cr.get_case(drivercases[0]) print(sorted(case.outputs.keys())) print('P_req',['P_req']) print('A_sa',p['A_sa']) print('M_u',p['M_u']) print('M_ps',p['M_ps']) print(p['t_tot']) print(p['cycles'])
# -*- coding: utf-8 -*- """ Created on Fri Apr 26 17:06:34 2019 @author: e.duriez """ from openmdao.api import Problem, ScipyOptimizeDriver, SqliteRecorder, CaseReader import matplotlib.pyplot as plt import numpy as np #cr = CaseReader("aerostructMrhoi504.5sk0.002sr0.0001sn10tc0.13.db") # 65kg cr = CaseReader("aerostructMrhoi504.5sk0.002sr0.0001sn50tc0.05.db") # 46kg #cr = CaseReader("aerostructMrhoi504.5sk0.002sr0.00030000000000000003sn10tc0.05.db") # 86kg #cr = CaseReader("aerostructMrhoi504.5sk0.002sr0.00030000000000000003sn10tc0.13.db") # 63kg driver_cases = cr.list_cases('driver') iterations = len(driver_cases) mrho = [] masse = [] co2 = [] taper = [] span = [] chord = [] chordTip = [] surf = [] surface0 = [] surface1 = [] sparThicknessRoot = [] sparThicknessTip = []
def get_case_reader(data_dir, problem_constants):
    """Open a CaseReader on the recording file for the given problem."""
    filename = recording_filename(data_dir, problem_constants.id)
    return CaseReader(filename)
def test_record_default_file(self, transcription='gauss-lobatto', top_level_jacobian='csc',
                             optimizer='slsqp'):
    """Solve the brachistochrone, then verify the default simulation recording file
    matches the explicit simulation output, both directly and after reloading."""
    p = Problem(model=Group())

    if optimizer == 'SNOPT':
        p.driver = pyOptSparseDriver()
        p.driver.options['optimizer'] = OPTIMIZER
        p.driver.opt_settings['Major iterations limit'] = 100
        p.driver.opt_settings['iSumm'] = 6
        p.driver.opt_settings['Verify level'] = 3
    else:
        p.driver = ScipyOptimizeDriver()
        p.driver.options['dynamic_simul_derivs'] = True

    phase = Phase(transcription,
                  ode_class=BrachistochroneODE,
                  num_segments=8,
                  transcription_order=3)

    p.model.add_subsystem('phase0', phase)

    phase.set_time_options(initial_bounds=(0, 0), duration_bounds=(.5, 10))

    # x and y are pinned at both ends; v only at the start
    phase.set_state_options('x', fix_initial=True, fix_final=True)
    phase.set_state_options('y', fix_initial=True, fix_final=True)
    phase.set_state_options('v', fix_initial=True)

    phase.add_control('theta', units='deg', rate_continuity=True,
                      lower=0.01, upper=179.9)

    # Minimize time at the end of the phase
    phase.add_objective('time', loc='final', scaler=10)

    p.model.linear_solver = DirectSolver(assemble_jac=True)
    p.model.options['assembled_jac_type'] = top_level_jacobian.lower()

    p.setup()

    p['phase0.t_initial'] = 0.0
    p['phase0.t_duration'] = 2.0

    # linear initial guesses for the states and control
    p['phase0.states:x'] = phase.interpolate(ys=[0, 10], nodes='state_input')
    p['phase0.states:y'] = phase.interpolate(ys=[10, 5], nodes='state_input')
    p['phase0.states:v'] = phase.interpolate(ys=[0, 9.9], nodes='state_input')
    p['phase0.controls:theta'] = phase.interpolate(ys=[5, 100.5], nodes='control_input')

    p.run_driver()

    # explicit simulation on 50 evenly spaced times for comparison
    exp_out = phase.simulate(times=np.linspace(p['phase0.t_initial'],
                                               p['phase0.t_initial'] + p['phase0.t_duration'],
                                               50))

    # simulate() records to 'phase0_sim.db' by default; compare against it
    cr = CaseReader('phase0_sim.db')
    last_case = cr.system_cases.get_case(-1)

    for var in ['time', 'states:x', 'states:y', 'states:v', 'controls:theta']:
        # recorded names are prefixed ('states:x'); get_values wants the bare name
        if ':' in var:
            _var = var.split(':')[-1]
        else:
            _var = var
        assert_almost_equal(last_case.outputs[var].ravel(),
                            exp_out.get_values(_var).ravel())

    # the file should also round-trip through load_simulation_results
    loaded_exp_out = load_simulation_results('phase0_sim.db')

    for var in ['time', 'x', 'y', 'v', 'theta']:
        assert_almost_equal(exp_out.get_values(var).ravel(),
                            loaded_exp_out.get_values(var).ravel())
prob.set_val("traj.phase0.t_initial", 0, units="s") prob.set_val("traj.phase0.t_duration", 2000, units="s") prob.set_val("traj.phase0.controls:alpha", phase0.interpolate(ys=[17.4*np.pi/180, 17.4*np.pi/180], nodes="control_input"), units="rad") prob.set_val("traj.phase0.controls:beta", phase0.interpolate(ys=[-75*np.pi/180, 0*np.pi/180], nodes="control_input"), units="rad") recorder = SqliteRecorder("reentry.sql") prob.driver.add_recorder(recorder) prob.run_driver() # prob.run_model() sim_out = traj.simulate() prob.cleanup() case_reader = CaseReader("reentry.sql") driver_cases = case_reader.list_cases("driver") last_case = case_reader.get_case(driver_cases[-1]) final_constraints = last_case.get_constraints() final_q = final_constraints["traj.phase0.path_constraints.path:q"] plt.figure(0) plt.plot(prob.get_val("traj.phase0.timeseries.time", units="s"), prob.get_val("traj.phase0.timeseries.controls:alpha", units="deg"), "ro", label="Solution") plt.plot(sim_out.get_val("traj.phase0.timeseries.time", units="s"), sim_out.get_val("traj.phase0.timeseries.controls:alpha", units="deg"), "b-", label="Simulation") plt.title("Angle of Attack over Time") plt.xlabel("Time (s)") plt.ylabel("Angle of Attack (degrees)") plt.legend() plt.figure(1)
def test_box_behnken(self):
    """Run a Box-Behnken DOE on a 3-factor model and verify every generated case."""
    upper = 10.
    center = 1

    prob = Problem()
    model = prob.model

    indep = model.add_subsystem('indep', IndepVarComp(), promotes=['*'])
    indep.add_output('x', 0.0)
    indep.add_output('y', 0.0)
    indep.add_output('z', 0.0)

    model.add_subsystem('comp', ExecComp('a = x**2 + y - z'), promotes=['*'])

    model.add_design_var('x', lower=0., upper=upper)
    model.add_design_var('y', lower=0., upper=upper)
    model.add_design_var('z', lower=0., upper=upper)
    model.add_objective('a')

    prob.driver = DOEDriver(BoxBehnkenGenerator(center=center))
    prob.driver.add_recorder(SqliteRecorder("cases.sql"))

    prob.setup(check=False)
    prob.run_driver()
    prob.cleanup()

    cases = CaseReader("cases.sql").driver_cases

    # The Box-Behnken design for 3 factors involves three blocks, in each of
    # which 2 factors are varied thru the 4 possible combinations of high & low.
    # It also includes centre points (all factors at their central values).
    # ref: https://en.wikipedia.org/wiki/Box-Behnken_design
    self.assertEqual(cases.num_cases, (3 * 4) + center)

    # design points in generation order: (x, y, z)
    design_points = [
        (0., 0., 5.), (10., 0., 5.), (0., 10., 5.), (10., 10., 5.),
        (0., 5., 0.), (10., 5., 0.), (0., 5., 10.), (10., 5., 10.),
        (5., 0., 0.), (5., 10., 0.), (5., 0., 10.), (5., 10., 10.),
        (5., 5., 5.),
    ]
    expected = {i: {'x': np.array([x]), 'y': np.array([y]), 'z': np.array([z])}
                for i, (x, y, z) in enumerate(design_points)}

    # every recorded case must match its expected design point exactly
    for n in range(cases.num_cases):
        outputs = cases.get_case(n).outputs
        for name in ('x', 'y', 'z'):
            self.assertEqual(outputs[name], expected[n][name])
def test_brachistochrone_recording(self):
    """
    Solve the brachistochrone problem with one SqliteRecorder attached to the
    driver, the model and the phase, then reload the last recorded system case
    from 'brachistochrone_solution.db' and verify the recorded control values.
    """
    import matplotlib
    # use a non-interactive backend so the test can run headless
    matplotlib.use('Agg')

    from openmdao.api import Problem, Group, ScipyOptimizeDriver, DirectSolver, \
        SqliteRecorder, CaseReader
    from openmdao.utils.assert_utils import assert_rel_error
    from dymos import Phase, GaussLobatto
    from dymos.examples.brachistochrone.brachistochrone_ode import BrachistochroneODE

    p = Problem(model=Group())
    p.driver = ScipyOptimizeDriver()

    phase = Phase(ode_class=BrachistochroneODE,
                  transcription=GaussLobatto(num_segments=10))
    p.model.add_subsystem('phase0', phase)

    phase.set_time_options(initial_bounds=(0, 0), duration_bounds=(.5, 10))

    # x and y are pinned at both endpoints; v only at the start
    phase.set_state_options('x', fix_initial=True, fix_final=True)
    phase.set_state_options('y', fix_initial=True, fix_final=True)
    phase.set_state_options('v', fix_initial=True)

    phase.add_control('theta', units='deg', rate_continuity=False,
                      lower=0.01, upper=179.9)
    phase.add_design_parameter('g', units='m/s**2', opt=False, val=9.80665)

    # Minimize time at the end of the phase
    phase.add_objective('time', loc='final', scaler=10)

    p.model.linear_solver = DirectSolver()

    # Recording
    rec = SqliteRecorder('brachistochrone_solution.db')

    p.driver.recording_options['record_desvars'] = True
    p.driver.recording_options['record_responses'] = True
    p.driver.recording_options['record_objectives'] = True
    p.driver.recording_options['record_constraints'] = True

    p.model.recording_options['record_metadata'] = True

    # the same recorder instance is attached at driver, model and phase level
    p.driver.add_recorder(rec)
    p.model.add_recorder(rec)
    phase.add_recorder(rec)

    p.setup()

    # linear initial guesses for time, states and control
    p['phase0.t_initial'] = 0.0
    p['phase0.t_duration'] = 2.0
    p['phase0.states:x'] = phase.interpolate(ys=[0, 10], nodes='state_input')
    p['phase0.states:y'] = phase.interpolate(ys=[10, 5], nodes='state_input')
    p['phase0.states:v'] = phase.interpolate(ys=[0, 9.9], nodes='state_input')
    p['phase0.controls:theta'] = phase.interpolate(ys=[5, 100.5], nodes='control_input')

    # Solve for the optimal trajectory
    p.run_driver()

    # Test the results
    assert_rel_error(self, p.get_val('phase0.timeseries.time')[-1], 1.8016,
                     tolerance=1.0E-3)

    # reload the last recorded 'root' system case and compare the control
    cr = CaseReader('brachistochrone_solution.db')
    system_cases = cr.list_cases('root')
    case = cr.get_case(system_cases[-1])

    outputs = dict([(o[0], o[1]) for o in case.list_outputs(units=True,
                                                            shape=True,
                                                            out_stream=None)])

    assert_rel_error(self, p['phase0.controls:theta'],
                     outputs['phase0.control_group.indep_controls.controls:theta']['value'])
def test_latin_hypercube(self):
    """
    Run a seeded Latin hypercube DOE on the Paraboloid and verify both the
    recorded values and the Latin hypercube property: the range of each
    design variable splits into ``samples`` equal buckets and every bucket
    receives exactly one sample.
    """
    samples = 4

    bounds = np.array([
        [-1, -10],  # lower bounds for x and y
        [1, 10]     # upper bounds for x and y
    ])
    xlb, xub = bounds[0][0], bounds[1][0]
    ylb, yub = bounds[0][1], bounds[1][1]

    prob = Problem()
    model = prob.model

    model.add_subsystem('p1', IndepVarComp('x', 0.0), promotes=['x'])
    model.add_subsystem('p2', IndepVarComp('y', 0.0), promotes=['y'])
    model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])

    model.add_design_var('x', lower=xlb, upper=xub)
    model.add_design_var('y', lower=ylb, upper=yub)
    model.add_objective('f_xy')

    prob.driver = DOEDriver()
    # FIX: use the ``samples`` variable instead of a hard-coded 4 so the
    # generator and the bucket checks below cannot drift apart.
    prob.driver.options['generator'] = LatinHypercubeGenerator(samples=samples, seed=0)

    prob.driver.add_recorder(SqliteRecorder("cases.sql"))

    prob.setup(check=False)
    prob.run_driver()
    prob.cleanup()

    # the sample space for each variable should be divided into equal
    # size buckets and each variable should have a value in each bucket
    all_buckets = set(range(samples))

    # (xlb/xub and ylb/yub are already unpacked above; the original
    # recomputed them here redundantly)
    x_offset = 0 - xlb
    x_bucket_size = xub - xlb
    x_buckets_filled = set()

    y_offset = 0 - ylb
    y_bucket_size = yub - ylb
    y_buckets_filled = set()

    # expected values for seed = 0
    expected = {
        0: {'x': np.array([-0.19861831]), 'y': np.array([-6.42405317])},
        1: {'x': np.array([0.2118274]),   'y': np.array([9.458865])},
        2: {'x': np.array([0.71879361]),  'y': np.array([3.22947057])},
        3: {'x': np.array([-0.72559325]), 'y': np.array([-2.27558409])},
    }

    cases = CaseReader("cases.sql").driver_cases
    self.assertEqual(cases.num_cases, samples)

    for n in range(cases.num_cases):
        outputs = cases.get_case(n).outputs
        x = outputs['x']
        y = outputs['y']

        bucket = int((x + x_offset) / (x_bucket_size / samples))
        x_buckets_filled.add(bucket)

        bucket = int((y + y_offset) / (y_bucket_size / samples))
        y_buckets_filled.add(bucket)

        assert_rel_error(self, x, expected[n]['x'], 1e-4)
        assert_rel_error(self, y, expected[n]['y'], 1e-4)

    self.assertEqual(x_buckets_filled, all_buckets)
    self.assertEqual(y_buckets_filled, all_buckets)
def test_reader_instantiates(self): """ Test that CaseReader returns an HDF5CaseReader. """ cr = CaseReader(self.filename) self.assertTrue(isinstance(cr, HDF5CaseReader), msg='CaseReader not' ' returning the correct subclass.')
def test_latin_hypercube_array(self): samples = 4 bounds = np.array([ [-10, -50], # lower bounds for x and y [10, 50] # upper bounds for x and y ]) prob = Problem() model = prob.model model.add_subsystem('p1', IndepVarComp('xy', np.array([50., 50.])), promotes=['*']) model.add_subsystem('comp', ParaboloidArray(), promotes=['*']) model.add_design_var('xy', lower=bounds[0], upper=bounds[1]) model.add_objective('f_xy') prob.driver = DOEDriver(LatinHypercubeGenerator(samples=4, seed=0)) prob.driver.add_recorder(SqliteRecorder("cases.sql")) prob.setup(check=False) prob.run_driver() prob.cleanup() # the sample space for each variable should be divided into equal # size buckets and each variable should have a value in each bucket all_buckets = set(range(samples)) xlb, xub = bounds[0][0], bounds[1][0] x_offset = 0 - xlb x_bucket_size = xub - xlb x_buckets_filled = set() ylb, yub = bounds[0][1], bounds[1][1] y_offset = 0 - ylb y_bucket_size = yub - ylb y_buckets_filled = set() # expected values for seed = 0 expected = { 0: { 'xy': np.array([-1.98618312, -32.12026584]) }, 1: { 'xy': np.array([2.118274, 47.29432502]) }, 2: { 'xy': np.array([7.18793606, 16.14735283]) }, 3: { 'xy': np.array([-7.25593248, -11.37792043]) }, } cases = CaseReader("cases.sql").driver_cases self.assertEqual(cases.num_cases, 4) for n in range(cases.num_cases): outputs = cases.get_case(n).outputs x = outputs['xy'][0] y = outputs['xy'][1] bucket = int((x + x_offset) / (x_bucket_size / samples)) x_buckets_filled.add(bucket) bucket = int((y + y_offset) / (y_bucket_size / samples)) y_buckets_filled.add(bucket) assert_rel_error(self, x, expected[n]['xy'][0], 1e-4) assert_rel_error(self, y, expected[n]['xy'][1], 1e-4) self.assertEqual(x_buckets_filled, all_buckets) self.assertEqual(y_buckets_filled, all_buckets)
def test_derivs(self): """ Test that derivs is None if not provided in the recording. """ cr = CaseReader(self.filename) last_case = cr.get_case(-1) self.assertIsNone(last_case.derivs, "Case erroneously contains derivs.")
def test_latin_hypercube_center(self): samples = 4 upper = 10. prob = Problem() model = prob.model indep = model.add_subsystem('indep', IndepVarComp()) indep.add_output('x', 0.0) indep.add_output('y', 0.0) model.add_subsystem('comp', Paraboloid()) model.connect('indep.x', 'comp.x') model.connect('indep.y', 'comp.y') model.add_design_var('indep.x', lower=0., upper=upper) model.add_design_var('indep.y', lower=0., upper=upper) model.add_objective('comp.f_xy') prob.driver = DOEDriver( LatinHypercubeGenerator(samples=samples, criterion='c')) prob.driver.add_recorder(SqliteRecorder("cases.sql")) prob.setup(check=False) prob.run_driver() prob.cleanup() cases = CaseReader("cases.sql").driver_cases self.assertEqual(cases.num_cases, samples) # the sample space for each variable (0 to upper) should be divided into # equal size buckets and each variable should have a value in each bucket bucket_size = upper / samples all_buckets = set(range(samples)) x_buckets_filled = set() y_buckets_filled = set() # with criterion of 'center', each value should be in the center of it's bucket valid_values = [ round(bucket_size * (bucket + 1 / 2), 3) for bucket in all_buckets ] for n in range(cases.num_cases): outputs = cases.get_case(n).outputs x = float(outputs['indep.x']) y = float(outputs['indep.y']) x_buckets_filled.add(int(x / bucket_size)) y_buckets_filled.add(int(y / bucket_size)) self.assertTrue( round(x, 3) in valid_values, '%f not in %s' % (x, valid_values)) self.assertTrue( round(y, 3) in valid_values, '%f not in %s' % (y, valid_values)) self.assertEqual(x_buckets_filled, all_buckets) self.assertEqual(y_buckets_filled, all_buckets)
""" Plots objective and constraint histories from the recorded data in 'data.sql'. """ from __future__ import print_function from six.moves import range import numpy as np from matplotlib import pylab from openmdao.api import CaseReader # load cases from recording database cr = CaseReader('ru_mdp.sql') #cases = cr.get_cases('driver') cases = cr.list_cases('driver') case = cr.get_case(cases[0]) num_cases = len(cases) if num_cases == 0: print('No data yet...') quit() else: print('# cases:', num_cases) # determine # of constraints constraints = list(case.get_constraints()) n_con = len(constraints) # collect data into arrays for plotting
""" Plots objective and constraint histories from the recorded data in 'data.sql'. """ from __future__ import print_function from six.moves import range import numpy as np from matplotlib import pylab from openmdao.api import CaseReader # load cases from recording database cases = CaseReader('data.sql').driver_cases cases.load_cases() num_cases = cases.num_cases if num_cases == 0: print('No data yet...') quit() else: print('# cases:', num_cases) # determine the # of points (5 constraints per point) constraints = cases.get_case(0).get_constraints().keys n_point = len(constraints) // 5 # collect data into arrays for plotting X = np.zeros(num_cases) # obj.val Y = np.zeros(num_cases) # sum of constraints Z = np.zeros((num_cases, 5)) # constraints