def test_restore(self):
    # Restore from case, run, verify outputs match expected.
    top = set_as_top(SellarMDF())
    #top.name = 'top'
    top.recorders = [JSONCaseRecorder()]
    top.run()

    assert_rel_error(self, top.sub.globals.z1, 1.977639, .0001)
    assert_rel_error(self, top.half.z2a, 0., .0001)
    assert_rel_error(self, top.sub.x1, 0., .0001)
    assert_rel_error(self, top.sub.states.y[0], 3.160004, .0001)
    assert_rel_error(self, top.sub.states.y[1], 3.755280, .0001)
    assert_rel_error(self, top.driver.eval_objective(), 3.18339413394, .0001)

    cds = CaseDataset('cases.json', 'json')
    cases = cds.data.fetch()
    n_orig = len(cases)  # Typically 142

    top = set_as_top(SellarMDF())
    top._setup()
    cds.restore(top, cases[-1]['_id'])
    top.recorders = [JSONCaseRecorder('cases.restored')]
    top.run()

    assert_rel_error(self, top.sub.globals.z1, 1.977639, .0001)
    assert_rel_error(self, top.half.z2a, 0., .0001)
    assert_rel_error(self, top.sub.x1, 0., .0001)
    assert_rel_error(self, top.sub.states.y[0], 3.160000, .0001)
    assert_rel_error(self, top.sub.states.y[1], 3.755278, .0001)
    assert_rel_error(self, top.driver.eval_objective(), 3.18339397762, .0001)

    cases = CaseDataset('cases.restored', 'json').data.fetch()
    # Exact case counts are unreliable; just ensure the restored run
    # converged in far fewer iterations.
    self.assertTrue(len(cases) < n_orig / 4)  # Typically 15
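# A minimal sketch (not from the test suite) of the record-then-restore
# round trip the test above exercises. Assumes the SellarMDF model used
# above and that JSONCaseRecorder() writes 'cases.json' by default, as
# the test implies.
def restore_last_case_sketch():
    top = set_as_top(SellarMDF())
    top.recorders = [JSONCaseRecorder()]   # records to 'cases.json'
    top.run()

    cds = CaseDataset('cases.json', 'json')
    cases = cds.data.fetch()               # one dict per recorded case

    top = set_as_top(SellarMDF())          # fresh model
    top._setup()                           # required before restore()
    cds.restore(top, cases[-1]['_id'])     # push the last case's values in
    return top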
def test_vtree(self):
    top = Assembly()
    sub = top.add('sub', Assembly())
    sub.add('comp', LoadsComp())
    sub.driver.workflow.add('comp')
    sub.create_passthrough('comp.loads_in')
    sub.create_passthrough('comp.loads_out')
    top.driver.workflow.add('sub')

    jsonfile = os.path.join(self.tempdir, 'test_vtree.json')
    old_json_file = os.path.join(os.path.dirname(__file__), 'vtree.json')
    top.recorders = [JSONCaseRecorder(jsonfile)]

    loads = Loads()
    loads.Fx = [1, 2, 3]
    loads.Fy = [4, 5, 6]
    loads.Fz = [7, 8, 9]
    arr = LoadsArray()
    arr.loads = [loads]
    top.sub.loads_in = arr
    top.run()

    cdsnew = CaseDataset(jsonfile, 'json')
    cdsold = CaseDataset(old_json_file, 'json')

    # Assert the recorded vartree values match the reference file.
    self.assertEqual(
        cdsold.data.vars('sub.comp.loads_out').fetch()[0][0]['loads'][0]['Fx'],
        cdsnew.data.vars('sub.comp.loads_out').fetch()[0][0]['loads'][0]['Fx'])
    self.assertEqual(
        cdsold.data.vars('sub.comp.loads_out').fetch()[1][0]['loads'][0]['Fz'],
        cdsnew.data.vars('sub.comp.loads_out').fetch()[1][0]['loads'][0]['Fz'])
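# Indexing note (grounded in the assertions above): variable-tree values
# come back from a query as nested dict/list structures, indexed first by
# case, then by variable, then into the tree itself:
#
#     fetch()[case_idx][var_idx]['loads'][0]['Fx']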
def test_json(self):
    # Simple check of _JSONReader.
    path = os.path.join(os.path.dirname(__file__), 'jsonrecorder.json')
    cases = CaseDataset(path, 'json').data.fetch()
    self.assertEqual(len(cases), 10)

    path = os.path.join(os.path.dirname(__file__), 'truncated.json')
    cases = CaseDataset(path, 'json').data.fetch()
    self.assertEqual(len(cases), 7)
def test_write(self):
    # Read in a dataset and write out a selected portion of it.
    path = os.path.join(os.path.dirname(__file__), 'jsonrecorder.json')
    cases = CaseDataset(path, 'json').data.fetch()
    self.assertEqual(len(cases), 10)
    self.assertEqual(len(cases[0]), 19)

    names = ('comp1.x', 'comp1.y', 'comp1.z', 'comp2.z')
    CaseDataset(path, 'json').data.vars(names).write('cases.reduced')
    reduced = CaseDataset('cases.reduced', 'json').data.fetch()
    self.assertEqual(len(reduced), 10)
    self.assertEqual(len(reduced[0]), 10)
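# A small sketch (file names are the test's, the function name is ours) of
# the reduce-and-rewrite pattern test_write() checks: select a subset of
# variables from an existing dataset and write them out as a new, smaller
# dataset that can itself be queried.
def write_reduced_sketch(src='jsonrecorder.json', dst='cases.reduced'):
    names = ('comp1.x', 'comp1.y', 'comp1.z', 'comp2.z')
    CaseDataset(src, 'json').data.vars(names).write(dst)
    # The reduced file is a valid dataset in its own right.
    return CaseDataset(dst, 'json').data.fetch()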
def setUp(self):
    #create_files()  # Uncomment to create 'sellar.new'
    path = os.path.join(os.path.dirname(__file__), 'sellar.json')
    self.cds = CaseDataset(path, 'json')
    self.startdir = os.getcwd()
    self.tempdir = tempfile.mkdtemp(prefix='test_query-')
    os.chdir(self.tempdir)
def test_bson(self):
    # Simple check of _BSONReader.
    names = ['half.z2a', 'sub.globals.z1', 'sub.x1']

    path = os.path.join(os.path.dirname(__file__), 'sellar.json')
    json_cases = CaseDataset(path, 'json').data.vars(names).fetch()

    path = os.path.join(os.path.dirname(__file__), 'sellar.bson')
    bson_cases = CaseDataset(path, 'bson').data.vars(*names).fetch()

    for json_case, bson_case in zip(json_cases, bson_cases):
        for json_val, bson_val in zip(json_case, bson_case):
            if isnan(json_val):
                self.assertTrue(isnan(bson_val))
            else:
                self.assertEqual(bson_val, json_val)
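# Side note as a sketch (grounded in the calls above; paths illustrative):
# the test passes .vars() a list in one call and unpacked names in the
# other, so either calling form evidently works, and JSON and BSON
# datasets share the same query chain; only the format argument differs.
names = ['half.z2a', 'sub.globals.z1', 'sub.x1']
q_list = CaseDataset('sellar.json', 'json').data.vars(names)
q_args = CaseDataset('sellar.bson', 'bson').data.vars(*names)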
def generate_and_compare(self, name):
    directory = os.path.dirname(__file__)
    name = os.path.join(directory, name)

    cds = CaseDataset(name + '.json', 'json')
    data = cds.data.fetch()
    caseset_query_to_csv(data, self.filename_csv)

    with open(name + '.csv', 'r') as inp1:
        expected = inp1.readlines()
    with open(self.filename_csv, 'r') as inp2:
        actual = inp2.readlines()

    # Strip trailing whitespace (newlines and carriage returns) and skip
    # the timestamp and uuid columns: some OSes round timestamps, and
    # uuids differ from run to run.
    for exp, act in zip(expected, actual):
        items2 = act.rstrip().split(",")[1:-3]
        for i, item1 in enumerate(exp.rstrip().split(",")[1:-3]):
            item2 = items2[i]
            try:
                # str.isnumeric() only works on unicode, so just try float().
                item1, item2 = float(item1), float(item2)
                # NaN != NaN by definition, so treat a pair of NaNs as equal.
                if isnan(item1) and isnan(item2):
                    continue
                self.assertEqual(item1, item2)
            except (ValueError, TypeError):
                self.assertEqual(item1, item2)
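# Helper sketch distilled from the comparison loop above (the name
# 'fields_match' is ours, not the test suite's): compare two CSV fields,
# treating a pair of NaNs as equal and falling back to string comparison
# for non-numeric fields.
from math import isnan

def fields_match(a, b):
    try:
        a, b = float(a), float(b)
    except (ValueError, TypeError):
        return a == b                  # non-numeric: exact string match
    if isnan(a) and isnan(b):          # NaN != NaN by definition
        return True
    return a == b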
def test_simple(self):
    # Make sure the CSV file can be read and has the correct number of cases.
    self.top.recorders = [JSONCaseRecorder(self.filename_json)]
    self.top.recorders[0].num_backups = 0
    self.top.run()

    cds = CaseDataset(self.filename_json, 'json')
    data = cds.data.fetch()  # results
    caseset_query_to_csv(data, self.filename_csv)

    cases = [case for case in CSVCaseIterator(filename=self.filename_csv)]
def test_options_with_includes_excludes(self):
    """ Verify options with includes and excludes
        (excludes are processed after includes):

        save_problem_formulation = True
        includes = ['comp1*']
        excludes = ['*directory', '*force_fd', '*missing_deriv_policy']
    """
    sout = StringIO.StringIO()
    self.top.recorders = [JSONCaseRecorder(sout)]
    self.top.recording_options.includes = ['comp1*']
    self.top.recording_options.excludes = [
        '*directory', '*force_fd', '*missing_deriv_policy'
    ]
    self.top.run()

    sout.seek(0)  # need to go back to the front of the "file"
    cds = CaseDataset(sout, 'json')

    constants = cds.simulation_info['constants'].keys()
    expected = [u'comp1.y']
    self.assertFalse(set(constants) - set(expected))

    vnames = cds.data.var_names().fetch()
    expected = [
        '_driver_id', '_id', '_parent_id', u'_pseudo_0.out0',
        u'_pseudo_1.out0', u'comp1.derivative_exec_count',
        u'comp1.exec_count', u'comp1.itername', u'comp1.x', u'comp1.z',
        'error_message', 'error_status', 'timestamp'
    ]
    self.assertFalse(set(vnames).symmetric_difference(set(expected)))

    # Specific variables are there.
    names = ['comp1.z', 'comp1.x']
    vnames = cds.data.vars(names).var_names().fetch()
    self.assertEqual(vnames, names)

    cases = cds.data.vars(names).fetch()
    self.assertEqual(len(cases), 1)
    self.assertEqual(len(cases[0]), len(names))

    iteration_case_1 = {
        "comp1.x": 0.0,
        "comp1.z": 0.0,
    }
    for name, val in zip(names, cases[0]):
        self.assertAlmostEqual(val, iteration_case_1[name])
def test_includes_only(self):
    """ Verify options with includes but not problem formulation:

        save_problem_formulation = False
        includes = ['comp2*']
        excludes = []
    """
    sout = StringIO.StringIO()
    self.top.recorders = [JSONCaseRecorder(sout)]
    self.top.recording_options.save_problem_formulation = False
    self.top.recording_options.includes = ['comp2*']
    self.top.run()

    sout.seek(0)  # need to go back to the front of the "file"
    cds = CaseDataset(sout, 'json')

    vnames = cds.data.var_names().fetch()
    expected = [
        '_driver_id', '_id', '_parent_id',
        u'comp2.derivative_exec_count', u'comp2.exec_count',
        u'comp2.itername', u'comp2.z',
        'error_message', 'error_status', 'timestamp'
    ]
    self.assertFalse(set(vnames) - set(expected))

    constants = cds.simulation_info['constants'].keys()
    expected = [
        u'comp2.directory', u'comp2.force_fd',
        u'comp2.missing_deriv_policy'
    ]
    self.assertFalse(set(constants) - set(expected))

    # Specific variables.
    names = ['comp2.z']
    vnames = cds.data.vars(names).var_names().fetch()
    self.assertEqual(vnames, names)

    cases = cds.data.vars(names).fetch()
    self.assertEqual(len(cases), 1)
    self.assertEqual(len(cases[0]), len(names))

    iteration_case_1 = {
        "comp2.z": 1.0,
    }
    for name, val in zip(names, cases[0]):
        self.assertAlmostEqual(val, iteration_case_1[name])
def test_case_recording(self):
    cds_path = os.path.join(os.path.dirname(__file__), "cds.json")
    asm = set_as_top(SellarProblem())
    asm.architecture = MDF()
    asm.recorders = [JSONCaseRecorder(cds_path)]
    asm.run()

    cds = CaseDataset(cds_path, 'json')
    data = cds.data.by_variable().fetch()

    self.assertFalse(set(asm.solution.keys()) - set(data.keys()))
    for var in asm.solution.keys():
        self.assertTrue(data[var])
        # allclose() returns a bool, so assert it; compare the final
        # recorded value against the known solution.
        self.assertTrue(np.allclose(data[var][-1], asm.solution[var]))

    del cds
    del data
    os.remove(cds_path)
def test_flatten(self):
    # Try it after creating some Cases; more rigorous checking of the csv.
    outputs = ['comp1.a_array', 'comp1.vt']
    inputs = [('comp1.x_array', array([2.0, 2.0, 2.0]))]
    cases = [Case(inputs=inputs, outputs=outputs)]

    self.top.driver.clear_parameters()
    Case.set_vartree_inputs(self.top.driver, cases)
    self.top.driver.clear_responses()
    self.top.driver.add_responses(outputs)

    self.top.recorders = [JSONCaseRecorder(self.filename_json)]
    self.top.recorders[0].num_backups = 0
    self.top.run()

    cds = CaseDataset(self.filename_json, 'json')
    data = cds.data.fetch()  # results
    caseset_query_to_csv(data, self.filename_csv)

    # Check recorded cases.
    cases = [case for case in CSVCaseIterator(filename=self.filename_csv)]
def test_default_options(self):
    # Verify default options:
    #     save_problem_formulation = True
    #     includes = ['*']
    #     excludes = []
    sout = StringIO.StringIO()
    self.top.recorders = [JSONCaseRecorder(sout)]
    self.top.run()

    sout.seek(0)  # need to go back to the front of the "file"
    cds = CaseDataset(sout, 'json')

    vnames = cds.data.var_names().fetch()
    expected = [
        '_driver_id', '_id', '_parent_id', u'_pseudo_0.out0',
        u'_pseudo_1.out0', u'comp1.derivative_exec_count',
        u'comp1.exec_count', u'comp1.itername', u'comp1.x', u'comp1.z',
        u'comp2.derivative_exec_count', u'comp2.exec_count',
        u'comp2.itername', u'comp2.z', u'driver.workflow.itername',
        'error_message', 'error_status', 'timestamp'
    ]
    self.assertFalse(set(vnames).symmetric_difference(set(expected)))

    # Specific variables.
    names = ['comp1.x', 'comp2.z', 'comp1.z']
    vnames = cds.data.vars(names).var_names().fetch()
    self.assertEqual(vnames, names)

    cases = cds.data.vars(names).fetch()
    self.assertEqual(len(cases), 1)
    self.assertEqual(len(cases[0]), len(names))

    iteration_case_1 = {
        "comp1.x": 0.0,
        "comp1.z": 0.0,
        "comp2.z": 1.0,
    }
    for name, val in zip(names, cases[0]):
        self.assertAlmostEqual(val, iteration_case_1[name])
def test_problem_formulation_only(self):
    """ Verify options with no includes:

        save_problem_formulation = True
        includes = []
        excludes = []
    """
    sout = StringIO.StringIO()
    self.top.recorders = [JSONCaseRecorder(sout)]
    self.top.recording_options.save_problem_formulation = True
    self.top.recording_options.includes = []
    self.top.run()

    sout.seek(0)  # need to go back to the front of the "file"
    cds = CaseDataset(sout, 'json')

    vnames = cds.data.var_names().fetch()
    expected = [
        '_driver_id', '_id', '_parent_id', u'_pseudo_0.out0',
        u'_pseudo_1.out0', u'comp1.x',
        'error_message', 'error_status', 'timestamp'
    ]
    self.assertFalse(set(vnames).symmetric_difference(set(expected)))

    # Specific variables.
    names = ['comp1.x']
    vnames = cds.data.vars(names).var_names().fetch()
    self.assertFalse(set(vnames).symmetric_difference(set(names)))

    cases = cds.data.vars(names).fetch()
    self.assertEqual(len(cases), 1)
    self.assertEqual(len(cases[0]), len(names))

    iteration_case_1 = {
        "comp1.x": 0.0,
    }
    for name, val in zip(names, cases[0]):
        self.assertAlmostEqual(val, iteration_case_1[name])
def test_options_with_excludes(self):
    """ Verify options with excludes:

        save_problem_formulation = True
        includes = ['*']
        excludes = ['*directory', '*force_fd', '*missing_deriv_policy',
                    '*gradient_options*']
    """
    sout = StringIO.StringIO()
    self.top.recorders = [JSONCaseRecorder(sout)]
    self.top.recording_options.excludes = [
        '*directory', '*force_fd', '*missing_deriv_policy',
        '*gradient_options*'
    ]
    self.top.run()

    sout.seek(0)  # need to go back to the front of the "file"
    cds = CaseDataset(sout, 'json')

    constants = cds.simulation_info['constants'].keys()
    expected = [
        u'recording_options.save_problem_formulation',
        u'recording_options.includes',
        u'comp1.y',
        u'recording_options.excludes'
    ]
    self.assertFalse(set(constants) - set(expected))
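# Recap sketch of the recording_options knobs the tests above exercise;
# 'top' stands in for any Assembly, and the patterns are illustrative.
# Excludes are applied after includes, and glob-style patterns are allowed.
sout = StringIO.StringIO()
top.recorders = [JSONCaseRecorder(sout)]
top.recording_options.save_problem_formulation = True
top.recording_options.includes = ['comp1*']
top.recording_options.excludes = ['*directory', '*force_fd']
top.run()
sout.seek(0)                       # rewind before handing to CaseDataset
cds = CaseDataset(sout, 'json')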
        # Connect the Airline Allocation subproblem component with the
        # Branch and Bound algorithm component and the solver.
        # Connect the Branch and Bound algorithm component with the solver
        # component.
        self.connect('branchbound_algorithm.lb', 'nonlinopt.lb')
        self.connect('branchbound_algorithm.ub', 'nonlinopt.ub')

        self.driver.add_stop_condition('branchbound_algorithm.exec_loop != 0')
        self.driver.max_iterations = 1000000

        self.recorders = [JSONCaseRecorder('nonlintest.json')]


if __name__ == "__main__":
    from openmdao.lib.casehandlers.api import CaseDataset, caseset_query_to_html

    nlt = NonLinTest()

    # Initial bounds for the optimization.
    nlt.nonlinopt.lb = nlt.branchbound_algorithm.lb_init = [0., 0.]
    nlt.nonlinopt.ub = nlt.branchbound_algorithm.ub_init = [1e3, 1e3]

    nlt.run()

    cds = CaseDataset('nonlintest.json', 'json')
    caseset_query_to_html(cds.data, 'nonlintest.html')

    print "x_opt: ", nlt.branchbound_algorithm.xopt
    print "obj_opt: ", nlt.branchbound_algorithm.obj_opt

    # from openmdao.util.dotgraph import plot_graph
    # plot_graph(nlt._reduced_graph)
num_elem = 3000
num_cp_init = 10
num_cp_max = 10  # set to 200 for the sweep
num_cp_step = 10
x_range = 15000.0

# END USER SPECIFIED INPUTS
###########################

# Initialize figure, set up folder-paths.
fig = matplotlib.pylab.figure(figsize=(18.0, 8.0))
nr, nc = 4, 3

# Read in the openmdao final dataset.
cds1 = CaseDataset('mission_final_cp_10.bson', 'bson')
final_data = cds1.data.fetch()  # results
final_data = final_data[-1]     # Get last case; earlier cases are sub-iterations.

# Constants are stored here.
x_init = np.array(cds1.simulation_info['constants']['SysXBspline.x_init'])

# Variables are stored in the cases.
dist = np.array(final_data['SysXBspline.x'])
altitude = np.array(final_data['SysHBspline.h'])
speed = np.array(final_data['SysSpeed.v'])
eta = np.array(final_data['SysAeroSurrogate.eta'])
gamma = np.array(final_data['SysGammaBspline.Gamma'])
temp = np.array(final_data['SysTemp.temp'])
alpha = np.array(final_data['SysCLTar.alpha'])
rho = np.array(final_data['SysRho.rho'])
from openmdao.lib.casehandlers.api import CaseDataset


def get_constraint_value_from_case(cds, case, constraint_name):
    # Constraint values are recorded under their pseudo-component names.
    # cds.simulation_info['expressions'] maps a constraint expression to
    # the pseudo-component that holds its value, e.g.
    #     cds.simulation_info['expressions']['pt1.ConS0 <= 0']
    # equals
    #     {u'pcomp_name': u'_pseudo_7', u'data_type': u'Constraint'}
    return case[cds.simulation_info['expressions'][constraint_name]
                ['pcomp_name']]


cds = CaseDataset("CADRE.bson", "bson")
vnames = cds.data.var_names().fetch()
cases = cds.data.driver("driver").fetch()

print "# cases", len(cases)

X, Y, Z = [], [], []
pcom = []
for case in cases:
    data = [case['pt' + str(i) + '.Data'][0][1499] for i in xrange(6)]
    sumdata = sum([float(i) for i in data if i])
    c1 = [get_constraint_value_from_case(cds, case,
                                         "pt" + str(i) + ".ConCh <= 0")
          for i in xrange(6)]
fuels = []
thrusts = []
weights = []
throttles = []
etas = []
rhos = []
lift_cs = []
drag_cs = []
gammas = []
temps = []

num_plts = 23

name = 'mission_history_737.bson'
cds = CaseDataset(os.path.join(folder_path, name), 'bson')

ac_w = cds.simulation_info['constants']['ac_w']
S = cds.simulation_info['constants']['S']

# print [n for n in cds.simulation_info['constants'].keys() if 'alpha' in n]
# print cds.simulation_info['constants']['coupled_solver.alpha']
# exit()
# ['SysAeroSurrogate.alpha']

data = cds.data.driver('driver').by_variable().fetch()

dist = np.array(data['SysXBspline.x'][-1]) * 1e6
altitude = np.array(data['SysHBspline.h'][-1]) * 1e3
rho = np.array(data['SysRho.rho'][-1]) * 1e2
temp = np.array(data['SysTemp.temp'][-1]) * 1e2
speed = np.array(data['SysSpeed.v'][-1]) * 1e2
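# Sketch contrasting the two fetch shapes used in these scripts (inferred
# from usage, not a formal API reference): fetch() yields a list of
# per-case dicts, newest last, while by_variable().fetch() yields
# {var_name: per-case history}, so data['SysRho.rho'][-1] above is the
# final iteration's values.
cases = cds.data.driver('driver').fetch()                  # [case0, case1, ...]
history = cds.data.driver('driver').by_variable().fetch()  # {name: [...]}
last_rho = history['SysRho.rho'][-1]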
import numpy as np
from matplotlib import pyplot as plt, rcParams

from openmdao.lib.casehandlers.api import CaseDataset

cds = CaseDataset('pilot_study.bson', 'bson')

# Ask for the variables you care about.
var_names = ['wing_weight.b', 'wing_weight.cbar', 'wing_weight.s',
             'wing_weight.AR', 'wing_weight.tip_slope', 'wing_weight.M_tot',
             'fuse_weight.M_tot', 'fuse_weight.N_pilot', 'fuse_weight.M_pilot',
             'level.Cl', 'level.alpha', 'level.Re', 'level.drag',
             'turning.drag', 'fuse_weight.V_flight']

# Note: why aren't level.V and wing_weight.V_flight in the data set?
# Note: we should not have to do this, but the query for constant values
# seems broken.
V_flight = cds.simulation_info['constants']['fuse_weight.V_flight']
M_pilot = cds.simulation_info['constants']['fuse_weight.M_pilot']

data = cds.data.driver('driver').vars(var_names).by_variable().fetch()

# Wing size charts.
rcParams['font.size'] = 15  # font size on all plots

fig, ax = plt.subplots()
ax.plot(data['fuse_weight.N_pilot'], data['wing_weight.b'], c='b', lw=5)
ax.set_title('Wing Size vs # of Pilots')
ax.set_xlabel('# of Pilots')
ax.set_ylabel('Span (m)', color='b')
for tl in ax.get_yticklabels():
    tl.set_color('b')
ax.set_xticks([1, 2, 3, 4])
        self.driver.add_parameter('paraboloid.x', low=-50, high=50)
        self.driver.add_parameter('paraboloid.y', low=-50, high=50)
        self.driver.add_response('paraboloid.f_xy')

        self.recorders = [JSONCaseRecorder(out='doe.json')]


if __name__ == "__main__":

    #-----------------------------
    # Run analysis
    #-----------------------------
    import os
    if os.path.exists('doe.json'):
        os.remove('doe.json')

    from openmdao.lib.casehandlers.api import CaseDataset

    analysis = Analysis()
    analysis.run()

    #----------------------------------------------------
    # Print out history of our objective for inspection
    #----------------------------------------------------
    case_dataset = CaseDataset('doe.json', 'json')
    data = case_dataset.data.by_variable().fetch()
    # by_variable() returns {variable name: history}, so print both the
    # name and its recorded values (iterating the dict alone only yields
    # the names).
    for name in data:
        print name, data[name]
from openmdao.lib.casehandlers.api import CaseDataset
import csv

#----------------------------------------------------
# Print out history of our objective for inspection
#----------------------------------------------------
case_dataset = CaseDataset('opt_record.json', 'json')
data = case_dataset.data.by_case().fetch()

csvfile = open('opt_record.csv', 'wb')
spamwriter = csv.writer(csvfile)

for case in data:
    comp = case['compatibility.compatibility']
    print comp
    if comp == 0:
        if 'driver.gen_num' in case:
            gen = [case['driver.gen_num']]
        else:
            gen = [None]
        in_opts = [
            case['compatibility.option1_out'],
            case['compatibility.option2_out'],
            case['compatibility.option3_out'],
            case['compatibility.option4_out'],
            case['compatibility.option5_out'],
            case['compatibility.option6_out'],
            case['compatibility.option7_out'],
            case['compatibility.option8_out'],
            case['compatibility.option9_out'],
        ]
        spamwriter.writerow(gen + in_opts)

# csv.writer has no close(); close the underlying file instead.
csvfile.close()
#  Buckling failure of spar (ConFailBuck)
#  Tensile failure in wire (ConFailWire)
#
#  if flags.ConDef:
#      Constraints on Maximum Deformation (ConDelta)

if __name__ == '__main__':
    import pylab as plt

    from makeplot import plot_single
    from openmdao.lib.casehandlers.api import JSONCaseRecorder

    opt = set_as_top(HeliOpt())
    opt.recorders.append(JSONCaseRecorder(out='heli_opt.json'))
    opt.run()

    # For reference, the MATLAB solution:
    #     Omega: 1.0512
    #     Ptot:  421.3185
    print 'Parameter:  Omega =', opt.aso.config.Omega
    print 'Constraint: Weight-Lift =', (opt.aso.Mtot * 9.8 - opt.aso.Ttot)
    print 'Objective:  Ptot =', opt.aso.Ptot

    from openmdao.lib.casehandlers.api import CaseDataset
    dataset = CaseDataset('heli_opt.json', 'json')
    data = dataset.data.by_case().fetch()
    case = data[-1]
    plot_single(case)
        case_data=case_data,
        svg_dep_graph=svg_dep_graph,
        svg_comp_graph=svg_comp_graph
    )

    with open(filename, "wb") as fh:
        fh.write(outputText)


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 3:
        json_file = sys.argv[1]
        html_file = sys.argv[2]
    elif len(sys.argv) == 2:
        json_file = sys.argv[1]
        html_file = sys.argv[1] + '.html'
    else:
        sys.exit('Usage: %s case_records_json_file output_html_file'
                 % sys.argv[0])

    if not os.path.exists(json_file):
        sys.exit('ERROR: Case records JSON file %s was not found!'
                 % sys.argv[1])

    cds = CaseDataset(json_file, 'json')
    data = cds.data  # results
    caseset_query_to_html(data, filename=html_file)

    browser = webbrowser.get()
    browser.open(html_file, 1, True)