def configure(self): # Components self.add("sin_meta_model", MetaModel()) self.sin_meta_model.model = Sin() self.sin_meta_model.default_surrogate = FloatKrigingSurrogate() # Training the MetaModel self.add("DOE_Trainer", DOEdriver()) self.DOE_Trainer.DOEgenerator = FullFactorial() self.DOE_Trainer.DOEgenerator.num_levels = 25 self.DOE_Trainer.add_parameter("sin_meta_model.x", low=0, high=20) self.DOE_Trainer.case_outputs = ["sin_meta_model.f_x"] self.DOE_Trainer.add_event("sin_meta_model.train_next") self.DOE_Trainer.recorders = [DBCaseRecorder()] # MetaModel Validation self.add("sin_calc", Sin()) self.add("DOE_Validate", DOEdriver()) self.DOE_Validate.DOEgenerator = Uniform() self.DOE_Validate.DOEgenerator.num_samples = 200 self.DOE_Validate.add_parameter(("sin_meta_model.x", "sin_calc.x"), low=0, high=20) self.DOE_Validate.case_outputs = ["sin_calc.f_x", "sin_meta_model.f_x"] self.DOE_Validate.recorders = [DBCaseRecorder()] # Iteration Hierarchy self.driver.workflow = SequentialWorkflow() self.driver.workflow.add(['DOE_Trainer', 'DOE_Validate']) self.DOE_Trainer.workflow.add('sin_meta_model') self.DOE_Validate.workflow.add('sin_meta_model') self.DOE_Validate.workflow.add('sin_calc')
def __init__(self):
    super(Simulation, self).__init__()

    # Components
    self.add("sin_meta_model", MetaModel())
    self.sin_meta_model.surrogate = {"default": KrigingSurrogate()}
    self.sin_meta_model.model = Sin()
    self.sin_meta_model.recorder = DBCaseRecorder()

    # Training the MetaModel
    self.add("DOE_Trainer", DOEdriver())
    self.DOE_Trainer.DOEgenerator = FullFactorial()
    self.DOE_Trainer.DOEgenerator.num_levels = 25
    self.DOE_Trainer.add_parameter("sin_meta_model.x", low=0, high=20)
    self.DOE_Trainer.case_outputs = ["sin_meta_model.f_x"]
    self.DOE_Trainer.add_event("sin_meta_model.train_next")
    self.DOE_Trainer.recorders = [DBCaseRecorder()]
    self.DOE_Trainer.force_execute = True

    # MetaModel Validation
    self.add("sin_calc", Sin())
    self.add("DOE_Validate", DOEdriver())
    self.DOE_Validate.DOEgenerator = Uniform()
    self.DOE_Validate.DOEgenerator.num_samples = 100
    self.DOE_Validate.add_parameter(("sin_meta_model.x", "sin_calc.x"), low=0, high=20)
    self.DOE_Validate.case_outputs = ["sin_calc.f_x", "sin_meta_model.f_x"]
    self.DOE_Validate.recorders = [DBCaseRecorder()]
    self.DOE_Validate.force_execute = True

    # Iteration Hierarchy
    self.driver.workflow = SequentialWorkflow()
    self.driver.workflow.add(['DOE_Trainer', 'DOE_Validate'])
    self.DOE_Trainer.workflow.add('sin_meta_model')
    self.DOE_Validate.workflow.add('sin_meta_model')
    self.DOE_Validate.workflow.add('sin_calc')

def test_string(self):
    recorder = DBCaseRecorder()
    case = Case(inputs=[('str', 'Normal String'),
                        ('unicode', u'Unicode String'),
                        ('list', ['Hello', 'world'])])  # Check pickling.
    recorder.record(case)
    for case in recorder.get_iterator():
        self.assertEqual(case['str'], 'Normal String')
        self.assertEqual(case['unicode'], u'Unicode String')
        self.assertEqual(case['list'], ['Hello', 'world'])

def test_close(self):
    # :memory: can be used after close.
    recorder = DBCaseRecorder()
    case = Case(inputs=[('str', 'Normal String'),
                        ('unicode', u'Unicode String'),
                        ('list', ['Hello', 'world'])])  # Check pickling.
    recorder.record(case)
    recorder.close()
    recorder.record(case)

    # File-based DB recorder can not be used after close.
    tmpdir = tempfile.mkdtemp()
    try:
        dfile = os.path.join(tmpdir, 'junk.db')
        recorder = DBCaseRecorder(dfile)
        recorder.record(case)
        recorder.close()
        assert_raises(self, 'recorder.record(case)', globals(), locals(),
                      RuntimeError, 'Attempt to record on closed recorder')
    finally:
        try:
            shutil.rmtree(tmpdir)
        except OSError:
            logging.error("problem removing directory %s" % tmpdir)

def test_pickle_conversion(self):
    recorder = DBCaseRecorder()
    for i in range(10):
        inputs = [('comp1.x', i), ('comp1.y', i*2.)]
        outputs = [('comp1.z', i*1.5),
                   ('comp2.normal', NormalDistribution(float(i), 0.5))]
        recorder.record(Case(inputs=inputs, outputs=outputs, label='case%s' % i))
    iterator = recorder.get_iterator()
    for i, case in enumerate(iterator):
        self.assertTrue(isinstance(case['comp2.normal'], NormalDistribution))
        self.assertEqual(case['comp2.normal'].mu, float(i))
        self.assertEqual(case['comp2.normal'].sigma, 0.5)
        self.assertTrue(isinstance(case['comp1.y'], float))
        self.assertEqual(case['comp1.y'], i*2.)
        self.assertEqual(case['comp1.z'], i*1.5)

def test_query(self):
    recorder = DBCaseRecorder()
    for i in range(10):
        inputs = [('comp1.x', i), ('comp1.y', i*2.)]
        outputs = [('comp1.z', i*1.5),
                   ('comp2.normal', NormalDistribution(float(i), 0.5))]
        recorder.record(Case(inputs=inputs, outputs=outputs, label='case%s' % i))
    iterator = recorder.get_iterator()
    iterator.selectors = ["value>=0", "value<3"]

    count = 0
    for i, case in enumerate(iterator):
        count += 1
        for name, value in case.items():
            self.assertTrue(value >= 0 and value < 3)

def _create_nested_assemblies(self, dbname, drivertype):
    top = set_as_top(self._create_assembly(dbname, drivertype))
    top.add('asm', self._create_assembly(dbname, drivertype))
    top.driver.workflow.add('asm')
    top.asm.add('asm', self._create_assembly(dbname, drivertype))
    top.asm.driver.workflow.add('asm')

    top.driver.iterator = ListCaseIterator(self._create_cases(1))
    top.driver.recorders = [DBCaseRecorder(dbname, append=True)]
    top.asm.driver.iterator = ListCaseIterator(self._create_cases(2))
    top.asm.driver.recorders = [DBCaseRecorder(dbname, append=True)]
    top.asm.asm.driver.iterator = ListCaseIterator(self._create_cases(3))
    top.asm.asm.driver.recorders = [DBCaseRecorder(dbname, append=True)]

    return top

def test_query(self):
    recorder = DBCaseRecorder()
    for i in range(10):
        inputs = [('comp1.x', i), ('comp1.y', i*2.)]
        outputs = [('comp1.z', i*1.5),
                   ('comp2.normal', NormalDistribution(float(i), 0.5))]
        recorder.record(Case(inputs=inputs, outputs=outputs, label='case%s' % i))
    iterator = recorder.get_iterator()
    iterator.selectors = ["value>=0", "value<3"]

    count = 0
    for i, case in enumerate(iterator):
        count += 1
        for name, value in case.items():
            self.assertTrue(value >= 0 and value < 3)

    self.assertEqual(count, 3)

def run_arch_test_suite(arch=[], optproblems=[]):
    """Runs the architectures against optproblems and records the results.

    arch: list of Architectures
        The architectures to test.

    optproblems: list of OptProblems
        The OptProblems to use for testing the Architectures.
    """

    compat_data = {}

    for p in optproblems:
        arch_data = {}
        prob_name = p.__class__.__name__
        converge_file = open('%s_convergence_data.py' % prob_name, 'w')

        for a in arch:
            prob = openmdao.main.api.set_as_top(p.__class__())
            arch_name = a.__class__.__name__
            prob.architecture = a.__class__()

            recorders = [DBCaseRecorder()]
            prob.architecture.data_recorders = recorders

            print "Testing %s on %s" % (arch_name, prob_name), "...",
            try:
                prob.check_config()
                arch_data[p] = True
            except RuntimeError as err:
                arch_data[p] = False  # not compatible, so just move on
                print "Incompatible"
                #raise err
                continue

            prob.run()
            print "Success"

            des_vars = prob.get_des_vars_by_comp()
            print " Function Evaluations (Derivative Evaluations): "
            for comp_name in des_vars:
                comp = prob.get(comp_name)
                print " %s: %d (%d)" % (comp_name, comp.exec_count,
                                        comp.derivative_exec_count)
            print " Errors: "
            for k, v in prob.check_solution().iteritems():
                print " ", k, ": ", v
            #print prob.check_solution()

            iter_data = prob.architecture.data_recorders[0].get_iterator()
            data = [case['Objective'] for case in iter_data]
            #converge_file.write('%s = %s'%(arch_name,str(data)))
            print >> converge_file, '%s = %s' % (arch_name, str(data))
            print

            compat_data[a] = arch_data

    return compat_data

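# Usage sketch (not part of the original source): assuming the architecture and
# problem classes imported elsewhere in this collection (MDF, BLISS, SellarProblem)
# are available, the suite above could be driven like this.
from openmdao.lib.architectures.api import MDF, BLISS
from openmdao.lib.optproblems.api import SellarProblem

# compat_data maps each tested architecture to a {problem: compatible?} dict,
# as built inside run_arch_test_suite().
compat_data = run_arch_test_suite(arch=[MDF(), BLISS()],
                                  optproblems=[SellarProblem()])
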
def test_inoutDB(self):
    # This test runs some cases, puts them in a DB using a DBCaseRecorder,
    # then runs the model again using the same cases, pulled out of the DB
    # by a DBCaseIterator. Finally the cases are dumped to a string after
    # being run for the second time.
    self.top.recorders = [DBCaseRecorder()]
    self.top.run()

    # Gui pane stuff
    attrs = self.top.recorders[0].get_attributes()
    self.assertTrue("Inputs" in attrs.keys())
    self.assertTrue({'name': 'dbfile',
                     'id': 'dbfile',
                     'type': 'str',
                     'connected': '',
                     'value': ':memory:',
                     'desc': 'Name of the database file to be recorded. Default ' +
                             'is ":memory:", which writes the database to memory.'}
                    in attrs['Inputs'])

    # now use the DB as source of Cases
    cases = [case for case in self.top.recorders[0].get_iterator()]

def test_inoutDB(self): """This test runs some cases, puts them in a DB using a DBCaseRecorder, then runs the model again using the same cases, pulled out of the DB by a DBCaseIterator. Finally the cases are dumped to a string after being run for the second time. """ self.top.driver.recorders = [DBCaseRecorder()] self.top.run() # Gui pane stuff attrs = self.top.driver.recorders[0].get_attributes() self.assertTrue("Inputs" in attrs.keys()) self.assertTrue({ 'name': 'dbfile', 'id': 'dbfile', 'type': 'str', 'connected': '', 'value': ':memory:', 'desc': 'Name of the database file to be recorded. Default ' + 'is ":memory:", which writes the database to memory.' } in attrs['Inputs']) # now use the DB as source of Cases self.top.driver.iterator = self.top.driver.recorders[0].get_iterator() sout = StringIO.StringIO() self.top.driver.recorders = [DumpCaseRecorder(sout)] self.top.run() expected = [ 'Case: case8', ' uuid: ad4c1b76-64fb-11e0-95a8-001e8cf75fe', ' inputs:', " comp1.a_dict: {'a': 'b'}", " comp1.a_list: ['a', 'b']", ' comp1.x: 8', ' comp1.y: 16', ' outputs:', ' comp1.z: 24.0', ' comp2.z: 25.0', ] lines = sout.getvalue().split('\n') for index, line in enumerate(lines): if line.startswith('Case: case8'): for i in range(len(expected)): if expected[i].startswith(' uuid:'): self.assertTrue(lines[index + i].startswith(' uuid:')) else: self.assertEqual(lines[index + i], expected[i]) break else: self.fail("couldn't find the expected Case")
def test_db_to_dict(self):
    tmpdir = tempfile.mkdtemp()
    dfile = os.path.join(tmpdir, 'junk.db')
    recorder = DBCaseRecorder(dfile)

    # create some Cases where some are missing a variable
    outputs = ['comp1.z', 'comp2.z']
    inputs = ['comp1.x', 'comp1.y', 'comp1.y2']
    recorder.register(self, inputs, outputs)
    for i in range(10):
        inputs = [i, i * 2, i * 3]
        outputs = [i * i, float('NaN')]
        recorder.record(self, inputs, outputs, None, '', '')

    varnames = ['comp1.x', 'comp1.y', 'comp1.y2']
    varinfo = case_db_to_dict(dfile, varnames)

    self.assertEqual(len(varinfo), 3)
    # each var list should have 10 data values in it
    for lst in varinfo.values():
        self.assertEqual(len(lst), 10)

    # now use caseiter_to_dict to grab the same data
    varinfo = caseiter_to_dict(recorder.get_iterator(), varnames)
    # each var list should have 10 data values in it
    for lst in varinfo.values():
        self.assertEqual(len(lst), 10)

    try:
        shutil.rmtree(tmpdir, onerror=onerror)
    except OSError:
        logging.error("problem removing directory %s", tmpdir)

def _create_nested_workflows(self, dbname, drivertype):
    # this is kind of bogus because the inner driver loops are
    # overwriting the values set by the outer loops, but for
    # this test I'm only interested in checking if the
    # Case hierarchy is structured properly
    top = set_as_top(self._create_assembly(dbname, drivertype))

    driver2 = top.add('driver2', drivertype())
    driver2.recorders = [DBCaseRecorder(dbname, append=True)]
    top.driver.workflow.add(['driver2'])

    driver3 = top.add('driver3', drivertype())
    driver3.recorders = [DBCaseRecorder(dbname, append=True)]
    top.driver2.workflow.add(['driver3'])

    top.driver3.workflow.add(['comp1', 'comp2'])

    top.driver.iterator = ListCaseIterator(self._create_cases(1))
    top.driver2.iterator = ListCaseIterator(self._create_cases(2))
    top.driver3.iterator = ListCaseIterator(self._create_cases(3))

    return top

def _create_assembly(self, dbname, drivertype):
    asm = Assembly()
    driver = asm.add('driver', drivertype())
    asm.add('comp1', ExecComp(exprs=['z=x+y']))
    asm.add('comp2', ExecComp(exprs=['z=x+y']))
    asm.connect('comp1.z', 'comp2.x')
    driver.workflow.add(['comp1', 'comp2'])
    driver.recorders = [DBCaseRecorder(dbname, append=True)]
    return asm

def test_tables_already_exist(self):
    dbdir = tempfile.mkdtemp()
    dbname = os.path.join(dbdir, 'junk_dbfile')

    recorder = DBCaseRecorder(dbname)
    recorder._connection.close()
    recorder = DBCaseRecorder(dbname, append=True)
    recorder._connection.close()
    try:
        recorder = DBCaseRecorder(dbname)
        recorder._connection.close()
    except Exception as err:
        self.assertEqual('table cases already exists', str(err))
    else:
        self.fail('expected Exception')
    try:
        shutil.rmtree(dbdir)
    except OSError:
        logging.error("problem removing directory %s" % dbdir)

def configure(self): #Components self.add("trig_meta_model", MetaModel()) self.trig_meta_model.surrogate = { "f_x_sin": LogisticRegression(), "f_x_cos": KrigingSurrogate() } self.trig_meta_model.model = Trig() self.trig_meta_model.recorder = DBCaseRecorder() #Training the MetaModel self.add("DOE_Trainer", DOEdriver()) self.DOE_Trainer.DOEgenerator = FullFactorial() self.DOE_Trainer.DOEgenerator.num_levels = 20 self.DOE_Trainer.add_parameter("trig_meta_model.x", low=0, high=20) self.DOE_Trainer.case_outputs = [ "trig_meta_model.f_x_sin", "trig_meta_model.f_x_cos" ] self.DOE_Trainer.add_event("trig_meta_model.train_next") self.DOE_Trainer.recorders = [DBCaseRecorder()] #MetaModel Validation self.add("trig_calc", Trig()) self.add("DOE_Validate", DOEdriver()) self.DOE_Validate.DOEgenerator = Uniform() self.DOE_Validate.DOEgenerator.num_samples = 20 self.DOE_Validate.add_parameter(("trig_meta_model.x", "trig_calc.x"), low=0, high=20) self.DOE_Validate.case_outputs = [ "trig_calc.f_x_sin", "trig_calc.f_x_cos", "trig_meta_model.f_x_sin", "trig_meta_model.f_x_cos" ] self.DOE_Validate.recorders = [DBCaseRecorder()] #Iteration Hierarchy self.driver.workflow = SequentialWorkflow() self.driver.workflow.add(['DOE_Trainer', 'DOE_Validate']) self.DOE_Trainer.workflow.add('trig_meta_model') self.DOE_Validate.workflow.add('trig_meta_model') self.DOE_Validate.workflow.add('trig_calc')
def test_inoutDB(self):
    # This test runs some cases, puts them in a DB using a DBCaseRecorder,
    # then runs the model again using the same cases, pulled out of the DB
    # by a DBCaseIterator. Finally the cases are dumped to a string after
    # being run for the second time.
    self.top.recorders = [DBCaseRecorder()]
    self.top.run()

    # now use the DB as source of Cases
    cases = [case for case in self.top.recorders[0].get_iterator()]

def __init__(self): """Creates an Assembly to run DREA and HSRnoise.""" super(DREA_HSRnoise, self).__init__() FO1 = Case(inputs=[('point', 1),('dreaprep.Mach',0.28),('alt',2000.0),('dreaprep.PC',100.0),('hsrnoise.phi', 0.0)], outputs=[('drea.CFG',0.0),('hsrnoise.thetas',0.0),('hsrnoise.Freq',0.0),('hsrnoise.SPL_corr',0),('hsrnoise.OASPL30',0.0),('hsrnoise.OASPL60',0.0),('hsrnoise.OASPL90',0.0),('hsrnoise.OASPL120',0.0),('hsrnoise.OASPL150',0.0)]) FO2 = Case(inputs=[('point', 1),('dreaprep.Mach',0.28),('alt',2000.0),('dreaprep.PC', 65.0),('hsrnoise.phi', 0.0)], outputs=[('drea.CFG',0.0),('hsrnoise.thetas',0.0),('hsrnoise.Freq',0.0),('hsrnoise.SPL_corr',0),('hsrnoise.OASPL30',0.0),('hsrnoise.OASPL60',0.0),('hsrnoise.OASPL90',0.0),('hsrnoise.OASPL120',0.0),('hsrnoise.OASPL150',0.0)]) App = Case(inputs=[('point', 2),('dreaprep.Mach',0.20),('alt', 394.0),('dreaprep.PC', 30.0),('hsrnoise.phi', 0.0)], outputs=[('drea.CFG',0.0),('hsrnoise.thetas',0.0),('hsrnoise.Freq',0.0),('hsrnoise.SPL_corr',0),('hsrnoise.OASPL30',0.0),('hsrnoise.OASPL60',0.0),('hsrnoise.OASPL90',0.0),('hsrnoise.OASPL120',0.0),('hsrnoise.OASPL150',0.0)]) SL1 = Case(inputs=[('point', 3),('dreaprep.Mach',0.25),('alt',1000.0),('dreaprep.PC',100.0),('hsrnoise.phi', 0.0)], outputs=[('drea.CFG',0.0),('hsrnoise.thetas',0.0),('hsrnoise.Freq',0.0),('hsrnoise.SPL_corr',0),('hsrnoise.OASPL30',0.0),('hsrnoise.OASPL60',0.0),('hsrnoise.OASPL90',0.0),('hsrnoise.OASPL120',0.0),('hsrnoise.OASPL150',0.0)]) SL2 = Case(inputs=[('point', 3),('dreaprep.Mach',0.25),('alt',1000.0),('dreaprep.PC',100.0),('hsrnoise.phi',30.0)], outputs=[('drea.CFG',0.0),('hsrnoise.thetas',0.0),('hsrnoise.Freq',0.0),('hsrnoise.SPL_corr',0),('hsrnoise.OASPL30',0.0),('hsrnoise.OASPL60',0.0),('hsrnoise.OASPL90',0.0),('hsrnoise.OASPL120',0.0),('hsrnoise.OASPL150',0.0)]) SL3 = Case(inputs=[('point', 3),('dreaprep.Mach',0.25),('alt',1000.0),('dreaprep.PC',100.0),('hsrnoise.phi',60.0)], outputs=[('drea.CFG',0.0),('hsrnoise.thetas',0.0),('hsrnoise.Freq',0.0),('hsrnoise.SPL_corr',0),('hsrnoise.OASPL30',0.0),('hsrnoise.OASPL60',0.0),('hsrnoise.OASPL90',0.0),('hsrnoise.OASPL120',0.0),('hsrnoise.OASPL150',0.0)]) SL4 = Case(inputs=[('point', 3),('dreaprep.Mach',0.25),('alt',1000.0),('dreaprep.PC',100.0),('hsrnoise.phi',90.0)], outputs=[('drea.CFG',0.0),('hsrnoise.thetas',0.0),('hsrnoise.Freq',0.0),('hsrnoise.SPL_corr',0),('hsrnoise.OASPL30',0.0),('hsrnoise.OASPL60',0.0),('hsrnoise.OASPL90',0.0),('hsrnoise.OASPL120',0.0),('hsrnoise.OASPL150',0.0)]) cases = ListCaseIterator([FO1,FO2,App,SL1,SL2,SL3,SL4]) db_recorder = DBCaseRecorder() self.add('geo', Geometry()) self.add('dreaprep', DREAprep()) self.add('drea', DREA()) self.add('hsrnoise', HSRNOISE()) self.add('ACDgen', ACDgen()) self.add('analysis',CaseIteratorDriver()) self.analysis.iterator = cases self.analysis.recorders = [db_recorder] self.ACDgen.case_data = db_recorder.get_iterator() # Set up the workflows #--------------------------- #self.analysis.workflow.add(['dreaprep', 'drea', 'hsrnoise']) #self.driver.workflow.add(['analysis','ACDgen']) self.driver.workflow.add(['dreaprep', 'drea', 'hsrnoise']) # Connections #--------------------------- self.connect('geo',['drea.geo_in','hsrnoise.geo_in']) self.connect('alt',['dreaprep.alt','hsrnoise.ALTEVO']) self.connect('dreaprep.flow_out','drea.flow_in') self.connect('drea.flow_out','hsrnoise.flow_in') self.connect('drea.CFG','hsrnoise.CFG')
def test_string(self):
    recorder = DBCaseRecorder()
    inputs = ['str', 'unicode', 'list']  # Check pickling.
    recorder.register(self, inputs, [])
    inputs = ['Normal String', u'Unicode String', ['Hello', 'world']]
    recorder.record(self, inputs, [], None, '', '')
    for case in recorder.get_iterator():
        self.assertEqual(case['str'], 'Normal String')
        self.assertEqual(case['unicode'], u'Unicode String')
        self.assertEqual(case['list'], ['Hello', 'world'])

def test_db_to_dict(self):
    tmpdir = tempfile.mkdtemp()
    dfile = os.path.join(tmpdir, 'junk.db')
    recorder = DBCaseRecorder(dfile)

    # create some Cases where some are missing a variable
    outputs = ['comp1.z', 'comp2.z']
    cases = []
    for i in range(10):
        if i > 1:
            msg = ''
        else:
            msg = 'an error occurred'
        if i < 5:
            inputs = [('comp1.x', i), ('comp1.y', i * 2), ('comp1.y2', i * 3)]
        else:
            inputs = [('comp1.x', i), ('comp1.y', i * 2)]
        recorder.record(Case(inputs=inputs, outputs=outputs, msg=msg))

    varnames = ['comp1.x', 'comp1.y', 'comp1.y2']
    varinfo = case_db_to_dict(dfile, varnames)

    self.assertEqual(len(varinfo), 3)
    # each var list should have 3 data values in it (5 with the required
    # variables minus 2 with errors)
    for name, lst in varinfo.items():
        self.assertEqual(len(lst), 3)

    # now use caseiter_to_dict to grab the same data
    varinfo = caseiter_to_dict(recorder.get_iterator(), varnames)
    # each var list should have 3 data values in it (5 with the required
    # variables minus 2 with errors)
    for name, lst in varinfo.items():
        self.assertEqual(len(lst), 3)

    try:
        shutil.rmtree(tmpdir)
    except OSError:
        logging.error("problem removing directory %s" % tmpdir)

def _create_nested_assemblies(self, dbname, drivertype):
    top = set_as_top(self._create_assembly(dbname, drivertype))
    top.add('asm', self._create_assembly(dbname, drivertype))
    top.driver.workflow.add('asm')
    top.asm.add('asm', self._create_assembly(dbname, drivertype))
    top.asm.driver.workflow.add('asm')

    top.recorders = [DBCaseRecorder(dbname, append=True)]
    Case.set_vartree_inputs(top.driver, self._create_cases(1))
    Case.set_vartree_inputs(top.asm.driver, self._create_cases(2))
    Case.set_vartree_inputs(top.asm.asm.driver, self._create_cases(3))

    return top

def __init__(self, *args, **kwargs):
    super(Analysis, self).__init__(*args, **kwargs)

    self._tdir = mkdtemp()

    # Components
    self.add("A", MetaModel())
    self.A.surrogate = {'default': KrigingSurrogate()}
    self.A.model = ConceptA()
    self.A.recorder = DBCaseRecorder(':memory:')

    self.add('DOE_maker', DOE_Maker())
    self.DOE_maker.cases = [10] * 3
    #self.DOE_maker.force_execute = True

    # Drivers
    self.add("trainA", DOEdriver())
    self.trainA.sequential = True
    self.trainA.add_parameter("A.x")
    self.trainA.add_parameter("A.y")
    self.trainA.add_parameter("A.z")
    self.trainA.add_event("A.train_next")
    self.trainA.case_outputs = ['A.f1', 'A.f2']
    self.trainA.recorder = DBCaseRecorder(os.path.join(self._tdir, 'A.db'))

    self.add('driver', Iterator())
    self.driver.add_stop_condition('len(DOE_maker.cases)==0')
    self.driver.add_event('A.reset_training_data')

    self.add('res', Res())
    self.res.force_execute = True

    # Iteration Hierarchy
    self.driver.workflow.add(['DOE_maker', 'trainA', 'res'])
    #self.driver.workflow.add(['DOE_maker','trainA'])
    self.trainA.workflow.add('A')

    # Data Connections
    self.connect('DOE_maker.DOEgen', 'trainA.DOEgenerator')

def test_tables_already_exist(self):
    dbdir = tempfile.mkdtemp()
    dbname = os.path.join(dbdir, 'junk_dbfile')

    recorder = DBCaseRecorder(dbname)
    recorder.close()
    recorder = DBCaseRecorder(dbname, append=True)
    recorder.close()
    try:
        recorder = DBCaseRecorder(dbname)
        recorder.close()
    except Exception as err:
        self.assertEqual('table cases already exists', str(err))
    else:
        self.fail('expected Exception')
    try:
        shutil.rmtree(dbdir, onerror=onerror)
    except OSError:
        logging.error("problem removing directory %s", dbdir)

def test_query(self):
    recorder = DBCaseRecorder()
    inputs = ['comp1.x', 'comp1.y']
    outputs = ['comp1.z', 'comp2.normal']
    recorder.register(self, inputs, outputs)
    for i in range(10):
        inputs = [i, i * 2.]
        outputs = [i * 1.5, NormalDistribution(float(i), 0.5)]
        recorder.record(self, inputs, outputs, None, '', '')
    iterator = recorder.get_iterator()
    iterator.selectors = ["value>=0", "value<3"]

    count = 0
    for i, case in enumerate(iterator):
        count += 1
        for value in case.values():
            self.assertTrue(value >= 0 and value < 3)

def test_pickle_conversion(self):
    recorder = DBCaseRecorder()
    inputs = ['comp1.x', 'comp1.y']
    outputs = ['comp1.z', 'comp2.normal']
    recorder.register(self, inputs, outputs)
    for i in range(10):
        inputs = [i, i * 2.]
        outputs = [i * 1.5, NormalDistribution(float(i), 0.5)]
        recorder.record(self, inputs, outputs, None, '', '')
    iterator = recorder.get_iterator()
    for i, case in enumerate(iterator):
        self.assertTrue(isinstance(case['comp2.normal'], NormalDistribution))
        self.assertEqual(case['comp2.normal'].mu, float(i))
        self.assertEqual(case['comp2.normal'].sigma, 0.5)
        self.assertTrue(isinstance(case['comp1.y'], float))
        self.assertEqual(case['comp1.y'], i * 2.)
        self.assertEqual(case['comp1.z'], i * 1.5)

def test_inoutDB(self): """This test runs some cases, puts them in a DB using a DBCaseRecorder, then runs the model again using the same cases, pulled out of the DB by a DBCaseIterator. Finally the cases are dumped to a string after being run for the second time. """ self.top.driver.recorders = [DBCaseRecorder()] self.top.run() # now use the DB as source of Cases self.top.driver.iterator = self.top.driver.recorders[0].get_iterator() sout = StringIO.StringIO() self.top.driver.recorders = [DumpCaseRecorder(sout)] self.top.run() expected = [ 'Case: case8', ' uuid: ad4c1b76-64fb-11e0-95a8-001e8cf75fe', ' inputs:', ' comp1.x: 8', ' comp1.y: 16', ' outputs:', ' comp1.z: 24.0', ' comp2.z: 25.0', ] lines = sout.getvalue().split('\n') for index, line in enumerate(lines): if line.startswith('Case: case8'): for i in range(len(expected)): if expected[i].startswith(' uuid:'): self.assertTrue(lines[index + i].startswith(' uuid:')) else: self.assertEqual(lines[index + i], expected[i]) break else: self.fail("couldn't find the expected Case")
def test_query(self):
    recorder = DBCaseRecorder()
    inputs = ['comp1.x', 'comp1.y']
    outputs = ['comp1.z', 'comp2.normal']
    recorder.register(self, inputs, outputs)
    for i in range(10):
        inputs = [i, i*2.]
        outputs = [i*1.5, NormalDistribution(float(i), 0.5)]
        recorder.record(self, inputs, outputs, None, '', '')
    iterator = recorder.get_iterator()
    iterator.selectors = ["value>=0", "value<3"]

    count = 0
    for i, case in enumerate(iterator):
        count += 1
        for value in case.values():
            self.assertTrue(value >= 0 and value < 3)

    self.assertEqual(count, 3)

def configure(self):
    self._tdir = mkdtemp()

    self.comp_name = None
    # check to make sure no more than one component is being referenced
    compnames = set()
    for param in self.parent.get_parameters().values():
        compnames.update(param.get_referenced_compnames())

    if len(compnames) > 1:
        self.parent.raise_exception('The EGO architecture can only be used on one '
                                    'component at a time, but parameters from %s '
                                    'were added to the problem formulation.' % compnames,
                                    ValueError)
    self.comp_name = compnames.pop()

    # change name of component to add '_model' to it.
    # lets me name the metamodel as the old name
    self.comp = getattr(self.parent, self.comp_name)
    self.comp.name = "%s_model" % self.comp_name

    # add in the metamodel
    meta_model = self.parent.add(self.comp_name, MetaModel())  # metamodel now replaces old component with same name
    meta_model.default_surrogate = KrigingSurrogate()
    meta_model.model = self.comp

    meta_model_recorder = DBCaseRecorder(os.path.join(self._tdir, 'trainer.db'))
    meta_model.recorder = meta_model_recorder
    meta_model.force_execute = True

    EI = self.parent.add("EI", ExpectedImprovement())
    self.objective = self.parent.get_objectives().keys()[0]
    EI.criteria = self.objective

    pfilter = self.parent.add("filter", ParetoFilter())
    pfilter.criteria = [self.objective]
    pfilter.case_sets = [meta_model_recorder.get_iterator(), ]
    pfilter.force_execute = True

    # Driver Configuration
    DOE_trainer = self.parent.add("DOE_trainer", DOEdriver())
    DOE_trainer.sequential = True
    DOE_trainer.DOEgenerator = OptLatinHypercube(num_samples=self.initial_DOE_size)

    for name, param in self.parent.get_parameters().iteritems():
        DOE_trainer.add_parameter(param)

    DOE_trainer.add_event("%s.train_next" % self.comp_name)
    DOE_trainer.case_outputs = [self.objective]
    DOE_trainer.recorders = [DBCaseRecorder(':memory:')]

    EI_opt = self.parent.add("EI_opt", Genetic())
    EI_opt.opt_type = "maximize"
    EI_opt.population_size = 100
    EI_opt.generations = 10
    #EI_opt.selection_method = "tournament"

    for name, param in self.parent.get_parameters().iteritems():
        EI_opt.add_parameter(param)
    EI_opt.add_objective("EI.%s" % self.EI_PI)

    retrain = self.parent.add("retrain", Driver())
    retrain.recorders = self.data_recorders
    retrain.add_event("%s.train_next" % self.comp_name)

    iter = self.parent.add("iter", IterateUntil())
    iter.max_iterations = self.sample_iterations
    iter.add_stop_condition('EI.PI <= %s' % self.min_ei_pi)

    # Data Connections
    self.parent.connect("filter.pareto_set", "EI.best_case")
    self.parent.connect(self.objective, "EI.predicted_value")

    # Iteration Hierarchy
    self.parent.driver.workflow.add(['DOE_trainer', 'iter'])
    #DOE_trainer.workflow.add(self.comp_name)

    iter.workflow = SequentialWorkflow()
    iter.workflow.add(['filter', 'EI_opt', 'retrain'])
    #EI_opt.workflow.add([self.comp_name,'EI'])
    retrain.workflow.add(self.comp_name)

def configure(self): """ Creates a new Assembly with this problem Optimal Design at (1.9776, 0, 0) Optimal Objective = 3.18339""" #objective = '(dis1.x1)**2 + dis1.z2 + dis1.y1 + exp(-dis2.y2)' #constraint1 = 'dis1.y1 > 3.16' #constraint2 = 'dis2.y2 < 24.0' # Metamodel for sellar discipline 1 self.add("meta_model_dis1", MetaModel()) self.meta_model_dis1.surrogates = {"y1": ResponseSurface()} self.meta_model_dis1.model = SellarDiscipline1() self.meta_model_dis1.recorder = DBCaseRecorder() self.meta_model_dis1.force_execute = True # Metamodel for sellar discipline 2 self.add("meta_model_dis2", MetaModel()) self.meta_model_dis2.surrogates = {"y2": ResponseSurface()} self.meta_model_dis2.model = SellarDiscipline2() self.meta_model_dis2.recorder = DBCaseRecorder() self.meta_model_dis2.force_execute = True #training metalmodel for disc1 # self.add("DOE_Trainer_dis2",NeighborhoodDOEdriver()) # self.DOE_Trainer_dis2.DOEgenerator = CentralComposite() # self.DOE_Trainer_dis2.alpha = .1 # self.DOE_Trainer_dis2.add_parameter("meta_model_dis2.z1",low=-10,high=10,start=5.0) # self.DOE_Trainer_dis2.add_parameter("meta_model_dis2.z2",low=0,high=10,start=2.0) # self.DOE_Trainer_dis2.add_parameter("meta_model_dis2.y1",low=0,high=20) # self.DOE_Trainer_dis2.add_event("meta_model_dis2.train_next") # self.DOE_Trainer_dis2.force_execute = True #optimization of global objective function self.add('sysopt', SLSQPdriver()) self.sysopt.add_objective( '(meta_model_dis1.x1)**2 + meta_model_dis1.z2 + meta_model_dis1.y1 + math.exp(-meta_model_dis2.y2)' ) self.sysopt.add_parameter(['meta_model_dis1.z1', 'meta_model_dis2.z1'], low=-10, high=10.0, start=5.0) self.sysopt.add_parameter(['meta_model_dis1.z2', 'meta_model_dis2.z2'], low=0, high=10.0, start=2.0) self.sysopt.add_parameter('meta_model_dis1.y2', low=-1e99, high=1e99) self.sysopt.add_parameter('meta_model_dis2.y1', low=-1e99, high=1e99) #feasibility constraints self.sysopt.add_constraint('meta_model_dis1.y2 <= meta_model_dis2.y2') self.sysopt.add_constraint('meta_model_dis1.y2 >= meta_model_dis2.y2') self.sysopt.add_constraint('meta_model_dis2.y1 <= meta_model_dis1.y1') self.sysopt.add_constraint('meta_model_dis2.y1 >= meta_model_dis1.y1') self.sysopt.add_constraint('3.16 < meta_model_dis1.y1') self.sysopt.add_constraint('meta_model_dis2.y2 < 24.0') self.sysopt.force_execute = True #optimization of discipline 1 (discipline 2 of the sellar problem has no local variables) self.add('local_opt_dis1', SLSQPdriver()) self.local_opt_dis1.add_objective('meta_model_dis1.y1') self.local_opt_dis1.add_parameter('meta_model_dis1.x1', low=0, high=10.0) self.local_opt_dis1.add_constraint('3.16 < meta_model_dis1.y1') self.local_opt_dis1.add_event('meta_model_dis1.train_next') self.local_opt_dis1.force_execute = True self.local_opt_dis1.workflow.add(['meta_model_dis1']) #training metalmodel for disc1 self.add("DOE_Trainer_dis1", NeighborhoodDOEdriver()) self.DOE_Trainer_dis1.DOEgenerator = CentralComposite() self.DOE_Trainer_dis1.alpha = .1 self.DOE_Trainer_dis1.add_parameter("meta_model_dis1.z1", low=-10, high=10, start=5.0) self.DOE_Trainer_dis1.add_parameter("meta_model_dis1.z2", low=0, high=10, start=2.0) self.DOE_Trainer_dis1.add_parameter("meta_model_dis1.y2", low=-100, high=100) self.DOE_Trainer_dis1.add_event("meta_model_dis1.train_next") self.DOE_Trainer_dis1.force_execute = True self.DOE_Trainer_dis1.workflow.add("local_opt_dis1") self.add('reset_train', Driver()) self.reset_train.add_event('meta_model_dis1.reset_training_data') 
self.reset_train.add_event('meta_model_dis2.reset_training_data') self.reset_train.workflow.add(['meta_model_dis1', 'meta_model_dis2']) self.reset_train.force_execute = True #build workflow for bliss2000 self.add('driver', FixedPointIterator()) #self.add('main_driver', IterateUntil()) #self.main_driver.max_iterations = 1 self.driver.tolerance = .0001 # self.driver.workflow.add(['local_opt_dis1','reset_train','DOE_Trainer_dis1','DOE_Trainer_dis2','sysopt']) self.driver.workflow.add(['sysopt']) self.driver.add_parameter('x1_store', low=0, high=10.0) self.driver.add_constraint('meta_model_dis1.x1 = x1_store') self.driver.add_parameter('z1_store', low=0, high=10.0) self.driver.add_constraint('meta_model_dis1.z1 = z1_store') self.driver.add_parameter('z2_store', low=0, high=10.0) self.driver.add_constraint('meta_model_dis1.z2 = z2_store')
def configure(self):
    self._tdir = mkdtemp()

    # Components
    self.add("branin_meta_model", MetaModel())
    self.branin_meta_model.default_surrogate = KrigingSurrogate()
    self.branin_meta_model.model = BraninComponent()
    self.branin_meta_model.recorder = DBCaseRecorder(':memory:')
    self.branin_meta_model.force_execute = True

    self.add("EI", ExpectedImprovement())
    self.EI.criteria = "branin_meta_model.f_xy"

    self.add("filter", ParetoFilter())
    self.filter.criteria = ['branin_meta_model.f_xy']
    self.filter.case_sets = [self.branin_meta_model.recorder.get_iterator(), ]
    self.filter.force_execute = True

    # Driver Configuration
    self.add("DOE_trainer", DOEdriver())
    self.DOE_trainer.sequential = True
    self.DOE_trainer.DOEgenerator = OptLatinHypercube(num_samples=15)
    #self.DOE_trainer.DOEgenerator = FullFactorial(num_levels=5)
    self.DOE_trainer.add_parameter("branin_meta_model.x", low=-5., high=10.)
    self.DOE_trainer.add_parameter("branin_meta_model.y", low=0., high=15.)
    self.DOE_trainer.add_event("branin_meta_model.train_next")
    self.DOE_trainer.case_outputs = ["branin_meta_model.f_xy"]
    self.DOE_trainer.recorders = [DBCaseRecorder(os.path.join(self._tdir, 'trainer.db'))]

    self.add("EI_opt", Genetic())
    self.EI_opt.opt_type = "maximize"
    self.EI_opt.population_size = 100
    self.EI_opt.generations = 10
    #self.EI_opt.selection_method = "tournament"
    self.EI_opt.add_parameter("branin_meta_model.x", low=-5., high=10.)
    self.EI_opt.add_parameter("branin_meta_model.y", low=0., high=15.)
    self.EI_opt.add_objective("EI.PI")

    self.add("retrain", MyDriver())
    self.retrain.add_event("branin_meta_model.train_next")
    self.retrain.recorders = [DBCaseRecorder(os.path.join(self._tdir, 'retrain.db'))]

    self.add("iter", IterateUntil())
    self.iter.max_iterations = 30
    self.iter.add_stop_condition('EI.EI <= .0001')

    # Iteration Hierarchy
    self.driver.workflow.add(['DOE_trainer', 'iter'])

    self.DOE_trainer.workflow.add('branin_meta_model')

    self.iter.workflow = SequentialWorkflow()
    self.iter.workflow.add(['filter', 'EI_opt', 'retrain'])

    self.EI_opt.workflow.add(['branin_meta_model', 'EI'])
    self.retrain.workflow.add('branin_meta_model')

    # Data Connections
    self.connect("filter.pareto_set", "EI.best_case")
    self.connect("branin_meta_model.f_xy", "EI.predicted_value")

opt_problem = OptimizationConstrained()

#-----------------------------
# Set up our CaseRecorders
#-----------------------------
import os
if os.path.exists('converge.db'):
    os.remove('converge.db')

from openmdao.lib.casehandlers.api import CSVCaseRecorder, DBCaseRecorder
opt_problem.driver.recorders = [CSVCaseRecorder(filename='converge.csv'),
                                DBCaseRecorder(dbfile='converge.db', append=False)]
opt_problem.driver.printvars = ['*']

#-----------------------------
# Run problem
#-----------------------------
opt_problem.run()

#----------------------------------------------------
# Print out history of our objective for inspection
#----------------------------------------------------
for case in opt_problem.driver.recorders[0].get_iterator():
    print case

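# Offline query sketch (not part of the original source): the saved converge.db
# can also be read back without re-running the model, using case_db_to_dict as
# in the tests above. The variable name 'Objective' and the import path are
# assumptions -- substitute whatever was actually recorded via printvars.
from openmdao.lib.casehandlers.api import case_db_to_dict  # import path assumed

varinfo = case_db_to_dict('converge.db', ['Objective'])
for name, values in varinfo.items():
    print name, values
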
def configure(self):
    self._tdir = mkdtemp()

    self.comp_name = None
    # check to make sure no more than one component is being referenced
    compnames = set()
    for param in self.parent.get_parameters().values():
        compnames.update(param.get_referenced_compnames())

    if len(compnames) > 1:
        self.parent.raise_exception('The EGO architecture can only be used on one '
                                    'component at a time, but parameters from %s '
                                    'were added to the problem formulation.' % compnames,
                                    ValueError)
    self.comp_name = compnames.pop()

    # change name of component to add '_model' to it.
    # lets me name the metamodel as the old name
    self.comp = getattr(self.parent, self.comp_name)
    self.comp.name = "%s_model" % self.comp_name

    # add in the metamodel
    meta_model = self.parent.add(self.comp_name, MetaModel())  # metamodel now replaces old component with same name
    meta_model.default_surrogate = KrigingSurrogate()
    meta_model.model = self.comp

    meta_model_recorder = DBCaseRecorder(os.path.join(self._tdir, 'trainer.db'))
    meta_model.recorder = meta_model_recorder

    EI = self.parent.add("EI", ExpectedImprovement())
    self.objective = self.parent.get_objectives().keys()[0]
    EI.criteria = self.objective

    pfilter = self.parent.add("filter", ParetoFilter())
    pfilter.criteria = [self.objective]
    pfilter.case_sets = [meta_model_recorder.get_iterator(), ]

    # Driver Configuration
    DOE_trainer = self.parent.add("DOE_trainer", DOEdriver())
    DOE_trainer.sequential = True
    DOE_trainer.DOEgenerator = OptLatinHypercube(num_samples=self.initial_DOE_size)

    for name, param in self.parent.get_parameters().iteritems():
        DOE_trainer.add_parameter(param)

    DOE_trainer.add_event("%s.train_next" % self.comp_name)
    DOE_trainer.case_outputs = [self.objective]
    DOE_trainer.recorders = [DBCaseRecorder(':memory:')]

    EI_opt = self.parent.add("EI_opt", Genetic())
    EI_opt.opt_type = "maximize"
    EI_opt.population_size = 100
    EI_opt.generations = 10
    #EI_opt.selection_method = "tournament"

    for name, param in self.parent.get_parameters().iteritems():
        EI_opt.add_parameter(param)
    EI_opt.add_objective("EI.%s" % self.EI_PI)

    retrain = self.parent.add("retrain", Driver())
    retrain.recorders = self.data_recorders
    retrain.add_event("%s.train_next" % self.comp_name)

    iter = self.parent.add("iter", IterateUntil())
    iter.max_iterations = self.sample_iterations
    iter.add_stop_condition('EI.PI <= %s' % self.min_ei_pi)

    # Data Connections
    self.parent.connect("filter.pareto_set", "EI.best_case")
    self.parent.connect(self.objective, "EI.predicted_value")

    # Iteration Hierarchy
    self.parent.driver.workflow.add(['DOE_trainer', 'iter'])
    #DOE_trainer.workflow.add(self.comp_name)

    iter.workflow = SequentialWorkflow()
    iter.workflow.add(['filter', 'EI_opt', 'retrain'])
    #EI_opt.workflow.add([self.comp_name,'EI'])
    retrain.workflow.add(self.comp_name)

def configure(self): """Setup a BLISS2000 architecture inside this assembly. """ global_dvs = self.parent.get_global_des_vars() des_vars=self.parent.get_des_vars_by_comp() local_dvs_by_comp = self.parent.get_local_des_vars_by_comp() global_dvs_by_comp = self.parent.get_global_des_vars_by_comp() locals=self.parent.get_local_des_vars() objective = self.parent.get_objectives().items()[0] comp_constraints = self.parent.get_constraints_by_comp() coupling = self.parent.list_coupling_vars() couple_deps = self.parent.get_coupling_deps_by_comp() couple_indeps = self.parent.get_coupling_indeps_by_comp() driver=self.parent.add("driver",FixedPointIterator()) driver.workflow = SequentialWorkflow() driver.max_iteration=15 #should be enough to converge driver.tolerance = .005 meta_models = {} self.sub_system_opts = {} system_var_map = {} for comp in des_vars: mm_name = "meta_model_%s"%comp meta_model = self.parent.add(mm_name,MetaModel()) #metamodel now replaces old component with same name driver.add_event("%s.reset_training_data"%mm_name) meta_models[comp] = meta_model meta_model.default_surrogate = ResponseSurface() #if there are locals, you need to make a SubSystemOpt assembly comp_obj = self.parent.get(comp) sso = self.parent.add('sub_system_opt_%s'%comp, SubSystemOpt(comp_obj, global_dvs_by_comp.get(comp), local_dvs_by_comp.get(comp), couple_deps.get(comp), couple_indeps.get(comp), comp_constraints.get(comp))) self.sub_system_opts[comp] = sso meta_model.model = sso for name,mapped_name in sso.var_map.iteritems(): system_var_map[name] = "%s.%s"%(mm_name,mapped_name) meta_model.recorder = DBCaseRecorder() #add a doe trainer for each metamodel dis_doe=self.parent.add("DOE_Trainer_%s"%comp,NeighborhoodDOEdriver()) for couple in couple_indeps[comp] : mapped_name = system_var_map[couple.indep.target] dis_doe.add_parameter(mapped_name,low=-1e99,high=1e99) #change to -1e99/1e99 for dv in global_dvs_by_comp[comp]: dis_doe.add_parameter(system_var_map[dv.target],low=dv.low, high=dv.high,start=dv.start) if local_dvs_by_comp.get(comp): #add weights if they are there for w in meta_model.model.weights: dis_doe.add_parameter("meta_model_%s.%s"%(comp,w),low=-3,high=3) num_params = len(dis_doe.get_parameters()) dis_doe.DOEgenerator = LatinHypercube((num_params**2+3*num_params+2)/2) dis_doe.alpha= .1 dis_doe.beta = .01 dis_doe.add_event("meta_model_%s.train_next"%comp) dis_doe.force_execute = True driver.workflow.add(dis_doe.name) #run all doe training before system optimziation #optimization of system objective function using the discipline meta models sysopt=self.parent.add('sysopt', SLSQPdriver()) sysopt.recorders = self.data_recorders sysopt.iprint = 0 sysopt.differentiator = FiniteDifference() obj2= objective[1].text #for comp in objective[1].get_referenced_compnames(): # obj2=obj2.replace(comp,"meta_model_%s"%comp) for var_name, mapped_name in system_var_map.iteritems(): obj2=obj2.replace(var_name,mapped_name) sysopt.add_objective(obj2) #add global design variables as parameters for param,group in global_dvs: plist=[system_var_map[t] for t in group.targets] sysopt.add_parameter(plist, low=group.low, high=group.high,start=group.start) #add the subsytem weights to the system optimization for comp,sso in self.sub_system_opts.iteritems(): mm_name = "meta_model_%s"%comp for w in sso.weights: sysopt.add_parameter("%s.%s"%(mm_name,w),low=-3,high=3) for key,couple in coupling.iteritems(): s=couple.indep.target mapped_name = system_var_map[s] sysopt.add_parameter(mapped_name, low=-1e99, high=1e99) #feasibility 
constraints, referenced to metamodels s1,s2= system_var_map[couple.dep.target], system_var_map[couple.indep.target] sysopt.add_constraint('(%s-%s)**2<=0.0001'%(s2,s1)) #sysopt.add_constraint('%s>=%s'%(s2,s1)) #add constraints, referenced to metamodels for comp,constraints in comp_constraints.iteritems(): for c in constraints: new_c = str(c) for var,mapped_name in system_var_map.iteritems(): new_c = new_c.replace(var,mapped_name) sysopt.add_constraint(new_c) driver.workflow.add('sysopt') #setup paramter for fixedpointiterator comp=des_vars.keys()[0] mm='meta_model_%s'%comp #create some placeholder variables for the fixed point iteration for l in locals: s=system_var_map[l[0]].replace(".","_") s2='%s_store'%s self.parent.add(s2,Float(0.0)) driver.add_parameter(s2 , low=l[1].low, high=l[1].high) driver.add_constraint('%s = %s'%(system_var_map[l[1].target],s2)) for i,g in enumerate(global_dvs): s2='global%d_store'%i self.parent.add(s2,Float(0.0)) driver.add_parameter(s2 , low=g[1].low, high=g[1].high) driver.add_constraint('%s = %s'%(system_var_map[g[1].target],s2))
from openmdao.lib.architectures.api import MDF, BLISS, CO
from openmdao.lib.casehandlers.api import DBCaseRecorder
from openmdao.lib.optproblems.api import SellarProblem

if __name__ == "__main__":  # pragma: no cover

    import time
    from openmdao.main.api import set_as_top

    prob = SellarProblem()
    prob.architecture = MDF()
    prob.configure()
    prob.driver.recorders = [DBCaseRecorder()]

    tt = time.time()
    prob.run()

    error = prob.check_solution()

    print "\nUsing MDF Architecture"
    print "Minimum found at (%f, %f, %f)" % (prob.dis1.z1, prob.dis1.z2,
                                             prob.dis1.x1)
    print "Minimum differs from expected by (%f, %f, %f)" % (error["z1"],
                                                             error["z2"],
                                                             error['dis1.x1'])
    print "Coupling vars: %f, %f" % (prob.dis1.y1, prob.dis2.y2)
    print "Minimum objective: ", prob.solution['obj1']
    print "Elapsed time: ", time.time() - tt, "seconds"

def test_close(self):
    # :memory: can be used after close.
    recorder = DBCaseRecorder()
    inps = ['str', 'unicode', 'list']
    recorder.register(self, inps, [])
    inputs = ['Normal String', u'Unicode String', ['Hello', 'world']]
    recorder.record(self, inputs, [], None, '', '')
    recorder.close()
    recorder.record(self, inputs, [], None, '', '')

    # File-based DB recorder can not be used after close.
    tmpdir = tempfile.mkdtemp()
    try:
        dfile = os.path.join(tmpdir, 'junk.db')
        recorder = DBCaseRecorder(dfile)
        recorder.register(self, inps, [])
        recorder.record(self, inputs, [], None, '', '')
        recorder.close()
        code = "recorder.record(self, inputs, [], None, '', '')"
        assert_raises(self, code, globals(), locals(),
                      RuntimeError, 'Attempt to record on closed recorder')
    finally:
        try:
            shutil.rmtree(tmpdir, onerror=onerror)
        except OSError:
            logging.error("problem removing directory %s", tmpdir)