def test_subassembly(self):
    """A CaseIteratorDriver inside a subassembly must produce correct
    iternames both sequentially and concurrently."""
    logging.debug('')
    logging.debug('test_subassembly')

    top = set_as_top(Assembly())
    sub = top.add('sub', Assembly())
    sub.force_execute = True
    top.driver.workflow.add('sub')

    sub.add('driver', CaseIteratorDriver())
    sub.add('comp1', TracedComponent())
    sub.add('comp2', TracedComponent())
    sub.driver.workflow.add(('comp1', 'comp2'))

    cases = [Case(label=str(i),
                  inputs=(('comp1.inp', i), ('comp2.inp', i)),
                  outputs=('comp1.itername', 'comp2.itername'))
             for i in range(3)]

    # Sequential execution.
    sub.driver.iterator = ListCaseIterator(cases)
    top.run()
    self.verify_itername(sub.driver.evaluated, subassembly=True)

    # Concurrent execution over the same cases.
    sub.driver.sequential = False
    sub.driver.iterator = ListCaseIterator(cases)
    top.run()
    self.verify_itername(sub.driver.evaluated, subassembly=True)
def _create_nested_assemblies(self, dbname, drivertype):
    """Build a three-level nested assembly (top -> asm -> asm.asm), each
    level driven by a `drivertype` case driver recording into `dbname`.

    Returns the top-level assembly.
    """
    top = set_as_top(self._create_assembly(dbname, drivertype))
    top.add('asm', self._create_assembly(dbname, drivertype))
    top.driver.workflow.add('asm')
    top.asm.add('asm', self._create_assembly(dbname, drivertype))
    top.asm.driver.workflow.add('asm')

    top.driver.iterator = ListCaseIterator(self._create_cases(1))
    top.driver.recorders = [DBCaseRecorder(dbname, append=True)]
    top.asm.driver.iterator = ListCaseIterator(self._create_cases(2))
    top.asm.driver.recorders = [DBCaseRecorder(dbname, append=True)]
    top.asm.asm.driver.iterator = ListCaseIterator(self._create_cases(3))
    # Bug fix: the original assigned to the nonexistent singular
    # 'recorder' attribute, silently dropping the innermost recorder.
    # Use the 'recorders' list like the other two levels.
    top.asm.asm.driver.recorders = [DBCaseRecorder(dbname, append=True)]
    return top
def test_inoutCSV_empty_inputs(self):
    """Round-trip cases that have no inputs through a CSVCaseRecorder,
    then dump them and verify the text for one representative case."""
    # now create some Cases
    outputs = ['comp1.z']
    cases = []
    for i in range(10):
        cases.append(Case(inputs=[], outputs=outputs, label='case%s'%i))
    self.top.driver.iterator = ListCaseIterator(cases)
    self.top.driver.recorders = [CSVCaseRecorder(filename=self.filename)]
    self.top.run()

    # now use the CSV recorder as source of Cases
    self.top.driver.iterator = self.top.driver.recorders[0].get_iterator()
    sout = StringIO.StringIO()
    self.top.driver.recorders = [DumpCaseRecorder(sout)]
    self.top.run()

    # Expected dump lines for 'case8'; the uuid line is matched only by
    # prefix since its value differs on every run.
    expected = [
        'Case: case8',
        ' uuid: ad4c1b76-64fb-11e0-95a8-001e8cf75fe',
        ' outputs:',
        ' comp1.z: 0.0',
        ]
    lines = sout.getvalue().split('\n')
    for index, line in enumerate(lines):
        if line.startswith('Case: case8'):
            # Compare each expected line against the dump, offset from
            # the 'Case:' header.
            for i in range(len(expected)):
                if expected[i].startswith(' uuid:'):
                    self.assertTrue(lines[index+i].startswith(' uuid:'))
                else:
                    self.assertEqual(lines[index+i], expected[i])
            break
    else:
        # for/else: only reached if no line matched the header.
        self.fail("couldn't find the expected Case")
def setup_cases(self):
    """ setup the cases

    Builds ``self.runcases`` (a list of Cases wrapping run cases for the
    aero code) from ``self.studycases`` and hands them to the workflow
    driver, unless DAKOTA is managing the study itself.
    """
    if (self.run_dakota):
        # DAKOTA path: the driver builds its own cases from the study.
        self.ws_driver.setup_cases(self.studycases, self.aerocode)
    else:
        self.runcases = []
        ## cases should be list of DesignLoadCases
        for dlc in self.studycases:
            if (self.raw_cases):
                # goes with "tabular" input distn format
                print "building dlc for: ", dlc.x
                runcase = ParamDesignLoadCaseBuilder.buildRunCase_x(
                    dlc.x, dlc.param_names, dlc)
                self.runcases.append(
                    Case(inputs=[('runner.input', runcase)]))
            else:
                print 'Generating run cases for study case %s' % dlc.name
                # ask aero code to produce runcass for this study case
                allruns = self.aerocode.genRunCases(dlc)
                for runcase in allruns:
                    print 'Adding Case for run case %s' % runcase.name
                    # create the case
                    # self.runcases.append(Case(inputs= [('runner.input', runcase)],
                    #                           outputs=['runner.output', 'runner.input']))
                    self.runcases.append(
                        Case(inputs=[('runner.input', runcase)]))
                    ## vars used here need to exist in relevant (sub)-objects
                    ##(ie aerocode.input needs to exist--eg in openAeroCode) , else openMDAO throws exception
                    ## This will result in aerocode.execute() being called with self.input = runcase = relevant RunCase
                save_run_cases(dlc.name, allruns)
        # NOTE(review): placed inside the non-DAKOTA branch since
        # self.runcases is only defined here -- confirm against original
        # layout.
        self.ws_driver.iterator = ListCaseIterator(self.runcases)
def run_cases(self, sequential, forced_errors=False, retry=True):
    """ Evaluate cases, either sequentially or across multiple servers. """
    driver = self.model.driver
    driver.sequential = sequential
    if not sequential:
        # Try to ensure more than one worker is used.
        self.model.driven.sleep = 0.2
    driver.iterator = ListCaseIterator(self.cases)
    results = ListCaseRecorder()
    driver.recorders = [results]
    driver.printvars = ['driven.extra']
    driver.error_policy = 'RETRY' if retry else 'ABORT'

    if retry:
        # RETRY policy: every case should end up recorded.
        self.model.run()
        self.assertEqual(len(results), len(self.cases))
        self.verify_results(forced_errors)
    else:
        # ABORT policy: the run must raise with a known message shape.
        try:
            self.model.run()
        except Exception as err:
            err = replace_uuid(str(err))
            startmsg = 'driver: Run aborted: Traceback '
            endmsg = 'driven (UUID.4-1): Forced error'
            self.assertEqual(str(err)[:len(startmsg)], startmsg)
            self.assertEqual(str(err)[-len(endmsg):], endmsg)
        else:
            self.fail("Exception expected")
def test_nooutput(self):
    logging.debug('')
    logging.debug('test_nooutput')

    # Create cases with missing output 'dc.sum_z'.
    cases = [Case([('driven.x', numpy_random.normal(size=4)),
                   ('driven.y', numpy_random.normal(size=10))],
                  [('driven.rosen_suzuki', None),
                   ('driven.sum_z', None)])
             for _ in range(2)]

    self.model.driver.iterator = ListCaseIterator(cases)
    results = ListCaseRecorder()
    self.model.driver.recorders = [results]
    self.model.driver.printvars = ['driven.extra']
    self.model.driver.error_policy = 'RETRY'
    self.model.run()

    # Every case is recorded, each carrying the expected error message.
    self.assertEqual(len(results), len(cases))
    expected = "driver: Exception getting case outputs: " \
               "driven \(UUID.[0-9]+-1\): " \
               "'DrivenComponent' object has no attribute 'sum_z'"
    for case in results.cases:
        msg = replace_uuid(case.msg)
        self.assertTrue(re.match(expected, msg))
def test_norecorder(self):
    logging.debug('')
    logging.debug('test_norecorder')

    # The driver must run cleanly even when no recorder has been set.
    self.model.driver.iterator = ListCaseIterator([])
    self.model.run()
def execute(self):
    """Build one Case per iteration for this driver's component, then
    delegate execution to the base class."""
    inp = '%s.x' % self.comp_name
    out = '%s.y' % self.comp_name
    self.iterator = ListCaseIterator(
        [Case(inputs=[(inp, i)], outputs=[out])
         for i in range(self.max_iterations)])
    super(SCIDriver, self).execute()
def _create_nested_workflows(self, dbname, drivertype):
    # Somewhat bogus: inner driver loops overwrite values set by the
    # outer loops, but this test only checks that the Case hierarchy
    # is structured properly.
    top = set_as_top(self._create_assembly(dbname, drivertype))

    driver2 = top.add('driver2', drivertype())
    driver2.recorders = [DBCaseRecorder(dbname, append=True)]
    top.driver.workflow.add(['driver2'])

    driver3 = top.add('driver3', drivertype())
    driver3.recorders = [DBCaseRecorder(dbname, append=True)]
    top.driver2.workflow.add(['driver3'])
    top.driver3.workflow.add(['comp1', 'comp2'])

    # One case set per nesting level.
    for level, drv in enumerate((top.driver, top.driver2, top.driver3), 1):
        drv.iterator = ListCaseIterator(self._create_cases(level))
    return top
def test_caseiter_to_caseset(self):
    cases = ListCaseIterator(self.caselist[3:])

    # Full conversion preserves every case.
    cs = caseiter_to_caseset(cases)
    for original, converted in zip(cases, cs):
        self.assertTrue(original == converted)

    # Restricting to selected variables yields key subsets.
    cssub = caseiter_to_caseset(cases,
                                ['comp1.b', 'comp2.b', 'comp2.c+comp2.d'])
    for original, converted in zip(cases, cssub):
        self.assertTrue(set(converted.keys('in')).issubset(original.keys('in')))
        self.assertTrue(set(converted.keys('out')).issubset(original.keys('out')))
def execute(self):
    """ Generate some cases to be evaluated. """
    # Ten cases of random inputs; error/stop flags start out disabled.
    generated = [Case([('driven.x', numpy_random.normal(size=4)),
                       ('driven.y', numpy_random.normal(size=10)),
                       ('driven.raise_error', False),
                       ('driven.stop_exec', False)],
                      ['driven.rosen_suzuki', 'driven.sum_y'],
                      label=str(i))
                 for i in range(10)]
    self.cases = ListCaseIterator(generated)
def test_output_errors(self):
    # One case requesting the nonexistent output 'driven.foobar'.
    inputs = [('driven.x', numpy_random.normal(size=4)),
              ('driven.y', numpy_random.normal(size=10)),
              ('driven.raise_error', False),
              ('driven.stop_exec', False)]
    outputs = ['driven.rosen_suzuki', 'driven.foobar']
    self.cases = [Case(inputs, outputs, label='1')]

    driver = self.model.driver
    driver.sequential = True
    driver.iterator = ListCaseIterator(self.cases)
    driver.recorders = [ListCaseRecorder()]
    driver.printvars = ['driven.extra']
    driver.error_policy = 'RETRY'
    self.model.run()
def test_2d_filter2(self):
    pf = ParetoFilter()
    points = [(1, 2), (1, 3), (2, 1), (2, 2), (2, 3), (3, 1), (3, 2), (3, 3)]
    cases = [Case(outputs=[("x", x_0), ("y", y_0)]) for x_0, y_0 in points]
    pf.case_sets = [ListCaseIterator(cases)]
    pf.criteria = ['x', 'y']
    pf.execute()
    # NOTE(review): the pareto set is extracted but never asserted here.
    x_p, y_p = zip(*[(case['x'], case['y']) for case in pf.pareto_set])
def test_1d_filter(self):
    pf = ParetoFilter()
    values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    pf.case_sets = [ListCaseIterator([Case(outputs=[("x", v)])
                                      for v in values])]
    pf.criteria = ['x']
    pf.execute()
    # Minimization: only x == 1 is non-dominated.
    self.assertEqual([1], [case['x'] for case in pf.pareto_set])
    self.assertEqual([2, 3, 4, 5, 6, 7, 8, 9, 10],
                     [case['x'] for case in pf.dominated_set])
def test_noresource(self):
    logging.debug('')
    logging.debug('test_noresource')

    # Check response to unsupported resource.
    self.model.driver.extra_resources = {'allocator': 'LocalHost',
                                         'localhost': False}
    self.model.driver.sequential = False
    self.model.driver.iterator = ListCaseIterator([])
    assert_raises(self, 'self.model.run()', globals(), locals(),
                  RuntimeError,
                  'driver: No servers supporting required resources')
def setup_cases(self):
    """ setup the cases to run

    This method has to be called after instantiation of the class, once
    the ``cases`` list has been set.
    """
    run_list = []
    for spec in self.cases:
        self._logger.info('Adding case %s' % spec.case_name)
        run_list.append(Case(inputs=[('runner.inputs', spec)],
                             outputs=['runner.outputs']))
    self.runcases = run_list
    self.case_driver.iterator = ListCaseIterator(run_list)
def test_bad_case_set(self): pf = ParetoFilter() x = [1,1,2,2,2,3,3,3,] y = [2,3,1,2,3,1,2,3] cases = [] for x_0,y_0 in zip(x,y): cases.append(Case(outputs=[("x",x_0),("y",y_0)])) pf.case_sets = [ListCaseIterator(cases),] pf.criteria = ['z','w'] try: pf.execute() except ValueError,err: self.assertEqual(str(err),": no cases provided had all of the outputs " "matching the provided criteria, ['z', 'w']")
def setUp(self):
    """Two-component model under a SimpleCaseIterDriver, preloaded with
    ten cases."""
    self.top = top = set_as_top(Assembly())
    driver = top.add('driver', SimpleCaseIterDriver())
    top.add('comp1', ExecComp(exprs=['z=x+y']))
    top.add('comp2', ExecComp(exprs=['z=x+1']))
    top.connect('comp1.z', 'comp2.x')
    driver.workflow.add(['comp1', 'comp2'])

    # now create some Cases
    outputs = ['comp1.z', 'comp2.z']
    cases = [Case(inputs=[('comp1.x', i), ('comp1.y', i*2)],
                  outputs=outputs,
                  label='case%s' % i)
             for i in range(10)]
    driver.iterator = ListCaseIterator(cases)
def test_save_load(self):
    logging.debug('')
    logging.debug('test_save_load')

    driver = self.model.driver
    driver.iterator = ListCaseIterator(self.cases)
    results = ListCaseRecorder()
    driver.printvars = ['driven.extra']
    driver.recorders = [results]

    # Set local dir in case we're running in a different directory.
    py_dir = self.directory

    # Exercise check_save_load().
    retcode = check_save_load(self.model, py_dir=py_dir)
    self.assertEqual(retcode, 0)
def configure(self):
    """Wire the dispatcher: a CaseIteratorDriver running the aero code
    over the study cases, optionally in parallel."""
    # Disabled scaffolding kept for local testing with PGraf stand-ins.
    if False:  ## testing
        cid = self.add('driver', CaseIteratorDriver())
        self.add('runner', PGrafSubComponent())
        cid.workflow.add('runner')
        cid.sequential = True  #False
        # uncomment to keep simulation directories for debugging purposes
        #import os
        #os.environ['OPENMDAO_KEEPDIRS'] = '1'
        cid.add_parameter('runner.inputs')
        cid.case_inputs.runner.inputs = [PGrafObject(num) for num in range(4)]

    if True:  ## real thing
        print "configuring dispatcher:"
        super(CaseAnalyzer, self).configure()
        cid = self.add('driver', CaseIteratorDriver())
        # self.add('runner', PGrafSubComponent())
        # print self.aerocode
        # self.add('runner', openAeroCode())
        self.add('runner', self.aerocode)
        cid.workflow.add('runner')
        # cid.sequential = True #False
        self.setup_cases()
        # comment this line out to run sequentially
        cid.sequential = not self.run_parallel
        # uncomment to keep simulation directories for debugging purposes
        # os.environ['OPENMDAO_KEEPDIRS'] = '1'
        # apparently a workaround for an openmdao bug:
        cid.ignore_egg_requirements = True
        # NOTE(review): presumably selects the case-feeding API by
        # OpenMDAO version (explicit iterator vs. add_parameter /
        # case_inputs) -- confirm the 0.1 threshold.
        if (self.openmdao_version < 0.1):
            cid.iterator = ListCaseIterator(self.runcases)
        else:
            cid.add_parameter('runner.inputs')
            # cid.case_inputs.runner.inputs = [PGrafObject(num) for num in range(4)]
            # cid.case_inputs.runner.inputs = [GenericRunCase("acase", ['x','y'], [num, num*2]) for num in range(4)]
            cid.case_inputs.runner.inputs = self.studycases
    print "dispatcher configured\n-------------------------------------------\n"
def configure(self):
    """Wire a CaseIteratorDriver that evaluates c1 over N index cases
    after c0 has produced its variable tree."""
    self.add('c0', C0_vt())
    self.add('c1', C1_vt())
    self.add('parallel_driver', CaseIteratorDriver())
    self.driver.workflow.add(['c0', 'parallel_driver'])

    N = 10
    self.c0.N = N

    pdrv = self.parallel_driver
    pdrv.iterator = ListCaseIterator(
        [Case(inputs=[('c1.i', l)]) for l in range(N)])
    pdrv.workflow.add(['c1'])
    pdrv.recorders.append(ListCaseRecorder())
    pdrv.printvars = ['c1.val']

    self.connect('c0.vt', 'c1.vt')
def test_flatten(self):
    """Array and VariableTree values must be flattened into per-element
    scalar entries when recorded to CSV and re-dumped."""
    # create some Cases
    outputs = ['comp1.a_array', 'comp1.vt']
    inputs = [('comp1.x_array', array([2.0, 2.0, 2.0]))]
    self.top.driver.iterator = ListCaseIterator(
        [Case(inputs=inputs, outputs=outputs, label='case1')])
    self.top.driver.recorders = [CSVCaseRecorder(filename=self.filename)]
    self.top.driver.recorders[0].num_backups = 0
    self.top.run()

    # now use the CSV recorder as source of Cases
    self.top.driver.iterator = self.top.driver.recorders[0].get_iterator()
    sout = StringIO.StringIO()
    self.top.driver.recorders = [DumpCaseRecorder(sout)]
    self.top.run()

    # Expected dump lines; the uuid line is matched only by prefix since
    # its value differs on every run.
    expected = [
        'Case: case1',
        ' uuid: ad4c1b76-64fb-11e0-95a8-001e8cf75fe',
        ' inputs:',
        ' comp1.x_array[0]: 2.0',
        ' comp1.x_array[1]: 2.0',
        ' comp1.x_array[2]: 2.0',
        ' outputs:',
        " comp1.a_array[0]: 1.0",
        " comp1.a_array[1]: 3.0",
        " comp1.a_array[2]: 5.5",
        " comp1.vt.v1: 1.0",
        " comp1.vt.v2: 2.0",
        " comp1.vt.vt2.vt3.a: 1.0",
        " comp1.vt.vt2.vt3.b: 12.0",
        " comp1.vt.vt2.x: -1.0",
        " comp1.vt.vt2.y: -2.0",
        ]
    lines = sout.getvalue().split('\n')
    for index, line in enumerate(lines):
        if line.startswith('Case: case1'):
            # Compare each expected line against the dump, offset from
            # the 'Case:' header.
            for i in range(len(expected)):
                if expected[i].startswith(' uuid:'):
                    self.assertTrue(lines[index + i].startswith(' uuid:'))
                else:
                    self.assertEqual(lines[index + i], expected[i])
            break
    else:
        # for/else: only reached if no line matched the header.
        self.fail("couldn't find the expected Case")
def test_warm_start(self):
    """Warm-starting a second MetaModel from recorded cases must give the
    same predictions as the directly-trained model at a training point."""
    # Train the first metamodel on two points.
    metamodel = MetaModel()
    metamodel.name = 'meta'
    metamodel.default_surrogate = KrigingSurrogate()
    metamodel.model = Simple()
    metamodel.recorder = DumbRecorder()
    simple = Simple()

    cases = []

    # First training point: (a, b) = (1, 2).
    metamodel.a = 1.
    metamodel.b = 2.
    metamodel.train_next = True
    metamodel.run()
    inputs = [('meta2.a', metamodel.a), ('meta2.b', metamodel.b)]
    outputs = [('meta2.c', metamodel.c.mu), ('meta2.d', metamodel.d.mu)]
    cases.append(Case(inputs=inputs, outputs=outputs))

    # Second training point: (a, b) = (3, 5).
    metamodel.a = 3.
    metamodel.b = 5.
    metamodel.train_next = True
    metamodel.run()
    inputs = [('meta2.a', metamodel.a), ('meta2.b', metamodel.b)]
    outputs = [('meta2.c', metamodel.c.mu), ('meta2.d', metamodel.d.mu)]
    cases.append(Case(inputs=inputs, outputs=outputs))

    case_iter = ListCaseIterator(cases)

    # Second metamodel is warm-started from the recorded cases rather
    # than trained directly.
    metamodel2 = MetaModel()
    metamodel2.name = 'meta2'
    metamodel2.default_surrogate = KrigingSurrogate()
    metamodel2.model = Simple()
    metamodel2.recorder = DumbRecorder()
    metamodel2.warm_start_data = case_iter

    metamodel2.a = simple.a = 1
    metamodel2.b = simple.b = 2
    # NOTE(review): this sets train_next on the *first* metamodel just
    # before running metamodel2 -- possibly meant metamodel2.train_next;
    # confirm intent.
    metamodel.train_next = True
    metamodel2.run()
    simple.run()

    # Predictions at the (1, 2) training point must match both the
    # known values and the plain component's outputs.
    self.assertEqual(metamodel2.c.getvalue(), 3.)
    self.assertEqual(metamodel2.d.getvalue(), -1.)
    self.assertEqual(metamodel2.c.getvalue(), simple.c)
    self.assertEqual(metamodel2.d.getvalue(), simple.d)
def test_rerun(self):
    logging.debug('')
    logging.debug('test_rerun')

    # First pass: run everything sequentially and keep the results.
    self.run_cases(sequential=True)
    orig_cases = self.model.driver.recorders[0].cases

    # Second pass: replay only the odd-indexed cases via a filter.
    self.model.driver.iterator = ListCaseIterator(orig_cases)
    rerun_seq = (1, 3, 5, 7, 9)
    self.model.driver.filter = SequenceCaseFilter(rerun_seq)
    rerun = ListCaseRecorder()
    self.model.driver.printvars = ['driven.extra']
    self.model.driver.recorders[0] = rerun
    self.model.run()

    self.assertEqual(len(orig_cases), 10)
    self.assertEqual(len(rerun.cases), len(rerun_seq))
    for replayed, index in zip(rerun.cases, rerun_seq):
        self.assertEqual(replayed, orig_cases[index])
def configure(self):
    """Set up a concurrent CaseIteratorDriver over four PGraf cases."""
    drv = self.add('driver', CaseIteratorDriver())
    self.add('runner', PGrafSubComponent())
    drv.workflow.add('runner')
    drv.sequential = False
    # uncomment to keep simulation directories for debugging purposes
    #import os
    #os.environ['OPENMDAO_KEEPDIRS'] = '1'
    drv.iterator = ListCaseIterator(
        [Case(inputs=[('runner.obj', PGrafObject(num)),
                      ('runner.num', num)],
              outputs=['runner.result'])
         for num in range(4)])
    drv.recorders = [ListCaseRecorder()]
def __init__(self):
    """Creates an Assembly to run DREA and HSRnoise."""
    super(DREA_HSRnoise, self).__init__()

    # Flight-condition cases: flyover (FO), approach (App) and sideline
    # (SL) points at various power codes (PC) and azimuth angles (phi).
    # The output entries are placeholders filled in as each case runs.
    FO1 = Case(inputs=[('point', 1),('dreaprep.Mach',0.28),('alt',2000.0),('dreaprep.PC',100.0),('hsrnoise.phi', 0.0)], outputs=[('drea.CFG',0.0),('hsrnoise.thetas',0.0),('hsrnoise.Freq',0.0),('hsrnoise.SPL_corr',0),('hsrnoise.OASPL30',0.0),('hsrnoise.OASPL60',0.0),('hsrnoise.OASPL90',0.0),('hsrnoise.OASPL120',0.0),('hsrnoise.OASPL150',0.0)])
    FO2 = Case(inputs=[('point', 1),('dreaprep.Mach',0.28),('alt',2000.0),('dreaprep.PC', 65.0),('hsrnoise.phi', 0.0)], outputs=[('drea.CFG',0.0),('hsrnoise.thetas',0.0),('hsrnoise.Freq',0.0),('hsrnoise.SPL_corr',0),('hsrnoise.OASPL30',0.0),('hsrnoise.OASPL60',0.0),('hsrnoise.OASPL90',0.0),('hsrnoise.OASPL120',0.0),('hsrnoise.OASPL150',0.0)])
    App = Case(inputs=[('point', 2),('dreaprep.Mach',0.20),('alt', 394.0),('dreaprep.PC', 30.0),('hsrnoise.phi', 0.0)], outputs=[('drea.CFG',0.0),('hsrnoise.thetas',0.0),('hsrnoise.Freq',0.0),('hsrnoise.SPL_corr',0),('hsrnoise.OASPL30',0.0),('hsrnoise.OASPL60',0.0),('hsrnoise.OASPL90',0.0),('hsrnoise.OASPL120',0.0),('hsrnoise.OASPL150',0.0)])
    SL1 = Case(inputs=[('point', 3),('dreaprep.Mach',0.25),('alt',1000.0),('dreaprep.PC',100.0),('hsrnoise.phi', 0.0)], outputs=[('drea.CFG',0.0),('hsrnoise.thetas',0.0),('hsrnoise.Freq',0.0),('hsrnoise.SPL_corr',0),('hsrnoise.OASPL30',0.0),('hsrnoise.OASPL60',0.0),('hsrnoise.OASPL90',0.0),('hsrnoise.OASPL120',0.0),('hsrnoise.OASPL150',0.0)])
    SL2 = Case(inputs=[('point', 3),('dreaprep.Mach',0.25),('alt',1000.0),('dreaprep.PC',100.0),('hsrnoise.phi',30.0)], outputs=[('drea.CFG',0.0),('hsrnoise.thetas',0.0),('hsrnoise.Freq',0.0),('hsrnoise.SPL_corr',0),('hsrnoise.OASPL30',0.0),('hsrnoise.OASPL60',0.0),('hsrnoise.OASPL90',0.0),('hsrnoise.OASPL120',0.0),('hsrnoise.OASPL150',0.0)])
    SL3 = Case(inputs=[('point', 3),('dreaprep.Mach',0.25),('alt',1000.0),('dreaprep.PC',100.0),('hsrnoise.phi',60.0)], outputs=[('drea.CFG',0.0),('hsrnoise.thetas',0.0),('hsrnoise.Freq',0.0),('hsrnoise.SPL_corr',0),('hsrnoise.OASPL30',0.0),('hsrnoise.OASPL60',0.0),('hsrnoise.OASPL90',0.0),('hsrnoise.OASPL120',0.0),('hsrnoise.OASPL150',0.0)])
    SL4 = Case(inputs=[('point', 3),('dreaprep.Mach',0.25),('alt',1000.0),('dreaprep.PC',100.0),('hsrnoise.phi',90.0)], outputs=[('drea.CFG',0.0),('hsrnoise.thetas',0.0),('hsrnoise.Freq',0.0),('hsrnoise.SPL_corr',0),('hsrnoise.OASPL30',0.0),('hsrnoise.OASPL60',0.0),('hsrnoise.OASPL90',0.0),('hsrnoise.OASPL120',0.0),('hsrnoise.OASPL150',0.0)])

    cases = ListCaseIterator([FO1,FO2,App,SL1,SL2,SL3,SL4])
    # Recorder doubles as the data source for ACDgen.
    db_recorder = DBCaseRecorder()

    self.add('geo', Geometry())
    self.add('dreaprep', DREAprep())
    self.add('drea', DREA())
    self.add('hsrnoise', HSRNOISE())
    self.add('ACDgen', ACDgen())
    self.add('analysis',CaseIteratorDriver())

    self.analysis.iterator = cases
    self.analysis.recorders = [db_recorder]
    self.ACDgen.case_data = db_recorder.get_iterator()

    # Set up the workflows
    #---------------------------
    #self.analysis.workflow.add(['dreaprep', 'drea', 'hsrnoise'])
    #self.driver.workflow.add(['analysis','ACDgen'])
    self.driver.workflow.add(['dreaprep', 'drea', 'hsrnoise'])

    # Connections
    #---------------------------
    self.connect('geo',['drea.geo_in','hsrnoise.geo_in'])
    self.connect('alt',['dreaprep.alt','hsrnoise.ALTEVO'])
    self.connect('dreaprep.flow_out','drea.flow_in')
    self.connect('drea.flow_out','hsrnoise.flow_in')
    self.connect('drea.CFG','hsrnoise.CFG')
def test_run_stop_step_resume(self):
    logging.debug('')
    logging.debug('test_run_stop_step_resume')

    self.generate_cases()
    stop_case = self.cases[1]  # Stop after 2 cases run.
    stop_case['driven.stop_exec'] = True

    driver = self.model.driver
    driver.iterator = ListCaseIterator(self.cases)
    results = ListCaseRecorder()
    driver.recorders = [results]
    driver.printvars = ['driven.extra']
    driver.sequential = True

    # The run should halt once the stop case executes.
    try:
        self.model.run()
    except RunStopped:
        self.assertEqual(len(results), 2)
        self.verify_results()
    else:
        self.fail('Expected RunStopped')

    # Single-step twice, then resume to completion.
    driver.step()
    self.assertEqual(len(results), 3)
    self.verify_results()

    driver.step()
    self.assertEqual(len(results), 4)
    self.verify_results()

    driver.resume()
    self.assertEqual(len(results), len(self.cases))
    self.verify_results()

    # Resuming a finished run must be rejected.
    try:
        driver.resume()
    except RuntimeError as exc:
        self.assertEqual(str(exc), 'driver: Run already complete')
    else:
        self.fail('Expected RuntimeError')
def setUp(self):
    """Model with string/array/vartree variables plus ten preloaded
    cases, used to exercise the CSV case recorder."""
    self.top = top = set_as_top(Assembly())
    driver = top.add('driver', SimpleCaseIterDriver())
    top.add('comp1', ExecComp(exprs=['z=x+y']))
    top.add('comp2', ExecComp(exprs=['z=x+1']))
    top.connect('comp1.z', 'comp2.x')

    # Extra variables covering tricky CSV content: quoted strings,
    # arrays, and a variable tree.
    comp1 = top.comp1
    comp1.add('a_string', Str("Hello',;','", iotype='out'))
    comp1.add('a_array', Array(array([1.0, 3.0, 5.5]), iotype='out'))
    comp1.add('x_array', Array(array([1.0, 1.0, 1.0]), iotype='in'))
    comp1.add('vt', Slot(DumbVT, iotype='out'))
    comp1.vt = DumbVT()
    driver.workflow.add(['comp1', 'comp2'])

    # now create some Cases
    outputs = ['comp1.z', 'comp2.z', 'comp1.a_string', 'comp1.a_array[2]']
    cases = [Case(inputs=[('comp1.x', i+0.1),
                          ('comp1.y', i*2 + .1),
                          ('comp1.x_array[1]', 99.88)],
                  outputs=outputs,
                  label='case%s' % i)
             for i in range(10)]
    driver.iterator = ListCaseIterator(cases)

    self.filename = "openmdao_test_csv_case_iterator.csv"
def test_list(self):
    """Iterating a ListCaseIterator yields cases in order with inputs
    populated and outputs still marked missing."""
    outputs = ['z1', 'z2']
    cases = [Case([('x', i), ('y', i*2)], outputs) for i in range(5)]
    iterator = ListCaseIterator(cases)

    for i, case in enumerate(iterator):
        self.assertEqual(len(case.items(iotype='in')), 2)
        self.assertEqual(len(case.items(iotype='out')), 2)
        for name, value in (('x', i), ('y', i*2)):
            self.assertTrue(name in case)
            self.assertEqual(case[name], value)
        for name in outputs:
            self.assertTrue(name in case)
            self.assertEqual(case[name], _Missing)
    # Loop variable leaks out: confirms all five cases were seen.
    self.assertEqual(i, 4)
def test_noinput(self):
    logging.debug('')
    logging.debug('test_noinput')

    # Create cases with missing input 'dc.z'.
    cases = [Case([('driven.x', numpy_random.normal(size=4)),
                   ('driven.z', numpy_random.normal(size=10))],
                  [('driven.rosen_suzuki', None),
                   ('driven.sum_y', None)])
             for _ in range(2)]

    self.model.driver.iterator = ListCaseIterator(cases)
    results = ListCaseRecorder()
    self.model.driver.recorders = [results]
    self.model.driver.printvars = ['driven.extra']
    self.model.run()

    # Every case is recorded, each carrying the expected error message.
    self.assertEqual(len(results), len(cases))
    msg = "driver: Exception setting case inputs:" \
          " driven: object has no attribute 'z'"
    for case in results.cases:
        self.assertEqual(case.msg, msg)