def test_flatten(self):
    """Case.items(flatten=True) must expand lists, array.arrays, numpy
    arrays and nested VariableTrees into individually-indexed scalars."""
    dvt = DumbVT()
    inputs = [('comp1.a_lst', [1, 2, 3, [7, 8, 9]]),
              ('comp1.a_arr', array.array('d', [4, 5, 6])),
              ('comp1.np_arr', nparray([[1, 2], [3, 4], [5, 6]])),
              ('comp1.vt', dvt),
              ]
    case = Case(inputs=inputs)
    # Compare as sets: the flattening order is an implementation detail.
    self.assertEqual(set(case.items(flatten=True)),
                     set([('comp1.a_lst[0]', 1),
                          ('comp1.a_lst[1]', 2),
                          ('comp1.a_lst[2]', 3),
                          ('comp1.a_lst[3][0]', 7),
                          ('comp1.a_lst[3][1]', 8),
                          ('comp1.a_lst[3][2]', 9),
                          ('comp1.a_arr[0]', 4.0),
                          ('comp1.a_arr[1]', 5.0),
                          ('comp1.a_arr[2]', 6.0),
                          ('comp1.np_arr[0][0]', 1),
                          ('comp1.np_arr[0][1]', 2),
                          ('comp1.np_arr[1][0]', 3),
                          ('comp1.np_arr[1][1]', 4),
                          ('comp1.np_arr[2][0]', 5),
                          ('comp1.np_arr[2][1]', 6),
                          ('comp1.vt.vt2.vt3.a', 1.),
                          ('comp1.vt.vt2.vt3.b', 12.),
                          ('comp1.vt.vt2.x', -1.),
                          ('comp1.vt.vt2.y', -2.),
                          ('comp1.vt.v1', 1.),
                          ('comp1.vt.v2', 2.),
                          ('comp1.vt.data', ''),
                          ('comp1.vt.vt2.data', ''),
                          ('comp1.vt.vt2.vt3.data', '')]))
def test_CSVCaseIterator_read_external_file_without_header(self):
    """Read a headerless CSV using an explicit column->varname mapping."""
    # NOTE: the two row literals are adjacent strings and merge into one
    # list element; writelines() emits identical file content either way.
    csv_data = [ '33.5, 76.2, "Hello There"\n'
                 '3.14159, 0, "Goodbye z"\n' ]
    outfile = open(self.filename, 'w')
    outfile.writelines(csv_data)
    outfile.close()
    # Map column index -> variable name since the file has no header row.
    header_dict = { 0: "comp1.x", 1: "comp1.y", 2: "comp2.b_string" }
    self.top.comp2.add('b_string', Str("Hello',;','", iotype='in'))
    sout = StringIO.StringIO()
    cases = [case for case in CSVCaseIterator(filename=self.filename,
                                              headers=header_dict)]
    self.top.driver.clear_parameters()
    Case.set_vartree_inputs(self.top.driver, cases)
    self.top.recorders = [DumpCaseRecorder(sout)]
    self.top.run()
    # After the run the model should hold the values of the last case read.
    self.assertEqual(self.top.comp1.x, 3.14159)
    self.assertEqual(self.top.comp1.y, 0.0)
    self.assertEqual(self.top.comp2.b_string, "Goodbye z")
def setUp(self):
    """Build a two-component assembly in a fresh temp dir plus ten cases."""
    self.startdir = os.getcwd()
    self.tempdir = tempfile.mkdtemp(prefix='test_csv-')
    os.chdir(self.tempdir)
    self.top = top = set_as_top(Assembly())
    driver = top.add('driver', SimpleCaseIterDriver())
    top.add('comp1', ExecComp(exprs=['z=x+y']))
    top.add('comp2', ExecComp(exprs=['z=x+1']))
    top.connect('comp1.z', 'comp2.x')
    # Extra variables exercise string/array/bool/vartree recording.
    top.comp1.add('a_string', Str("Hello',;','", iotype='out'))
    top.comp1.add('a_array', Array(array([1.0, 3.0, 5.5]), iotype='out'))
    top.comp1.add('x_array', Array(array([1.0, 1.0, 1.0]), iotype='in'))
    top.comp1.add('b_bool', Bool(False, iotype='in'))
    top.comp1.add('vt', VarTree(DumbVT(), iotype='out'))
    top.driver.workflow.add(['comp1', 'comp2'])
    # now create some Cases
    outputs = ['comp1.z', 'comp2.z', 'comp1.a_string', 'comp1.a_array[2]']
    cases = []
    for i in range(10):
        i = float(i)
        inputs = [('comp1.x', i + 0.1),
                  ('comp1.y', i * 2 + .1),
                  ('comp1.x_array[1]', 99.88),
                  ('comp1.b_bool', True)]
        cases.append(Case(inputs=inputs, outputs=outputs))
    Case.set_vartree_inputs(driver, cases)
    driver.add_responses(sorted(outputs))
    self.filename = "openmdao_test_csv_case_iterator.csv"
def setUp(self):
    """Create two distinct cases plus a duplicate of the first."""
    in_vals = [('comp1.a', 4), ('comp1.b', 8), ('comp2.b', 2)]
    out_exprs = ['comp2.c+comp2.d', 'max(comp1.d,comp2.d)']
    self.case1 = Case(inputs=in_vals, outputs=out_exprs)
    self.case1_dup = Case(inputs=in_vals, outputs=out_exprs)
    # Tweak one input so case2 differs from case1.
    in_vals[1] = ('comp1.b', 9)
    self.case2 = Case(inputs=in_vals, outputs=out_exprs)
def setUp(self):
    """Assembly with string/array/bool/vartree variables and ten cases."""
    self.top = top = set_as_top(Assembly())
    driver = top.add('driver', SimpleCaseIterDriver())
    top.add('comp1', ExecComp(exprs=['z=x+y']))
    top.add('comp2', ExecComp(exprs=['z=x+1']))
    top.connect('comp1.z', 'comp2.x')
    # Extra variables exercise string/array/bool/vartree recording.
    top.comp1.add('a_string', Str("Hello',;','", iotype='out'))
    top.comp1.add('a_array', Array(array([1.0, 3.0, 5.5]), iotype='out'))
    top.comp1.add('x_array', Array(array([1.0, 1.0, 1.0]), iotype='in'))
    top.comp1.add('b_bool', Bool(False, iotype='in'))
    top.comp1.add('vt', VarTree(DumbVT(), iotype='out'))
    driver.workflow.add(['comp1', 'comp2'])
    # now create some Cases
    outputs = ['comp1.z', 'comp2.z', 'comp1.a_string', 'comp1.a_array[2]']
    cases = []
    for i in range(10):
        inputs = [('comp1.x', i+0.1),
                  ('comp1.y', i*2 + .1),
                  ('comp1.x_array[1]', 99.88),
                  ('comp1.b_bool', True)]
        cases.append(Case(inputs=inputs, outputs=outputs))
    Case.set_vartree_inputs(driver, cases)
    driver.add_responses(sorted(outputs))
    self.filename_json = "openmdao_test_csv_case_iterator.json"
    self.filename_csv = "openmdao_test_csv_case_iterator.csv"
def test_inoutDB(self):
    # This test runs some cases, puts them in a DB using a DBCaseRecorder,
    # then runs the model again using the same cases, pulled out of the DB
    # by a DBCaseIterator. Finally the cases are dumped to a string after
    # being run for the second time.
    self.top.recorders = [DBCaseRecorder()]
    self.top.run()

    # Gui pane stuff
    attrs = self.top.recorders[0].get_attributes()
    self.assertTrue("Inputs" in attrs.keys())
    self.assertTrue({'name': 'dbfile',
                     'id': 'dbfile',
                     'type': 'str',
                     'connected': '',
                     'value': ':memory:',
                     'desc': 'Name of the database file to be recorded. Default ' +
                             'is ":memory:", which writes the database to memory.'} in attrs['Inputs'])

    # now use the DB as source of Cases
    cases = [case for case in self.top.recorders[0].get_iterator()]
    Case.set_vartree_inputs(self.top.driver, cases)

    sout = StringIO.StringIO()
    self.top.recorders = [DumpCaseRecorder(sout)]
    self.top.run()

    # uuid/timestamp vary per run; they are matched by prefix only below.
    expected = [
        'Case:',
        ' uuid: ad4c1b76-64fb-11e0-95a8-001e8cf75fe',
        ' timestamp: 1383239074.309192',
        ' inputs:',
        " comp1.a_dict: {'a': 'b'}",
        " comp1.a_list: ['a', 'b']",
        ' comp1.x: 8.0',
        ' comp1.y: 16.0',
        ' outputs:',
        ' Response(comp1.z): 24.0',
        ' Response(comp2.z): 25.0',
    ]
    # print sout.getvalue()
    lines = sout.getvalue().split('\n')
    count = 0
    # Locate the ninth dumped Case and compare it line-by-line.
    for index, line in enumerate(lines):
        if line.startswith('Case:'):
            count += 1
            if count != 9:
                continue
            for i in range(len(expected)):
                if expected[i].startswith(' uuid:'):
                    self.assertTrue(lines[index+i].startswith(' uuid:'))
                elif expected[i].startswith(' timestamp:'):
                    self.assertTrue(lines[index+i].startswith(' timestamp:'))
                else:
                    self.assertEqual(lines[index+i], expected[i])
            break
    else:
        self.fail("couldn't find the expected Case")
def test_inoutDB(self):
    # This test runs some cases, puts them in a DB using a DBCaseRecorder,
    # then runs the model again using the same cases, pulled out of the DB
    # by a DBCaseIterator. Finally the cases are dumped to a string after
    # being run for the second time.
    self.top.recorders = [DBCaseRecorder()]
    self.top.run()

    # now use the DB as source of Cases
    cases = [case for case in self.top.recorders[0].get_iterator()]
    Case.set_vartree_inputs(self.top.driver, cases)

    sout = StringIO.StringIO()
    self.top.recorders = [DumpCaseRecorder(sout)]
    self.top.run()

    # uuid/timestamp vary per run; they are matched by prefix only below.
    expected = [
        "Case:",
        " uuid: d99424f3-9c1b-11e4-801d-20c9d0478eff",
        " timestamp: 1421260415.638640",
        " inputs:",
        " comp1.a_dict: {'a': 'b'}",
        " comp1.a_list: ['a', 'b']",
        " comp1.x: 8.0",
        " comp1.y: 16.0",
        " outputs:",
        " _pseudo_0.out0: 24.0",
        " _pseudo_1.out0: 25.0",
        " comp1.derivative_exec_count: 0",
        " comp1.exec_count: 19",
        " comp1.itername: 9-comp1",
        " comp1.z: 24.0",
        " comp2.derivative_exec_count: 0",
        " comp2.exec_count: 19",
        " comp2.itername: 9-comp2",
        " comp2.z: 25.0",
        " driver.workflow.itername: 9",
    ]
    #print sout.getvalue()
    lines = sout.getvalue().split('\n')
    count = 0
    # Locate the ninth dumped Case and compare it line-by-line.
    for index, line in enumerate(lines):
        if line.startswith('Case:'):
            count += 1
            if count != 9:
                continue
            for i in range(len(expected)):
                if expected[i].startswith(' uuid:'):
                    self.assertTrue(lines[index+i].startswith(' uuid:'))
                elif expected[i].startswith(' timestamp:'):
                    self.assertTrue(lines[index+i].startswith(' timestamp:'))
                else:
                    self.assertEqual(lines[index+i], expected[i])
            break
    else:
        self.fail("couldn't find the expected Case")
def test_CSVCaseRecorder_messages(self):
    """A case whose width differs from the first recorded case must raise."""
    recorder = CSVCaseRecorder(filename=self.filename)
    # The first record fixes the header width at three columns.
    wide = Case(inputs=[('comp1.x', 2.0), ('comp1.y', 4.3), ('comp2.x', 1.9)])
    recorder.record(wide)
    narrow = Case(inputs=[('comp1.x', 2.0), ('comp2.x', 1.9)])
    try:
        recorder.record(narrow)
    except Exception as err:
        self.assertEqual(str(err),
                         "number of data points doesn't match header size in CSV recorder")
    else:
        self.fail("Exception expected")
    finally:
        recorder.close()
def setUp(self):
    """Build 20 cases with scalar inputs and one distribution output."""
    case_list = []
    for idx in range(20):
        x = float(idx)
        new_case = Case(inputs=[('comp1.x', x), ('comp1.y', idx*2.)])
        # Outputs are attached directly rather than via the constructor.
        new_case._outputs = {'comp1.z': idx*1.5,
                             'comp2.normal': NormalDistribution(x, 0.5)}
        case_list.append(new_case)
    self.caseiter = ListCaseIterator(case_list)
    self.varnames = ['comp2.normal', 'comp1.x', 'comp1.z']
def setUp(self):
    """Build two unique cases plus several duplicates for uniqueness tests."""
    in_vals = [('comp1.a', 4), ('comp1.b', 8), ('comp2.b', 2)]
    out_exprs = ['comp2.c+comp2.d', 'max(comp1.d,comp2.d)']
    self.case1 = Case(inputs=in_vals, outputs=out_exprs)
    # Exact duplicate of case1.
    self.case1_dup = Case(inputs=in_vals, outputs=out_exprs)
    in_vals[1] = ('comp1.b', 9)
    self.case2 = Case(inputs=in_vals, outputs=out_exprs)
    self.caselist = [self.case1, self.case2, self.case1_dup]
    # Five more distinct cases, each immediately followed by its duplicate.
    for offset in range(5):
        in_vals[1] = ('comp1.b', 10+offset)
        self.caselist.append(Case(inputs=in_vals, outputs=out_exprs))
        self.caselist.append(Case(inputs=in_vals, outputs=out_exprs))
def test_inoutCSV_delimiter(self):
    # Repeat test above using semicolon delimiter and ' as quote char.
    self.top = set_as_top(TestAssembly())
    self.top.recorders = [CSVCaseRecorder(filename=self.filename,
                                          delimiter=';', quotechar="'")]
    self.top.run()

    # now use the CSV recorder as source of Cases
    cases = [case for case in self.top.recorders[0].get_iterator()]

    driver = self.top.add('driver', SimpleCaseIterDriver())
    Case.set_vartree_inputs(self.top.driver, cases)

    sout = StringIO.StringIO()
    self.top.recorders = [DumpCaseRecorder(sout)]
    driver.add_responses(['comp.x', ])
    self.top.run()

    # Check the results
    expected = [
        "Case:",
        " uuid: 2983e819-9c1b-11e4-804e-20c9d0478eff",
        " timestamp: 1421260120.252554",
        " inputs:",
        " comp.dummy_data.dummy1: 0.0333333393814",
        " outputs:",
        " _pseudo_1.out0: -2.19888892477",
        " comp.derivative_exec_count: 0",
        " comp.exec_count: 46",
        " comp.itername: 4-comp",
        " comp.x: -2.19888892477",
        " driver.workflow.itername: 4",
    ]

    lines = sout.getvalue().split('\n')
    count = 0
    # Locate the fourth dumped Case and compare it line-by-line.
    for index, line in enumerate(lines):
        if line.startswith('Case:'):
            count += 1
            if count != 4:
                continue
            for i in range(len(expected)):
                prefix = expected[i].split(':')[0]
                if prefix.lstrip() in ['uuid', 'timestamp', 'comp.exec_count']:
                    # these values vary, just check proper prefix & indentation
                    self.assertTrue(lines[index+i].startswith(prefix+':'))
                else:
                    self.assertEqual(lines[index+i], expected[i])
            break
    else:
        self.fail("couldn't find the expected Case")
def test_expr(self): cases = ( Case(inputs=(('comp1.a', 4), ), outputs=(('comp2.b', 3), )), Case(inputs=(('comp1.a', 6), ), outputs=(('comp2.b', 3), )), Case(inputs=(('comp1.a', 4), ), outputs=(('comp2.b', 2), )), ) # By sequence number. filter = ExprCaseFilter('seqno > 1') accepted = [] for seqno, case in enumerate(cases): if filter.select(seqno, case): accepted.append(seqno)
def test_update_empty(self):
    """Updating an empty CaseSet copies every case from the source set."""
    first = Case(inputs=[('x', 10)], outputs=[('y', 10)])
    second = Case(inputs=[('x', 1)], outputs=[('y', 1)])
    source = CaseSet()
    source.record_case(first)
    source.record_case(second)
    target = CaseSet()
    target.update(source)
    # Both sets must now iterate in lockstep with equal cases.
    for expected, actual in zip(source, target):
        self.assertEqual(expected, actual)
def test_inoutCSV(self):
    #This test runs some cases, puts them in a CSV file using a
    #CSVCaseRecorder, then runs the model again using the same cases,
    #pulled out of the CSV file by a CSVCaseIterator. Finally the cases
    #are dumped to a string after being run for the second time.
    self.top.recorders = [CSVCaseRecorder(filename=self.filename)]
    self.top.recorders[0].num_backups = 0
    self.top.run()

    # now use the CSV recorder as source of Cases
    cases = [case for case in self.top.recorders[0].get_iterator()]
    Case.set_vartree_inputs(self.top.driver, cases)

    sout = StringIO.StringIO()
    self.top.recorders = [DumpCaseRecorder(sout)]
    self.top.run()

    # uuid/timestamp vary per run; they are matched by prefix only below.
    expected = [
        'Case:',
        ' uuid: ad4c1b76-64fb-11e0-95a8-001e8cf75fe',
        ' timestamp: 1383239074.309192',
        ' inputs:',
        ' comp1.b_bool: True',
        ' comp1.x: 8.1',
        ' comp1.x_array[1]: 99.88',
        ' comp1.y: 16.1',
        ' outputs:',
        ' Response(comp1.a_array[2]): 5.5',
        " Response(comp1.a_string): Hello',;','",
        ' Response(comp1.z): 24.2',
        ' Response(comp2.z): 25.2',
    ]
    # print sout.getvalue()
    lines = sout.getvalue().split('\n')
    count = 0
    # Locate the ninth dumped Case and compare it line-by-line.
    for index, line in enumerate(lines):
        if line.startswith('Case:'):
            count += 1
            if count != 9:
                continue
            for i in range(len(expected)):
                if expected[i].startswith(' uuid:'):
                    self.assertTrue(lines[index+i].startswith(' uuid:'))
                elif expected[i].startswith(' timestamp:'):
                    self.assertTrue(lines[index+i].startswith(' timestamp:'))
                else:
                    self.assertEqual(lines[index+i], expected[i])
            break
    else:
        self.fail("couldn't find the expected Case")
def test_close(self):
    """:memory: recorders survive close(); file-based ones must not."""
    # :memory: can be used after close.
    recorder = DBCaseRecorder()
    case = Case(inputs=[('str', 'Normal String'),
                        ('unicode', u'Unicode String'),
                        ('list', ['Hello', 'world'])])
    # Check pickling.
    recorder.record(case)
    recorder.close()
    recorder.record(case)

    # File-based DB recorder can not be used after close.
    tmpdir = tempfile.mkdtemp()
    try:
        dfile = os.path.join(tmpdir, 'junk.db')
        recorder = DBCaseRecorder(dfile)
        recorder.record(case)
        recorder.close()
        # NOTE: assert_raises evaluates its code string in locals(), so the
        # names 'recorder' and 'case' must not be renamed.
        assert_raises(self, 'recorder.record(case)', globals(), locals(),
                      RuntimeError, 'Attempt to record on closed recorder')
    finally:
        try:
            shutil.rmtree(tmpdir)
        except OSError:
            logging.error("problem removing directory %s" % tmpdir)
def test_close(self):
    """Closing a CaseSet after recording should not raise."""
    one_case = Case(inputs=[('x', 10), ], outputs=[('y', 10)])
    case_set = CaseSet()
    case_set.record(one_case)
    case_set.close()
def run_test(model, initial, limit, max_servers):
    """ Run test with the given model. """
    results = []
    duration = initial
    # Double the per-case delay until concurrency stops paying off or the
    # delay limit is reached.
    while duration < limit:
        print 'run test, delay %s' % duration
        template = Case(inputs=[('sleeper.delay', None, duration)])
        model.driver.iterator = Iterator(template)
        model.driver.recorders = [Recorder(model.driver.iterator, duration)]
        start = time.time()
        model.driver.recorder.start = start
        model.run()
        et = time.time() - start
        n_cases = model.driver.recorders[0].n_cases
        if n_cases > 0:
            print ' %d cases done in %.2f (%.2f sec/case)' \
                  % (n_cases, et, et/n_cases)
            if n_cases < MAX_TRIALS:
                payoff_case = model.driver.recorders[0].payoff_case
                if payoff_case < n_cases:
                    print ' payoff at %d' % payoff_case
                    results.append((duration, payoff_case, n_cases, et))
                    # Stop once we hit the smallest useful case count.
                    if payoff_case == 2 or n_cases <= (max_servers + 1):
                        print ' (minimum case limit)'
                        break
            else:
                print ' no concurrency advantage'
            duration *= 2.
        else:
            print ' no successful cases, aborting'
            break
    return results
def run_suite(resource_desc=None, name=None):
    """ Run suite of tests using `resource_desc` and record under `name`. """
    resource_desc = resource_desc or {}
    name = name or ''
    print '\n%s' % name
    initial = 0.01
    limit = 20
    # NOTE(review): this initial value is overwritten by run_test() below.
    results = {}
    max_servers = ResourceAllocationManager.max_servers(resource_desc)
    print 'max servers', max_servers
    model = CID()
    model.driver.reload_model = False
    model.driver.sequential = False
    # Save to an egg to avoid analysis overhead during run_test().
    print '\nInitializing egg module analysis'
    template = Case(inputs=[('sleeper.delay', None, 0.01)])
    model.driver.iterator = Iterator(template)
    model.driver.recorders = [Recorder(model.driver.iterator, 1000)]
    start = time.time()
    egg_filename, required_distributions, orphan_modules = \
        model.save_to_egg('caseperf', '0')
    et = time.time() - start
    print ' done in %.2f' % et
    os.remove(egg_filename)
    print
    results = run_test(model, initial, limit, max_servers)
    record_results(results, name)
def post_iteration(self):
    """ Checks CONMIN's return status and writes out cases."""
    super(CONMINdriver, self).post_iteration()

    # Iteration count comes from CONMIN. You can't just count over the
    # loop because some cycles do other things (e.g., numerical
    # gradient calculation)
    if self.iter_count != self.cnmn1.iter:
        self.iter_count = self.cnmn1.iter

        if self.recorders:
            # Write out some relevant information to the recorder
            dvals = [float(val) for val in self.design_vals[:-2]]
            case_input = []
            for var, val in zip(self.get_parameters().keys(), dvals):
                case_input.append([var[0], val])
            if self.printvars:
                # Evaluate any extra expressions the user asked to record.
                case_output = [(name,
                                ExprEvaluator(name, scope=self.parent).evaluate())
                               for name in self.printvars]
            else:
                case_output = []
            case_output.append(["objective", self.cnmn1.obj])
            for i, val in enumerate(self.constraint_vals):
                case_output.append(["Constraint%d" % i, val])
            case = Case(case_input, case_output, parent_uuid=self._case_id)
            #FIXME: the driver should probably just add its own recorder for this information
            # instead of just putting it into the first recorder it finds
            self.recorders[0].record(case)
def test_flatten(self):
    """Smoke-test recording array/vartree responses via CSVCaseRecorder."""
    # create some Cases
    outputs = ['comp1.a_array', 'comp1.vt']
    inputs = [('comp1.x_array', array([2.0, 2.0, 2.0]))]
    cases = [Case(inputs=inputs, outputs=outputs)]
    self.top.driver.clear_parameters()
    Case.set_vartree_inputs(self.top.driver, cases)
    self.top.driver.clear_responses()
    self.top.driver.add_responses(outputs)
    self.top.recorders = [CSVCaseRecorder(filename=self.filename)]
    self.top.recorders[0].num_backups = 0
    self.top.run()

    # check recorded cases
    cases = [case for case in self.top.recorders[0].get_iterator()]
    # NOTE(review): no assertions on 'cases' follow in this view -- the
    # readback is only exercised, not checked; verify upstream.
def test_inoutCSV_empty_inputs(self):
    """Cases with no inputs should round-trip through the CSV recorder."""
    # now create some Cases
    outputs = ['comp1.z']
    cases = []
    for i in range(10):
        cases.append(Case(inputs=[], outputs=outputs, label='case%s'%i))
    self.top.driver.iterator = ListCaseIterator(cases)
    self.top.driver.recorders = [CSVCaseRecorder(filename=self.filename)]
    self.top.run()

    # now use the CSV recorder as source of Cases
    self.top.driver.iterator = self.top.driver.recorders[0].get_iterator()

    sout = StringIO.StringIO()
    self.top.driver.recorders = [DumpCaseRecorder(sout)]
    self.top.run()

    # uuid varies per run; it is matched by prefix only below.
    expected = [
        'Case: case8',
        ' uuid: ad4c1b76-64fb-11e0-95a8-001e8cf75fe',
        ' outputs:',
        ' comp1.z: 0.0',
    ]
    lines = sout.getvalue().split('\n')
    for index, line in enumerate(lines):
        if line.startswith('Case: case8'):
            for i in range(len(expected)):
                if expected[i].startswith(' uuid:'):
                    self.assertTrue(lines[index+i].startswith(' uuid:'))
                else:
                    self.assertEqual(lines[index+i], expected[i])
            break
    else:
        self.fail("couldn't find the expected Case")
def test_nooutput(self):
    """Missing-output cases under RETRY policy record an error message."""
    logging.debug('')
    logging.debug('test_nooutput')

    # Build two cases that request the nonexistent output 'driven.sum_z'.
    bad_cases = []
    for _ in range(2):
        ins = [('driven.x', numpy_random.normal(size=4)),
               ('driven.y', numpy_random.normal(size=10))]
        outs = [('driven.rosen_suzuki', None), ('driven.sum_z', None)]
        bad_cases.append(Case(ins, outs))

    self.model.driver.iterator = ListCaseIterator(bad_cases)
    recorder = ListCaseRecorder()
    self.model.driver.recorders = [recorder]
    self.model.driver.error_policy = 'RETRY'
    self.model.run()

    # Every case was retried and recorded with the same failure message.
    self.assertEqual(len(recorder), len(bad_cases))
    expected_msg = "driver: Exception getting case outputs: " \
                   "driven: 'DrivenComponent' object has no attribute 'sum_z'"
    for recorded in recorder.cases:
        self.assertEqual(recorded.msg, expected_msg)
def test_close(self):
    """Recording on a closed CSV recorder must raise RuntimeError."""
    self.top.driver.recorders = [CSVCaseRecorder(filename=self.filename)]
    self.top.run()
    case = Case(inputs=[('comp2.a_slot', None)])
    # NOTE: assert_raises evaluates its code string in locals(), so the
    # local name 'case' must not be renamed.
    assert_raises(self, 'self.top.driver.recorders[0].record(case)',
                  globals(), locals(), RuntimeError,
                  'Attempt to record on closed recorder')
def test_nooutput(self):
    """RETRY policy records a per-case error message with a UUID component."""
    logging.debug('')
    logging.debug('test_nooutput')

    # Create cases with missing output 'dc.sum_z'.
    cases = []
    for i in range(2):
        inputs = [('driven.x', numpy_random.normal(size=4)),
                  ('driven.y', numpy_random.normal(size=10))]
        outputs = [('driven.rosen_suzuki', None), ('driven.sum_z', None)]
        cases.append(Case(inputs, outputs))

    self.model.driver.iterator = ListCaseIterator(cases)
    results = ListCaseRecorder()
    self.model.driver.recorders = [results]
    self.model.driver.printvars = ['driven.extra']
    self.model.driver.error_policy = 'RETRY'
    self.model.run()

    self.assertEqual(len(results), len(cases))
    for case in results.cases:
        # The UUID portion of the message varies, so match with a regex
        # after normalizing it via replace_uuid().
        expected = "driver: Exception getting case outputs: " \
                   "driven \(UUID.[0-9]+-1\): " \
                   "'DrivenComponent' object has no attribute 'sum_z'"
        msg = replace_uuid(case.msg)
        self.assertTrue(re.match(expected, msg))
def test_subassembly(self):
    """CaseIteratorDriver in a subassembly, sequential then concurrent."""
    logging.debug('')
    logging.debug('test_subassembly')

    top = set_as_top(Assembly())
    sub = top.add('sub', Assembly())
    sub.force_execute = True
    top.driver.workflow.add('sub')

    sub.add('driver', CaseIteratorDriver())
    sub.add('comp1', TracedComponent())
    sub.add('comp2', TracedComponent())
    sub.driver.workflow.add(('comp1', 'comp2'))

    cases = []
    for i in range(3):
        # NOTE(review): the outputs value is a plain 2-tuple of names --
        # the doubled parens do not create a nested tuple.
        cases.append(Case(label=str(i),
                          inputs=(('comp1.inp', i), ('comp2.inp', i)),
                          outputs=(('comp1.itername', 'comp2.itername'))))

    # Sequential.
    sub.driver.iterator = ListCaseIterator(cases)
    top.run()
    self.verify_itername(sub.driver.evaluated, subassembly=True)

    # Concurrent.
    sub.driver.sequential = False
    sub.driver.iterator = ListCaseIterator(cases)
    top.run()
    self.verify_itername(sub.driver.evaluated, subassembly=True)
def setUp(self):
    """Two chained components driven by a CaseIteratorDriver, ten cases."""
    self.top = top = set_as_top(Assembly())
    driver = top.add('driver', CaseIteratorDriver())
    top.add('comp1', TExecComp(exprs=['z=x+y']))
    top.add('comp2', ExecComp(exprs=['z=x+1']))
    top.connect('comp1.z', 'comp2.x')
    driver.workflow.add(['comp1', 'comp2'])
    # Build ten cases sweeping comp1's inputs.
    outputs = ['comp1.z', 'comp2.z']
    cases = [Case(inputs=[('comp1.x', n), ('comp1.y', n*2)], outputs=outputs)
             for n in range(10)]
    Case.set_vartree_inputs(driver, cases)
    driver.add_responses(outputs)
def setUp(self):
    """Two chained components driven by SimpleCaseIterDriver, ten cases."""
    self.top = top = set_as_top(Assembly())
    driver = top.add('driver', SimpleCaseIterDriver())
    top.add('comp1', ExecComp(exprs=['z=x+y']))
    top.add('comp2', ExecComp(exprs=['z=x+1']))
    top.connect('comp1.z', 'comp2.x')
    driver.workflow.add(['comp1', 'comp2'])
    # Build ten cases sweeping comp1's inputs.
    outputs = ['comp1.z', 'comp2.z']
    cases = [Case(inputs=[('comp1.x', n), ('comp1.y', n * 2)],
                  outputs=outputs)
             for n in range(10)]
    Case.set_vartree_inputs(driver, cases)
    driver.add_responses(outputs)
def execute(self):
    """Sweep box width/height/depth, run the workflow, record each case."""
    for width in range(1, 2):
        for height in range(1, 3):
            for depth in range(1, 4):
                self._logger.debug("w,h,d %s, %s, %s", width, height, depth)
                self.set_parameters((width, height, depth))
                self.workflow.run()
                volume, area = self.eval_objectives()
                self._logger.debug(" v,a %s, %s", volume, area)
                box_case = Case()
                box_case.inputs = [("width", None, width),
                                   ("height", None, height),
                                   ("depth", None, depth)]
                # 'pid' shows access to the remote box from the driver.
                box_case.outputs = [("volume", None, volume),
                                    ("area", None, area),
                                    ("pid", None, self.parent.box.pid)]
                for recorder in self.recorders:
                    recorder.record(box_case)
def setup_cases(self): """ setup the cases """ # openmdao through v0.9.5 at least, breaks at 0.1.0 self.runcases = [] run_case_builder = self.aerocode.getRunCaseBuilder() for dlc in self.studycases: self.runcases.append(Case(inputs=[('runner.inputs', dlc)])) print "building dlc for: ", dlc.x, dlc.name
def _create_cases(self, level):
    """Build ``self.num_cases`` labeled cases; values encode the level."""
    out_names = ['comp1.z', 'comp2.z']
    result = []
    for n in range(self.num_cases):
        ins = [('comp1.x', 100*level + n), ('comp1.y', 100*level + n + 1)]
        result.append(Case(inputs=ins, outputs=out_names,
                           label='L%d_case%d'%(level, n)))
    return result
def execute(self):
    """Generate max_iterations cases for the named component, then run."""
    in_name = self.comp_name + '.x'
    out_name = self.comp_name + '.y'
    generated = [Case(inputs=[(in_name, n)], outputs=[out_name])
                 for n in range(self.max_iterations)]
    self.iterator = ListCaseIterator(generated)
    super(SCIDriver, self).execute()
def test_warm_start(self):
    """A MetaModel warm-started from recorded cases should reproduce the
    underlying Simple model without new training runs of its own."""
    metamodel = MetaModel()
    metamodel.name = 'meta'
    metamodel.default_surrogate = KrigingSurrogate()
    metamodel.model = Simple()
    metamodel.recorder = DumbRecorder()
    simple = Simple()

    # Train the first metamodel on two points, recording each case under
    # the *second* metamodel's name ('meta2') so it can warm-start later.
    cases = []

    metamodel.a = 1.
    metamodel.b = 2.
    metamodel.train_next = True
    metamodel.run()
    inputs = [('meta2.a', metamodel.a), ('meta2.b', metamodel.b)]
    outputs = [('meta2.c', metamodel.c.mu), ('meta2.d', metamodel.d.mu)]
    cases.append(Case(inputs=inputs, outputs=outputs))

    metamodel.a = 3.
    metamodel.b = 5.
    metamodel.train_next = True
    metamodel.run()
    inputs = [('meta2.a', metamodel.a), ('meta2.b', metamodel.b)]
    outputs = [('meta2.c', metamodel.c.mu), ('meta2.d', metamodel.d.mu)]
    cases.append(Case(inputs=inputs, outputs=outputs))

    case_iter = ListCaseIterator(cases)

    metamodel2 = MetaModel()
    metamodel2.name = 'meta2'
    metamodel2.default_surrogate = KrigingSurrogate()
    metamodel2.model = Simple()
    metamodel2.recorder = DumbRecorder()
    metamodel2.warm_start_data = case_iter

    metamodel2.a = simple.a = 1
    metamodel2.b = simple.b = 2
    # NOTE(review): this sets train_next on the FIRST metamodel; it looks
    # like metamodel2.train_next may have been intended -- confirm upstream.
    metamodel.train_next = True
    metamodel2.run()
    simple.run()

    # The warm-started surrogate must match the real model exactly.
    self.assertEqual(metamodel2.c.getvalue(), 3.)
    self.assertEqual(metamodel2.d.getvalue(), -1.)
    self.assertEqual(metamodel2.c.getvalue(), simple.c)
    self.assertEqual(metamodel2.d.getvalue(), simple.d)
def execute(self):
    """Run one iteration and record current input/output values."""
    self.set_events()
    self.run_iteration()
    snapshot = Case(
        inputs=[(name, self.parent.get(name)) for name in self.ins],
        outputs=[(name, self.parent.get(name)) for name in self.outs])
    for recorder in self.recorders:
        recorder.record(snapshot)
def test_sequence(self):
    """SequenceCaseFilter accepts exactly the listed sequence numbers."""
    wanted = [1, 3, 5]
    seq_filter = SequenceCaseFilter(wanted)
    picked = [n for n in range(10) if seq_filter.select(n, Case())]
    self.assertEqual(picked, wanted)
def test_slice(self):
    """SliceCaseFilter mimics sequence slicing over case numbers."""
    expected = range(2, 10, 3)
    slice_filter = SliceCaseFilter(start=2, stop=10, step=3)
    picked = [n for n in range(10) if slice_filter.select(n, Case())]
    self.assertEqual(picked, expected)
def test_string(self):
    """String, unicode, and list values must survive DB pickling."""
    recorder = DBCaseRecorder()
    original = Case(inputs=[('str', 'Normal String'),
                            ('unicode', u'Unicode String'),
                            ('list', ['Hello', 'world'])])
    recorder.record(original)
    # Read everything back and verify each value round-tripped intact.
    for stored in recorder.get_iterator():
        self.assertEqual(stored['str'], 'Normal String')
        self.assertEqual(stored['unicode'], u'Unicode String')
        self.assertEqual(stored['list'], ['Hello', 'world'])
def generate_cases(self, force_errors=False):
    """Build ten random cases; every fourth raises when force_errors is set."""
    self.cases = []
    for n in range(10):
        should_raise = force_errors and n % 4 == 3
        ins = [('driven.x', numpy_random.normal(size=4)),
               ('driven.y', numpy_random.normal(size=10)),
               ('driven.raise_error', should_raise),
               ('driven.stop_exec', False)]
        outs = ['driven.rosen_suzuki', 'driven.sum_y']
        self.cases.append(Case(ins, outs, label=str(n)))
def execute(self):
    """Generate ten random cases to be evaluated."""
    generated = []
    for n in range(10):
        ins = [('driven.x', numpy_random.normal(size=4)),
               ('driven.y', numpy_random.normal(size=10)),
               ('driven.raise_error', False),
               ('driven.stop_exec', False)]
        generated.append(Case(ins, ['driven.rosen_suzuki', 'driven.sum_y'],
                              label=str(n)))
    self.cases = ListCaseIterator(generated)
def setUp(self):
    """Create 20 cases with scalar inputs and one distribution output."""
    built = []
    for idx in range(20):
        as_float = float(idx)
        c = Case(inputs=[('comp1.x', as_float), ('comp1.y', idx * 2.)])
        # Outputs are attached directly rather than via the constructor.
        c._outputs = {'comp1.z': idx * 1.5,
                      'comp2.normal': NormalDistribution(as_float, 0.5)}
        built.append(c)
    self.caseiter = ListCaseIterator(built)
    self.varnames = ['comp2.normal', 'comp1.x', 'comp1.z']
def setUp(self):
    """Assembly, ten float-input cases, and a temp dir for JSON output."""
    self.top = top = set_as_top(Assembly())
    driver = top.add('driver', CaseIteratorDriver())
    top.add('comp1', TExecComp(exprs=['z=x+y']))
    top.add('comp2', ExecComp(exprs=['z=x+1']))
    top.connect('comp1.z', 'comp2.x')
    driver.workflow.add(['comp1', 'comp2'])
    # Build ten cases with float inputs.
    outputs = ['comp1.z', 'comp2.z']
    cases = []
    for n in range(10):
        x_val = float(n)
        cases.append(Case(inputs=[('comp1.x', x_val), ('comp1.y', x_val * 2)],
                          outputs=outputs))
    Case.set_vartree_inputs(driver, cases)
    driver.add_responses(outputs)
    self.tempdir = tempfile.mkdtemp(prefix='test_jsonrecorder-')
def setUp(self):
    """Assembly whose comp1 also carries dict and list inputs."""
    self.top = top = set_as_top(Assembly())
    driver = top.add('driver', SimpleCaseIterDriver())
    top.add('comp1', ExecComp(exprs=['z=x+y']))
    top.add('comp2', ExecComp(exprs=['z=x+1']))
    top.comp1.add('a_dict', Dict({}, iotype='in'))
    top.comp1.add('a_list', List([], iotype='in'))
    top.connect('comp1.z', 'comp2.x')
    driver.workflow.add(['comp1', 'comp2'])
    # Build ten cases; each includes constant dict/list inputs.
    outputs = ['comp1.z', 'comp2.z']
    cases = [Case(inputs=[('comp1.x', n), ('comp1.y', n*2),
                          ('comp1.a_dict', {'a': 'b'}),
                          ('comp1.a_list', ['a', 'b'])],
                  outputs=outputs)
             for n in range(10)]
    Case.set_vartree_inputs(driver, cases)
    driver.add_responses(outputs)
def execute(self):
    """Sweep box width/height/depth, run the workflow, record each case."""
    for width in range(1, 2):
        for height in range(1, 3):
            for depth in range(1, 4):
                self._logger.debug('w,h,d %s, %s, %s', width, height, depth)
                self.set_parameters((width, height, depth))
                self.workflow.run()
                volume, area = self.eval_objectives()
                self._logger.debug(' v,a %s, %s', volume, area)
                box_case = Case()
                box_case.inputs = [('width', None, width),
                                   ('height', None, height),
                                   ('depth', None, depth)]
                # 'pid' shows access to the remote box from the driver.
                box_case.outputs = [('volume', None, volume),
                                    ('area', None, area),
                                    ('pid', None, self.parent.box.pid)]
                self.recorder.record(box_case)
def test_CSVCaseIterator_read_external_file_with_header(self):
    """Read a CSV whose first row names the variables; check GUI attrs."""
    # NOTE: the last two row literals are adjacent strings and merge into
    # one list element; the written file content is identical either way.
    csv_data = ['"comp1.x", "comp1.y", "comp2.b_string"\n',
                '33.5, 76.2, "Hello There"\n'
                '3.14159, 0, "Goodbye z"\n'
                ]
    outfile = open(self.filename, 'w')
    outfile.writelines(csv_data)
    outfile.close()

    self.top.comp2.add('b_string', Str("Hello',;','", iotype='in'))

    sout = StringIO.StringIO()
    cases = [case for case in CSVCaseIterator(filename=self.filename)]
    self.top.driver.clear_parameters()
    Case.set_vartree_inputs(self.top.driver, cases)
    self.top.recorders = [DumpCaseRecorder(sout)]
    self.top.run()

    # Model should hold the values from the last data row.
    self.assertEqual(self.top.comp1.x, 3.14159)
    self.assertEqual(self.top.comp1.y, 0.0)
    self.assertEqual(self.top.comp2.b_string, "Goodbye z")

    # Gui pane stuff
    iterator = CSVCaseIterator(filename=self.filename)
    attrs = iterator.get_attributes()
    self.assertTrue("Inputs" in attrs.keys())
    self.assertTrue({'name': 'filename',
                     'type': 'str',
                     'connected': '',
                     'value': 'openmdao_test_csv_case_iterator.csv',
                     'desc': 'Name of the CSV file to be iterated.'} in attrs['Inputs'])
    self.assertTrue({'name': 'headers',
                     'type': 'NoneType',
                     'connected': '',
                     'value': 'None',
                     'desc': 'Optional dictionary of header labels, where the key is the column number.'} in attrs['Inputs'])
def _create_nested_assemblies(self, dbname, drivertype):
    """Build a three-deep assembly nesting, each level with its own cases."""
    top = set_as_top(self._create_assembly(dbname, drivertype))
    top.add('asm', self._create_assembly(dbname, drivertype))
    top.driver.workflow.add('asm')
    top.asm.add('asm', self._create_assembly(dbname, drivertype))
    top.asm.driver.workflow.add('asm')
    top.recorders = [DBCaseRecorder(dbname, append=True)]
    # Each nesting level gets cases tagged with its own level number.
    for level, drv in ((1, top.driver),
                       (2, top.asm.driver),
                       (3, top.asm.asm.driver)):
        Case.set_vartree_inputs(drv, self._create_cases(level))
    return top
def _create_nested_workflows(self, dbname, drivertype):
    """Chain three drivers in one assembly to exercise the Case hierarchy.

    The inner driver loops overwrite values set by the outer loops; only
    the structure of the resulting Case hierarchy matters here.
    """
    top = set_as_top(self._create_assembly(dbname, drivertype))
    driver2 = top.add('driver2', drivertype())
    top.driver.workflow.add(['driver2'])
    driver3 = top.add('driver3', drivertype())
    top.driver2.workflow.add(['driver3'])
    top.driver3.workflow.add(['comp1', 'comp2'])
    # Each driver level gets cases tagged with its own level number.
    for level, drv in ((1, top.driver), (2, top.driver2), (3, top.driver3)):
        Case.set_vartree_inputs(drv, self._create_cases(level))
    return top
def test_flatten(self):
    """Record a run with JSONCaseRecorder, convert the case data to CSV
    via caseset_query_to_csv, read it back with CSVCaseIterator, and
    verify the flattened (array/vartree-expanded) case contents.
    """
    # try it after creating some Cases
    # more rigorous checking of the csv
    outputs = ['comp1.a_array', 'comp1.vt']
    inputs = [('comp1.x_array', array([2.0, 2.0, 2.0]))]
    cases = [Case(inputs=inputs, outputs=outputs)]
    self.top.driver.clear_parameters()
    Case.set_vartree_inputs(self.top.driver, cases)
    self.top.driver.clear_responses()
    self.top.driver.add_responses(outputs)
    self.top.recorders = [JSONCaseRecorder(self.filename_json)]
    self.top.recorders[0].num_backups = 0
    self.top.run()

    cds = CaseDataset(self.filename_json, 'json')
    data = cds.data.fetch()  # results
    caseset_query_to_csv(data, self.filename_csv)

    # check recorded cases
    cases = [case for case in CSVCaseIterator(filename=self.filename_csv)]
    sout = StringIO.StringIO()
    for case in cases:
        print >>sout, case

    expected = \
'''Case:
   uuid: 07280785-9b76-11e4-800d-20c9d0478eff
   timestamp: 1421189195.646824
   parent_uuid: 0720c385-9b76-11e4-b796-20c9d0478eff
   inputs:
      comp1.x_array[0]: 2.0
      comp1.x_array[1]: 2.0
      comp1.x_array[2]: 2.0
   outputs:
      _pseudo_4.out0[0]: 1.0
      _pseudo_4.out0[1]: 3.0
      _pseudo_4.out0[2]: 5.5
      _pseudo_5.out0.data: 
      _pseudo_5.out0.v1: 1.0
      _pseudo_5.out0.v2: 2.0
      _pseudo_5.out0.vt2.data: 
      _pseudo_5.out0.vt2.vt3.a: 1.0
      _pseudo_5.out0.vt2.vt3.b: 12.0
      _pseudo_5.out0.vt2.vt3.data: 
      _pseudo_5.out0.vt2.x: -1.0
      _pseudo_5.out0.vt2.y: -2.0
      comp1.a_array[0]: 1.0
      comp1.a_array[1]: 3.0
      comp1.a_array[2]: 5.5
      comp1.a_string: Hello',;','
      comp1.derivative_exec_count: 0.0
      comp1.exec_count: 1.0
      comp1.itername: 1-comp1
      comp1.vt.data: 
      comp1.vt.v1: 1.0
      comp1.vt.v2: 2.0
      comp1.vt.vt2.data: 
      comp1.vt.vt2.vt3.a: 1.0
      comp1.vt.vt2.vt3.b: 12.0
      comp1.vt.vt2.vt3.data: 
      comp1.vt.vt2.x: -1.0
      comp1.vt.vt2.y: -2.0
      comp1.z: 0.0
      comp2.derivative_exec_count: 0.0
      comp2.exec_count: 1.0
      comp2.itername: 1-comp2
      comp2.z: 1.0
      driver.workflow.itername: 1
'''
    # NOTE(fix): this debug print was left active in the original (the
    # sibling tests have it commented out); keep it available but silent.
    # print sout.getvalue()

    lines = sout.getvalue().split('\n')
    expected_lines = expected.split('\n')
    for index, line in enumerate(lines):
        if line.startswith('Case:'):
            # Compare line-by-line; uuid/parent_uuid/timestamp values are
            # run-dependent, so only their prefixes are checked.
            for i, exp in enumerate(expected_lines):
                if exp.startswith('   uuid:'):
                    self.assertTrue(lines[index + i].startswith('   uuid:'))
                elif exp.startswith('   parent_uuid:'):
                    self.assertTrue(lines[index + i].startswith('   parent_uuid:'))
                elif exp.startswith('   timestamp:'):
                    self.assertTrue(lines[index + i].startswith('   timestamp:'))
                else:
                    self.assertEqual(lines[index + i], exp)
            break
    else:
        self.fail("couldn't find the expected Case")
def test_flatten(self):
    """Record a run with CSVCaseRecorder (which flattens arrays and
    vartrees into scalar columns), verify the flattened dump, then feed
    the recorded cases back through a DumpCaseRecorder run and verify
    the unflattened dump.
    """
    # create some Cases
    outputs = ['comp1.a_array', 'comp1.vt']
    inputs = [('comp1.x_array', array([2.0, 2.0, 2.0]))]
    cases = [Case(inputs=inputs, outputs=outputs)]
    self.top.driver.clear_parameters()
    Case.set_vartree_inputs(self.top.driver, cases)
    self.top.driver.clear_responses()
    self.top.driver.add_responses(outputs)
    self.top.recorders = [CSVCaseRecorder(filename=self.filename)]
    self.top.recorders[0].num_backups = 0
    self.top.run()

    # check recorded cases
    cases = [case for case in self.top.recorders[0].get_iterator()]
    sout = StringIO.StringIO()
    for case in cases:
        print >>sout, case
    # Flattened view: array elements and vartree leaves appear as
    # individual scalar entries.
    expected = [
        'Case:',
        '   uuid: ad4c1b76-64fb-11e0-95a8-001e8cf75fe',
        '   timestamp: 1383238593.781986',
        '   inputs:',
        '      comp1.x_array[0]: 2.0',
        '      comp1.x_array[1]: 2.0',
        '      comp1.x_array[2]: 2.0',
        '   outputs:',
        '      Response(comp1.a_array)[0]: 1.0',
        '      Response(comp1.a_array)[1]: 3.0',
        '      Response(comp1.a_array)[2]: 5.5',
        '      Response(comp1.vt).data: ',
        '      Response(comp1.vt).v1: 1.0',
        '      Response(comp1.vt).v2: 2.0',
        '      Response(comp1.vt).vt2.data: ',
        '      Response(comp1.vt).vt2.vt3.a: 1.0',
        '      Response(comp1.vt).vt2.vt3.b: 12.0',
        '      Response(comp1.vt).vt2.vt3.data: ',
        '      Response(comp1.vt).vt2.x: -1.0',
        '      Response(comp1.vt).vt2.y: -2.0',
        '      comp1.a_array[0]: 1.0',
        '      comp1.a_array[1]: 3.0',
        '      comp1.a_array[2]: 5.5',
        "      comp1.a_string: Hello',;','",
        '      comp1.derivative_exec_count: 0.0',
        '      comp1.exec_count: 1.0',
        '      comp1.itername: 1-comp1',
        '      comp1.vt.data: ',
        '      comp1.vt.v1: 1.0',
        '      comp1.vt.v2: 2.0',
        '      comp1.vt.vt2.data: ',
        '      comp1.vt.vt2.vt3.a: 1.0',
        '      comp1.vt.vt2.vt3.b: 12.0',
        '      comp1.vt.vt2.vt3.data: ',
        '      comp1.vt.vt2.x: -1.0',
        '      comp1.vt.vt2.y: -2.0',
        '      comp1.z: 0.0',
        '      comp2.derivative_exec_count: 0.0',
        '      comp2.exec_count: 1.0',
        '      comp2.itername: 1-comp2',
        '      comp2.z: 1.0',
        '      driver.workflow.itername: 1',
    ]
    # print sout.getvalue()
    lines = sout.getvalue().split('\n')
    for index, line in enumerate(lines):
        if line.startswith('Case:'):
            # uuid/timestamp values change per run: prefix-check only.
            for i in range(len(expected)):
                if expected[i].startswith('   uuid:'):
                    self.assertTrue(lines[index+i].startswith('   uuid:'))
                elif expected[i].startswith('   timestamp:'):
                    self.assertTrue(lines[index+i].startswith('   timestamp:'))
                else:
                    self.assertEqual(lines[index+i], expected[i])
            break
    else:
        self.fail("couldn't find the expected Case")

    # now use the CSV recorder as source of Cases
    self.top.driver.clear_parameters()
    Case.set_vartree_inputs(self.top.driver, cases)
    sout = StringIO.StringIO()
    self.top.recorders = [DumpCaseRecorder(sout)]  # Dump not flattened.
    self.top.run()
    # Unflattened view: arrays print as numpy reprs and vartrees as
    # object reprs (prefix-checked below, since the repr carries an id).
    expected = [
        'Case:',
        '   uuid: ad4c1b76-64fb-11e0-95a8-001e8cf75fe',
        '   timestamp: 1383238593.781986',
        '   inputs:',
        '      comp1.x_array[0]: 2.0',
        '      comp1.x_array[1]: 2.0',
        '      comp1.x_array[2]: 2.0',
        '   outputs:',
        '      Response(comp1.a_array): [ 1.   3.   5.5]',
        '      Response(comp1.vt): <openmdao.main.test.test_vartree.DumbVT object',
        '      comp1.a_array: [ 1.   3.   5.5]',
        "      comp1.a_string: Hello',;','",
        '      comp1.derivative_exec_count: 0',
        '      comp1.exec_count: 2',
        '      comp1.itername: 1-comp1',
        '      comp1.vt: <openmdao.main.test.test_vartree.DumbVT object',
        '      comp1.z: 0.0',
        '      comp2.derivative_exec_count: 0',
        '      comp2.exec_count: 2',
        '      comp2.itername: 1-comp2',
        '      comp2.z: 1.0',
        '      driver.workflow.itername: 1',
    ]
    # print sout.getvalue()
    lines = sout.getvalue().split('\n')
    for index, line in enumerate(lines):
        if line.startswith('Case:'):
            for i in range(len(expected)):
                if expected[i].startswith('   uuid:'):
                    self.assertTrue(lines[index+i].startswith('   uuid:'))
                elif expected[i].startswith('   timestamp:'):
                    self.assertTrue(lines[index+i].startswith('   timestamp:'))
                elif expected[i].startswith('      Response(comp1.vt):') or \
                     expected[i].startswith('      comp1.vt:'):
                    # Object reprs include a memory address: prefix only.
                    self.assertTrue(lines[index+i].startswith(expected[i]))
                else:
                    self.assertEqual(lines[index+i], expected[i])
            break
    else:
        self.fail("couldn't find the expected Case")
def test_flatten(self):
    """Record a run with JSONCaseRecorder, export the dataset to CSV via
    caseset_query_to_csv, then read the CSV back with CSVCaseIterator
    and check the flattened case line-by-line (uuid, parent_uuid and
    timestamp are prefix-checked since their values vary per run).
    """
    # try it after creating some Cases
    # more rigorous checking of the csv
    outputs = ['comp1.a_array', 'comp1.vt']
    inputs = [('comp1.x_array', array([2.0, 2.0, 2.0]))]
    cases = [Case(inputs=inputs, outputs=outputs)]
    self.top.driver.clear_parameters()
    Case.set_vartree_inputs(self.top.driver, cases)
    self.top.driver.clear_responses()
    self.top.driver.add_responses(outputs)
    self.top.recorders = [JSONCaseRecorder(self.filename_json)]
    self.top.recorders[0].num_backups = 0
    self.top.run()

    dataset = CaseDataset(self.filename_json, 'json')
    fetched = dataset.data.fetch()  # results
    caseset_query_to_csv(fetched, self.filename_csv)

    # check recorded cases
    recorded = list(CSVCaseIterator(filename=self.filename_csv))
    dump = StringIO.StringIO()
    for case in recorded:
        print >>dump, case

    expected = [
        'Case:',
        '   uuid: ad4c1b76-64fb-11e0-95a8-001e8cf75fe',
        '   timestamp: 1383238593.781986',
        '   parent_uuid: ad4c1b76-64fb-11e0-95a8-001e8cf75fe',
        '   inputs:',
        '      comp1.x_array[0]: 2.0',
        '      comp1.x_array[1]: 2.0',
        '      comp1.x_array[2]: 2.0',
        '   outputs:',
        '      Response(comp1.a_array)[0]: 1.0',
        '      Response(comp1.a_array)[1]: 3.0',
        '      Response(comp1.a_array)[2]: 5.5',
        '      Response(comp1.vt).data: ',
        '      Response(comp1.vt).v1: 1.0',
        '      Response(comp1.vt).v2: 2.0',
        '      Response(comp1.vt).vt2.data: ',
        '      Response(comp1.vt).vt2.vt3.a: 1.0',
        '      Response(comp1.vt).vt2.vt3.b: 12.0',
        '      Response(comp1.vt).vt2.vt3.data: ',
        '      Response(comp1.vt).vt2.x: -1.0',
        '      Response(comp1.vt).vt2.y: -2.0',
        '      comp1.a_array[0]: 1.0',
        '      comp1.a_array[1]: 3.0',
        '      comp1.a_array[2]: 5.5',
        "      comp1.a_string: Hello',;','",
        '      comp1.derivative_exec_count: 0.0',
        '      comp1.exec_count: 1.0',
        '      comp1.itername: 1-comp1',
        '      comp1.vt.data: ',
        '      comp1.vt.v1: 1.0',
        '      comp1.vt.v2: 2.0',
        '      comp1.vt.vt2.data: ',
        '      comp1.vt.vt2.vt3.a: 1.0',
        '      comp1.vt.vt2.vt3.b: 12.0',
        '      comp1.vt.vt2.vt3.data: ',
        '      comp1.vt.vt2.x: -1.0',
        '      comp1.vt.vt2.y: -2.0',
        '      comp1.z: 0.0',
        '      comp2.derivative_exec_count: 0.0',
        '      comp2.exec_count: 1.0',
        '      comp2.itername: 1-comp2',
        '      comp2.z: 1.0',
        '      driver.workflow.itername: 1',
    ]
    # print dump.getvalue()

    # Locate the first dumped Case, then compare it entry-by-entry.
    report = dump.getvalue().split('\n')
    start = None
    for idx, text in enumerate(report):
        if text.startswith('Case:'):
            start = idx
            break
    if start is None:
        self.fail("couldn't find the expected Case")
    for offset, want in enumerate(expected):
        got = report[start + offset]
        if want.startswith('   uuid:'):
            self.assertTrue(got.startswith('   uuid:'))
        elif want.startswith('   parent_uuid:'):
            self.assertTrue(got.startswith('   parent_uuid:'))
        elif want.startswith('   timestamp:'):
            self.assertTrue(got.startswith('   timestamp:'))
        else:
            self.assertEqual(got, want)