def setup_cases(self): """ setup the cases """ if (self.run_dakota): self.ws_driver.setup_cases(self.studycases, self.aerocode) else: self.runcases = [] ## cases should be list of DesignLoadCases for dlc in self.studycases: if (self.raw_cases): # goes with "tabular" input distn format print "building dlc for: ", dlc.x runcase = ParamDesignLoadCaseBuilder.buildRunCase_x( dlc.x, dlc.param_names, dlc) self.runcases.append( Case(inputs=[('runner.input', runcase)])) else: print 'Generating run cases for study case %s' % dlc.name # ask aero code to produce runcass for this study case allruns = self.aerocode.genRunCases(dlc) for runcase in allruns: print 'Adding Case for run case %s' % runcase.name # create the case # self.runcases.append(Case(inputs= [('runner.input', runcase)], # outputs=['runner.output', 'runner.input'])) self.runcases.append( Case(inputs=[('runner.input', runcase)])) ## vars used here need to exist in relevant (sub)-objects ##(ie aerocode.input needs to exist--eg in openAeroCode) , else openMDAO throws exception ## This will result in aerocode.execute() being called with self.input = runcase = relevant RunCase save_run_cases(dlc.name, allruns) self.ws_driver.iterator = ListCaseIterator(self.runcases)
def test_ei_2obj(self):
    """Two-objective expected improvement: check EI and PI values."""
    ei = MultiObjExpectedImprovement()
    bests = CaseSet()
    list_of_cases = [Case(outputs=[("y1", 1), ("y2", 10)]),
                     Case(outputs=[("y1", 1), ("y2", -10)])]
    for case in list_of_cases:
        bests.record(case)
    ei.best_cases = bests
    ei.criteria = ["y1", "y2"]
    ei.predicted_values = [NormalDistribution(mu=1, sigma=1),
                           NormalDistribution(mu=0, sigma=1)]
    ei.calc_switch = "EI"
    ei.execute()
    self.assertAlmostEqual([5.0], ei.EI, 1)
    # BUG FIX: was assertEqual(0.5, ei.PI, 6) -- assertEqual has no
    # 'places' argument, so the 6 was silently taken as the failure
    # message and the comparison demanded exact float equality.
    self.assertAlmostEqual(0.5, ei.PI, 6)
def _case_from_values(self, values):
    """Build a Case from a flat value list, splitting inputs from outputs
    at ``self._split_idx`` (names come from ``self._names``)."""
    split = self._split_idx
    input_pairs = list(zip(self._names[:split], values[:split]))
    output_pairs = list(zip(self._names[split:], values[split:]))
    return Case(inputs=input_pairs, outputs=output_pairs,
                parent_uuid=self._parent_uuid)
def _get_cases(self):
    """Generate one Case per DOE row, de-normalizing each [0,1] value
    into the corresponding parameter's [low, high] range.  Optionally
    records the raw normalized rows to a CSV file."""
    params = self.get_parameters().values()
    self.DOEgenerator.num_parameters = len(params)
    do_record = self.record_doe
    event_names = self.get_events()
    out_names = self.case_outputs
    filt = self.case_filter

    writer = None
    if do_record:
        if not self.doe_filename:
            self.doe_filename = '%s.csv' % self.name
        # NOTE(review): the file handle is stored on self; it appears to be
        # closed elsewhere, not in this generator -- confirm.
        self._csv_file = open(self.doe_filename, 'wb')
        writer = csv.writer(self._csv_file)

    for i, row in enumerate(self.DOEgenerator):
        if do_record:
            writer.writerow(['%.16g' % val for val in row])
        scaled = [p.low + (p.high - p.low) * val
                  for p, val in zip(params, row)]
        case = self.set_parameters(scaled, Case(parent_uuid=self._case_id))
        # now add events
        for varname in event_names:
            case.add_input(varname, True)
        case.add_outputs(out_names)
        if filt is None or filt.select(i, case):
            yield case
def _get_cases(self):
    """Generate cases over an adaptive neighborhood around the current
    parameter vector P.

    The neighborhood [new_low, new_high] shrinks toward P as controlled
    by ``alpha`` and ``beta``; each DOE row is mapped into it.  The
    current point itself (normalized as M) is appended as a final case.
    """
    self.DOEgenerator.num_parameters = self.total_parameters()
    upper = self.get_upper_bounds()
    lower = self.get_lower_bounds()
    P = self.eval_parameters()
    M = (P - lower) / (upper - lower)

    # PERF: these depend only on P, alpha and beta -- not on the DOE row --
    # so compute them once instead of on every loop iteration (hoisted).
    delta_low = P - lower
    k_low = 1.0 / (1.0 + (1 - self.beta) * delta_low)
    new_low = P - self.alpha * k_low * delta_low  # /(self.exec_count+1)
    delta_high = upper - P
    k_high = 1.0 / (1.0 + (1 - self.beta) * delta_high)
    new_high = P + self.alpha * k_high * delta_high  # /(self.exec_count+1)

    for row in list(self.DOEgenerator) + [tuple(M)]:
        vals = new_low + (new_high - new_low) * row
        case = self.set_parameters(vals, Case(parent_uuid=self._case_id))
        # now add events
        for varname in self.get_events():
            case.add_input(varname, True)
        case.add_outputs(self.case_outputs)
        yield case
def _next_case(self):
    """ Generator which returns Cases one at a time.

    Reads rows from ``self.data``.  The first row supplies field names:
    either an OpenMDAO-style header containing '/INPUTS' (which also
    carries '/METADATA' columns for uuid/parent_uuid/msg), or a plain
    header row when ``self.headers`` is None.
    """
    uuid = parent_uuid = msg = ""
    uuid_field = parent_uuid_field = msg_field = None
    if self.headers is None:
        input_fields = {}
    else:
        input_fields = self.headers
    output_fields = {}

    for row in self.data:
        # Get fieldnames from file
        if self._need_fieldnames:
            # OpenMDAO-style CSV file
            if '/INPUTS' in row:
                input_fields, output_fields = self._parse_fieldnames(row)
                self.timestamp_field = 1
                uuid_field = row.index('/METADATA') + 1
                parent_uuid_field = uuid_field + 1
                msg_field = uuid_field + 2
            # Read headers from file
            elif self.headers is None:
                for i, field in enumerate(row):
                    if field == 'timestamp':
                        self.timestamp_field = i
                    else:
                        input_fields[i] = field
            self._need_fieldnames = False
            continue

        if uuid_field is not None:
            uuid = row[uuid_field]
            parent_uuid = row[parent_uuid_field]
            msg = row[msg_field]

        inputs = []
        for i, field in input_fields.iteritems():
            # Convert bools from string back into bools.
            # Note, only really need this for inputs.
            # BUG FIX: was row[i] = bool(row[i]) -- bool('False') is True
            # because any non-empty string is truthy, so 'False' round-
            # tripped to True.  Compare against the literal instead.
            if row[i] in ['True', 'False']:
                row[i] = (row[i] == 'True')
            inputs.append((field, row[i]))

        outputs = []
        for i, field in output_fields.iteritems():
            outputs.append((field, row[i]))

        exc = None if not msg else Exception(msg)
        yield Case(inputs=inputs, outputs=outputs, exc=exc,
                   parent_uuid=parent_uuid)

    self._need_fieldnames = True
def _get_cases(self):
    """Generate each case.

    Vectorized variant: DOE rows are de-normalized in one array
    expression using the lower bounds and the bounds span.  Optionally
    records the raw normalized rows to a CSV file.
    """
    self.DOEgenerator.num_parameters = self.total_parameters()
    do_record = self.record_doe
    event_names = self.get_events()
    out_names = self.case_outputs
    filt = self.case_filter

    writer = None
    if do_record:
        if not self.doe_filename:
            self.doe_filename = '%s.csv' % self.name
        self._csv_file = open(self.doe_filename, 'wb')
        writer = csv.writer(self._csv_file)

    lower = self.get_lower_bounds()
    span = self.get_upper_bounds() - lower

    for i, row in enumerate(self.DOEgenerator):
        if do_record:
            writer.writerow(['%.16g' % val for val in row])
        case = self.set_parameters(lower + span * row,
                                   Case(parent_uuid=self._case_id))
        # now add events
        for varname in event_names:
            case.add_input(varname, True)
        case.add_outputs(out_names)
        if filt is None or filt.select(i, case):
            yield case
def record(self, src, inputs, outputs, err, case_uuid, parent_uuid):
    """Append a Case pairing *src*'s configured names with the given
    input/output values."""
    in_names, out_names = self._name_map[src]
    case = Case(zip(in_names, inputs), zip(out_names, outputs),
                case_uuid=case_uuid, parent_uuid=parent_uuid)
    self.cases.append(case)
def _get_cases(self):
    """Generate cases over adaptive per-parameter neighborhoods.

    For each parameter a shrunken interval [new_low, new_high] around its
    current value is computed (controlled by ``alpha`` and ``beta``); DOE
    rows are mapped into those intervals.  The current point (normalized
    as M) is appended as a final case.
    """
    params = self.get_parameters().values()
    self.DOEgenerator.num_parameters = len(params)

    M = []
    bounds = []  # (new_low, new_high) per parameter
    for p in params:
        curval = p.evaluate()
        M.append((curval - p.low) / (p.high - p.low))
        # PERF: these depend only on the parameter's current value, not
        # on the DOE row, so compute them once per parameter instead of
        # once per (row, parameter) pair (hoisted out of the row loop).
        delta_low = curval - p.low
        k_low = 1.0 / (1.0 + (1 - self.beta) * delta_low)
        new_low = curval - self.alpha * k_low * delta_low  # /(self.exec_count+1)
        delta_high = p.high - curval
        k_high = 1.0 / (1.0 + (1 - self.beta) * delta_high)
        new_high = curval + self.alpha * k_high * delta_high  # /(self.exec_count+1)
        bounds.append((new_low, new_high))

    for row in list(self.DOEgenerator) + [tuple(M)]:
        vals = [lo + (hi - lo) * val
                for (lo, hi), val in zip(bounds, row)]
        case = self.set_parameters(vals, Case(parent_uuid=self._case_id))
        # now add events
        for varname in self.get_events():
            case.add_input(varname, True)
        case.add_outputs(self.case_outputs)
        yield case
def execute(self):
    """Build one Case per iteration driving ``<comp_name>.x`` and
    collecting ``<comp_name>.y``, then run the base driver."""
    inp_name = '%s.x' % self.comp_name
    out_name = '%s.y' % self.comp_name
    cases = [Case(inputs=[(inp_name, i)], outputs=[out_name])
             for i in range(self.max_iterations)]
    Case.set_vartree_inputs(self, cases)
    super(CIDriver, self).execute()
def _get_cases(self):
    """Iterator over the cases: one Case per sample drawn from the
    distribution generator."""
    for sample in self.distribution_generator:
        case = self.set_parameters(sample,
                                   Case(parent_uuid=self._case_id))
        case.add_outputs(self.case_outputs)
        yield case
def test_ei_zero_division(self):
    """sigma == 0 must yield EI == PI == 0 instead of dividing by zero."""
    ei = ExpectedImprovement()
    best = Case(outputs=[("y", 1)])
    ei.best_case = CaseSet(best)
    ei.criteria = "y"
    ei.predicted_value = NormalDistribution(mu=1, sigma=0)
    ei.execute()
    self.assertEqual(0, ei.EI)
    self.assertEqual(0, ei.PI)
def test_ei(self):
    """Nominal EI/PI values when the prediction matches the best case."""
    ei = ExpectedImprovement()
    best = Case(outputs=[("y", 1)])
    ei.best_case = CaseSet(best)
    ei.criteria = "y"
    ei.predicted_value = NormalDistribution(mu=1, sigma=1)
    ei.execute()
    self.assertAlmostEqual([0.91], ei.EI, 2)
    self.assertAlmostEqual(0.5, ei.PI, 6)
def _next_case(self): """ Generator which returns Cases one at a time. """ # figure out which selectors are for cases and which are for variables sql = ["SELECT * FROM cases"] if self.selectors is not None: for sel in self.selectors: rhs, rel, lhs = _query_split(sel) if rhs in _casetable_attrs: if len(sql) == 1: sql.append("WHERE %s%s%s" % (rhs, rel, lhs)) else: sql.append("AND %s%s%s" % (rhs, rel, lhs)) casecur = self._connection.cursor() casecur.execute(' '.join(sql)) sql = [ 'SELECT var_id,name,case_id,sense,value from casevars WHERE case_id=%s' ] if self.selectors is not None: for sel in self.selectors: rhs, rel, lhs = _query_split(sel) if rhs in _vartable_attrs: sql.append("AND %s%s%s" % (rhs, rel, lhs)) combined = ' '.join(sql) varcur = self._connection.cursor() for cid, text_id, parent, msg, model_id, timeEnter in casecur: varcur.execute(combined % cid) inputs = [] outputs = [] for var_id, vname, case_id, sense, value in varcur: if isinstance(value, (float, int, str)): pass elif value is None: # Result when recorded value was NaN. value = float('NaN') else: try: value = loads(str(value)) except UnpicklingError as err: print 'value', type(value), repr(value) raise UnpicklingError("can't unpickle value '%s' for" " case '%s' from database: %s" % (vname, text_id, str(err))) if sense == 'i': inputs.append((vname, value)) elif sense == 'o': outputs.append((vname, value)) if len(inputs) > 0 or len(outputs) > 0: exc = Exception(msg) if msg else None yield Case(inputs=inputs, outputs=outputs, exc=exc, case_uuid=text_id, parent_uuid=parent)
def test_ei_bad_criteria(self):
    """A criteria name missing from best_case must raise ValueError."""
    ei = ExpectedImprovement()
    ei.best_case = CaseSet(Case(outputs=[("y", 1)]))
    ei.criteria = "x"
    ei.predicted_value = NormalDistribution(mu=1, sigma=1)
    try:
        ei.execute()
    except ValueError as err:
        self.assertEqual(
            str(err), ": best_case did not have an output which "
            "matched the criteria, 'x'")
    else:
        # BUG FIX: previously the test passed silently when execute()
        # raised nothing at all.
        self.fail("ValueError expected")
def test_reset_y_star_event(self):
    """Setting reset_y_star must recompute y_star from the new best cases."""
    ei = MultiObjExpectedImprovement(3)
    bests = CaseSet()
    for case in [Case(outputs=[("y1", 1), ("y2", 1), ("y3", 1)])]:
        bests.record(case)
    ei.best_cases = bests
    ei.criteria = ['y1', 'y2', 'y3']
    ei.predicted_values = [NormalDistribution(mu=1, sigma=1),
                           NormalDistribution(mu=1, sigma=1),
                           NormalDistribution(mu=1, sigma=1)]
    ei.execute()

    bests = CaseSet()
    for case in [Case(outputs=[("y1", 2), ("y2", 2), ("y3", 2)])]:
        bests.record(case)
    ei.best_cases = bests
    ei.reset_y_star = True
    ei.execute()
    # BUG FIX: assertEqual(a.all(), b.all()) compared two booleans (both
    # True for any all-nonzero arrays), so it passed even when the arrays
    # differed.  Compare element-wise instead.
    self.assertTrue((ei.y_star == array([2, 2, 2])).all())
def test_ei_nobj(self):
    """n-objective PI via the target/current interface.

    NOTE(review): unlike the sibling tests this uses ``target`` and
    ``current`` rather than ``best_cases``/``predicted_values`` --
    presumably an alternate MultiObjExpectedImprovement API; confirm.
    """
    ei = MultiObjExpectedImprovement()
    ei.target = array([[1, 1, 1]])
    # (removed an unused 'list_of_cases' local that was built but
    # never recorded anywhere)
    ei.criteria = ['y1', 'y2', 'y3']
    ei.current = [
        NormalDistribution(mu=1, sigma=1),
        NormalDistribution(mu=1, sigma=1),
        NormalDistribution(mu=1, sigma=1)
    ]
    ei.execute()
    self.assertAlmostEqual(0.875, ei.PI, 1)
def _get_cases(self):
    """Yield one Case per DOE row, de-normalizing each [0,1] value into
    the matching parameter's [low, high] range."""
    params = self.get_parameters().values()
    self.DOEgenerator.num_parameters = len(params)
    for row in self.DOEgenerator:
        scaled = []
        for p, norm in zip(params, row):
            scaled.append(p.low + (p.high - p.low) * norm)
        case = self.set_parameters(scaled, Case(parent_uuid=self._case_id))
        # now add events
        for varname in self.get_events():
            case.add_input(varname, True)
        case.add_outputs(self.case_outputs)
        yield case
def test_ei_nobj(self):
    """3-objective PI when the single best case equals all predictions."""
    ei = MultiObjExpectedImprovement(3)
    bests = CaseSet()
    bests.record(Case(outputs=[("y1", 1), ("y2", 1), ("y3", 1)]))
    ei.best_cases = bests
    ei.criteria = ['y1', 'y2', 'y3']
    ei.predicted_values = [NormalDistribution(mu=1, sigma=1)
                           for _ in range(3)]
    ei.execute()
    self.assertAlmostEqual(0.875, ei.PI, 1)
def test_2d_filter2(self):
    """2-D Pareto filter over a grid of (x, y) points."""
    pf = ParetoFilter()
    x = [1, 1, 2, 2, 2, 3, 3, 3]
    y = [2, 3, 1, 2, 3, 1, 2, 3]
    cases = []
    for x_0, y_0 in zip(x, y):
        cases.append(Case(outputs=[("x", x_0), ("y", y_0)]))
    pf.case_sets = [ListCaseIterator(cases), ]
    pf.criteria = ['x', 'y']
    pf.execute()
    x_p, y_p = zip(*[(case['x'], case['y']) for case in pf.pareto_set])
    # BUG FIX: the test previously computed x_p/y_p but asserted nothing.
    # With both criteria minimized (matching test_1d_filter, where the
    # minimum survives), only (1,2) and (2,1) are non-dominated.
    self.assertEqual(set(zip(x_p, y_p)), set([(1, 2), (2, 1)]))
def test_1d_filter(self):
    """1-D filter: the minimum survives; everything else is dominated."""
    pf = ParetoFilter()
    values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    cases = [Case(outputs=[("x", v)]) for v in values]
    pf.case_sets = [ListCaseIterator(cases), ]
    pf.criteria = ['x']
    pf.execute()
    x_p = [case['x'] for case in pf.pareto_set]
    x_dom = [case['x'] for case in pf.dominated_set]
    self.assertEqual([1], x_p)
    self.assertEqual([2, 3, 4, 5, 6, 7, 8, 9, 10], x_dom)
def _next_case(self):
    """ Generator which returns Cases one at a time.

    Queries the ``cases`` table (narrowed by case-level selectors), then
    for each case row queries ``casevars`` (narrowed by variable-level
    selectors), unpickling non-primitive values.
    """
    # figure out which selectors are for cases and which are for variables
    sql = ["SELECT * FROM cases"]
    if self.selectors is not None:
        for sel in self.selectors:
            rhs, rel, lhs = _query_split(sel)
            if rhs in _casetable_attrs:
                if len(sql) == 1:
                    sql.append("WHERE %s%s%s" % (rhs, rel, lhs))
                else:
                    sql.append("AND %s%s%s" % (rhs, rel, lhs))
    casecur = self._connection.cursor()
    casecur.execute(' '.join(sql))

    # per-case variable query; %s is filled with the case id below
    sql = [
        'SELECT var_id,name,case_id,sense,value from casevars WHERE case_id=%s'
    ]
    if self.selectors is not None:
        for sel in self.selectors:
            rhs, rel, lhs = _query_split(sel)
            if rhs in _vartable_attrs:
                sql.append("AND %s%s%s" % (rhs, rel, lhs))
    combined = ' '.join(sql)
    varcur = self._connection.cursor()

    for cid, text_id, parent, label, msg, retries, model_id, timeEnter in casecur:
        varcur.execute(combined % cid)
        inputs = []
        outputs = []
        for var_id, vname, case_id, sense, value in varcur:
            if not isinstance(value, (float, int, str)):
                try:
                    value = loads(str(value))
                except UnpicklingError as err:
                    # BUG FIX: the message previously referenced 'cname',
                    # which is undefined in this scope (NameError while
                    # reporting the real error); use the case's text_id,
                    # matching the sibling implementation.
                    raise UnpicklingError(
                        "can't unpickle value '%s' for case '%s' from database: %s"
                        % (vname, text_id, str(err)))
            if sense == 'i':
                inputs.append((vname, value))
            else:
                outputs.append((vname, value))
        # skip cases with no recorded variables at all
        if len(inputs) > 0 or len(outputs) > 0:
            yield Case(inputs=inputs, outputs=outputs, retries=retries,
                       msg=msg, label=label, case_uuid=text_id,
                       parent_uuid=parent)
def test_bad_criteria(self):
    """Criteria with no matching case outputs must raise ValueError."""
    ei = MultiObjExpectedImprovement(2)
    bests = CaseSet()
    for case in [Case(outputs=[("y1", 1), ("y2", 1)])]:
        bests.record(case)
    ei.best_cases = bests
    ei.criteria = ['y1', 'y3']
    ei.predicted_values = [NormalDistribution(mu=1, sigma=1),
                           NormalDistribution(mu=1, sigma=1)]
    try:
        ei.execute()
    except ValueError as err:
        self.assertEqual(str(err), ": no cases in the provided case_set"
                         " had output matching the provided criteria, ['y1' 'y3']")
    else:
        # BUG FIX: previously the test passed silently when no
        # exception was raised at all.
        self.fail("ValueError expected")
def test_bad_case_set(self):
    """Criteria matching none of the case outputs must raise ValueError."""
    pf = ParetoFilter()
    x = [1, 1, 2, 2, 2, 3, 3, 3]
    y = [2, 3, 1, 2, 3, 1, 2, 3]
    cases = []
    for x_0, y_0 in zip(x, y):
        cases.append(Case(outputs=[("x", x_0), ("y", y_0)]))
    pf.case_sets = [ListCaseIterator(cases), ]
    pf.criteria = ['z', 'w']
    try:
        pf.execute()
    except ValueError as err:
        self.assertEqual(str(err), ": no cases provided had all of the outputs "
                         "matching the provided criteria, ['z', 'w']")
    else:
        # BUG FIX: previously the test passed silently when no
        # exception was raised at all.
        self.fail("ValueError expected")
def test_ei_calc_switch(self):
    """Requesting EI with more than 2 objectives must raise ValueError."""
    ei = MultiObjExpectedImprovement(3)
    bests = CaseSet()
    for case in [Case(outputs=[("y1", 1), ("y2", 1), ("y3", 1)])]:
        bests.record(case)
    ei.best_cases = bests
    ei.criteria = ['y1', 'y2', 'y3']
    ei.predicted_values = [NormalDistribution(mu=1, sigma=1),
                           NormalDistribution(mu=1, sigma=1),
                           NormalDistribution(mu=1, sigma=1)]
    ei.calc_switch = 'EI'
    try:
        ei.execute()
    except ValueError as err:
        self.assertEqual(str(err), ': EI calculations not supported'
                         ' for more than 2 objectives')
    else:
        # BUG FIX: previously the test passed silently when no
        # exception was raised at all.
        self.fail("ValueError expected")
def configure(self):
    """Wire a non-sequential CaseIteratorDriver around a
    PGrafSubComponent and preload four cases."""
    self.add('driver', CaseIteratorDriver())
    self.add('runner', PGrafSubComponent())
    self.driver.workflow.add('runner')
    self.driver.sequential = False
    # uncomment to keep simulation directories for debugging purposes
    #import os
    #os.environ['OPENMDAO_KEEPDIRS'] = '1'
    cases = [Case(inputs=[('runner.obj', PGrafObject(num)),
                          ('runner.num', num)],
                  outputs=['runner.result'])
             for num in range(4)]
    self.driver.iterator = ListCaseIterator(cases)
    self.driver.recorders = [ListCaseRecorder()]
def _next_case(self): """ Generator which returns Cases one at a time. """ # Default case label for external csv files that don't have labels. label = "External Case" retries = max_retries = 0 parent_uuid = msg = "" retries_field = None if self.headers is None: input_fields = {} else: input_fields = self.headers output_fields = {} for row in self.data: # Get fieldnames from file if self.need_fieldnames: # OpenMDAO-style CSV file if row[1] == '/INPUTS': input_fields, output_fields = self._parse_fieldnames(row) self.label_field = 0 retries_field = row.index('/METADATA') + 1 max_retries_field = retries_field + 1 parent_uuid_field = retries_field + 2 msg_field = retries_field + 3 # Read headers from file elif self.headers is None: for i, field in enumerate(row): if field == 'label': self.label_field = i else: input_fields[i] = field self.need_fieldnames = False continue if self.label_field is not None: label = row[self.label_field] if retries_field is not None: retries = row[retries_field] max_retries = row[max_retries_field] parent_uuid = row[parent_uuid_field] msg = row[msg_field] # For some reason, default for these in a case is None if not retries: retries = None if not max_retries: max_retries = None inputs = [] for i, field in input_fields.iteritems(): inputs.append((field, row[i])) outputs = [] for i, field in output_fields.iteritems(): outputs.append((field, row[i])) yield Case(inputs=inputs, outputs=outputs, label=label, \ retries=retries, max_retries=max_retries, \ parent_uuid=parent_uuid, msg=msg)
def record_case(self):
    """ A driver can call this function to record the current state of the
    current iteration as a Case into all slotted case recorders. Generally,
    the driver should call this function once per iteration and may also
    need to call it at the conclusion.

    All parameters, objectives, and constraints are included in the Case
    output, along with all extra variables listed in self.printvars.
    """
    if not self.recorders:
        return

    case_input = []
    case_output = []
    iotypes = {}  # caches var name -> 'in'/'out' to skip metadata lookups

    # Parameters
    if hasattr(self, 'get_parameters'):
        for name, param in self.get_parameters().iteritems():
            if isinstance(name, tuple):
                name = name[0]
            case_input.append([name, param.evaluate(self.parent)])
            iotypes[name] = 'in'

    # Objectives
    if hasattr(self, 'eval_objective'):
        case_output.append(["Objective", self.eval_objective()])
    elif hasattr(self, 'eval_objectives'):
        for j, obj in enumerate(self.eval_objectives()):
            case_output.append(["Objective_%d" % j, obj])

    # Constraints
    if hasattr(self, 'get_ineq_constraints'):
        for name, con in self.get_ineq_constraints().iteritems():
            val = con.evaluate(self.parent)
            case_output.append(["Constraint ( %s )" % name, val])
    if hasattr(self, 'get_eq_constraints'):
        for name, con in self.get_eq_constraints().iteritems():
            val = con.evaluate(self.parent)
            case_output.append(["Constraint ( %s )" % name, val])

    # always also record the workflow iteration name
    tmp_printvars = self.printvars[:]
    tmp_printvars.append('%s.workflow.itername' % self.name)
    iotypes[tmp_printvars[-1]] = 'out'

    # Additional user-requested variables
    for printvar in tmp_printvars:
        if '*' in printvar:
            printvars = self._get_all_varpaths(printvar)
        else:
            printvars = [printvar]

        for var in printvars:
            iotype = iotypes.get(var)
            if iotype is None:
                iotype = self.parent.get_metadata(var, 'iotype')
                iotypes[var] = iotype
            if iotype == 'in':
                val = ExprEvaluator(var, scope=self.parent).evaluate()
                case_input.append([var, val])
            elif iotype == 'out':
                val = ExprEvaluator(var, scope=self.parent).evaluate()
                case_output.append([var, val])
            else:
                # BUG FIX: this message literal was split in two by a broken
                # line continuation in the original source, which made the
                # module unparsable; rejoined into one string.
                msg = "%s is not an input or output" % var
                self.raise_exception(msg, ValueError)

    #case = Case(case_input, case_output, case_uuid=self.case_id, parent_uuid=self.parent_case_id)
    case = Case(case_input, case_output, parent_uuid=self._case_id)
    for recorder in self.recorders:
        recorder.record(case)
def record(self, driver, inputs, outputs, exc, case_uuid, parent_uuid):
    """Store the case in our internal list."""
    in_names, out_names = self._cfg_map[driver]
    case = Case(zip(in_names, inputs), zip(out_names, outputs),
                exc, case_uuid, parent_uuid)
    self.cases.append(case)
def record_case(self):
    """ A driver can call this function to record the current state of the
    current iteration as a Case into all slotted case recorders. Generally,
    the driver should call this function once per iteration and may also
    need to call it at the conclusion.

    All parameters, objectives, and constraints are included in the Case
    output, along with all extra variables listed in self.printvars.
    """
    if not self.recorders:
        return

    case_input = []
    case_output = []

    # Parameters
    if hasattr(self, 'get_parameters'):
        for name, param in self.get_parameters().iteritems():
            if isinstance(name, tuple):
                name = name[0]
            case_input.append([name, param.evaluate(self.parent)])

    # Objectives
    if hasattr(self, 'eval_objective'):
        case_output.append(["Objective", self.eval_objective()])

    # Constraints: record as a signed violation so that positive means
    # satisfied regardless of the inequality's direction.
    if hasattr(self, 'get_ineq_constraints'):
        for name, con in self.get_ineq_constraints().iteritems():
            val = con.evaluate(self.parent)
            # val is (lhs, rhs, relation, ...); flip the difference for '>'
            if '>' in val[2]:
                case_output.append(
                    ["Constraint ( %s )" % name, val[0] - val[1]])
            else:
                case_output.append(
                    ["Constraint ( %s )" % name, val[1] - val[0]])
    if hasattr(self, 'get_eq_constraints'):
        for name, con in self.get_eq_constraints().iteritems():
            val = con.evaluate(self.parent)
            case_output.append(
                ["Constraint ( %s )" % name, val[1] - val[0]])

    # Additional user-requested variables
    for printvar in self.printvars:
        if '*' in printvar:
            printvars = self._get_all_varpaths(printvar)
        else:
            printvars = [printvar]
        for var in printvars:
            iotype = self.parent.get_metadata(var, 'iotype')
            if iotype == 'in':
                val = ExprEvaluator(var, scope=self.parent).evaluate()
                case_input.append([var, val])
            elif iotype == 'out':
                val = ExprEvaluator(var, scope=self.parent).evaluate()
                case_output.append([var, val])
            else:
                msg = "%s is not an input or output" % var
                self.raise_exception(msg, ValueError)

    # Pull iteration coord from workflow
    coord = self.workflow._iterbase('')

    case = Case(case_input, case_output, label=coord,
                parent_uuid=self._case_id)
    for recorder in self.recorders:
        recorder.record(case)