Example #1
    def setup_cases(self):
        """ setup the cases """
        if (self.run_dakota):
            self.ws_driver.setup_cases(self.studycases, self.aerocode)
        else:
            self.runcases = []
            ## cases should be list of DesignLoadCases
            for dlc in self.studycases:
                if (self.raw_cases):  # goes with "tabular" input distn format
                    print "building dlc for: ", dlc.x
                    runcase = ParamDesignLoadCaseBuilder.buildRunCase_x(
                        dlc.x, dlc.param_names, dlc)
                    self.runcases.append(
                        Case(inputs=[('runner.input', runcase)]))
                else:
                    print 'Generating run cases for study case %s' % dlc.name
                    # ask the aero code to produce run cases for this study case
                    allruns = self.aerocode.genRunCases(dlc)
                    for runcase in allruns:
                        print 'Adding Case for run case %s' % runcase.name
                        # create the case
                        #                self.runcases.append(Case(inputs= [('runner.input', runcase)],
                        #                                          outputs=['runner.output', 'runner.input']))
                        self.runcases.append(
                            Case(inputs=[('runner.input', runcase)]))
                        ## vars used here need to exist in the relevant (sub-)objects
                        ## (i.e. aerocode.input needs to exist, e.g. in openAeroCode), else openMDAO throws an exception
                        ## This results in aerocode.execute() being called with self.input = runcase = the relevant RunCase
                        save_run_cases(dlc.name, allruns)

            self.ws_driver.iterator = ListCaseIterator(self.runcases)
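The pattern in this example — build one Case per run from (name, value) input tuples and hand the list to the driver through ListCaseIterator — reduces to a few lines. A minimal sketch, assuming OpenMDAO-classic import locations (these vary between 0.x releases):

# Minimal sketch of the Case / ListCaseIterator pattern used above.
# The import paths are assumptions for OpenMDAO-classic and may differ by release.
from openmdao.main.case import Case
from openmdao.lib.casehandlers.api import ListCaseIterator

run_values = [0.5, 1.0, 1.5]                      # hypothetical run-case inputs
cases = [Case(inputs=[('runner.input', v)]) for v in run_values]

driver_iterator = ListCaseIterator(cases)         # what ws_driver.iterator receives above
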
Example #2
 def setup_init(self):
     super(CIDriver, self).setup_init()
     inp = self.comp_name+'.x'
     out = self.comp_name+'.y'
     cases = []
     for i in range(self.max_iterations):
         cases.append(Case(inputs=[(inp, i)], outputs=[out]))
     Case.set_vartree_inputs(self, cases)
Example #3
 def execute(self):
     inp = self.comp_name + '.x'
     out = self.comp_name + '.y'
     cases = []
     for i in range(self.max_iterations):
         cases.append(Case(inputs=[(inp, i)], outputs=[out]))
     Case.set_vartree_inputs(self, cases)
     super(CIDriver, self).execute()
Example #4
 def test_ei_2obj(self):
     ei = MultiObjExpectedImprovement()
     bests = CaseSet()
     list_of_cases = [Case(outputs=[("y1",1),("y2",10)]),Case(outputs=[("y1",1),("y2",-10)])]
     for case in list_of_cases:
         bests.record(case)
     ei.best_cases = bests
     ei.criteria = ["y1","y2"]
     ei.predicted_values = [NormalDistribution(mu=1,sigma=1),NormalDistribution(mu=0,sigma=1)]
     ei.calc_switch = "EI"
     ei.execute()
     self.assertAlmostEqual([5.0],ei.EI,1)
     self.assertAlmostEqual(0.5,ei.PI,6)
Example #5
    def _next_case(self):
        """ Generator which returns Cases one at a time. """
        uuid = parent_uuid = msg = ""
        uuid_field = parent_uuid_field = msg_field = None
        if self.headers is None:
            input_fields = {}
        else:
            input_fields = self.headers
        output_fields = {}

        for row in self.data:

            # Get fieldnames from file
            if self._need_fieldnames:

                # OpenMDAO-style CSV file
                if '/INPUTS' in row:
                    input_fields, output_fields = self._parse_fieldnames(row)

                    self.timestamp_field = 1
                    uuid_field = row.index('/METADATA') + 1
                    parent_uuid_field = uuid_field + 1
                    msg_field = uuid_field + 2

                # Read headers from file
                elif self.headers is None:
                    for i, field in enumerate(row):
                        if field == 'timestamp':
                            self.timestamp_field = i
                        else:
                            input_fields[i] = field

                self._need_fieldnames = False
                continue

            if uuid_field is not None:
                uuid = row[uuid_field]
                parent_uuid = row[parent_uuid_field]
                msg = row[msg_field]

            inputs = []
            for i, field in input_fields.iteritems():

                # Convert bools from string back into bools
                # Note, only really need this for inputs.
                if row[i] in ('True', 'False'):
                    # bool('False') would be True, so compare against the string instead
                    row[i] = (row[i] == 'True')

                inputs.append((field, row[i]))

            outputs = []
            for i, field in output_fields.iteritems():
                outputs.append((field, row[i]))

            exc = None if not msg else Exception(msg)

            yield Case(inputs=inputs, outputs=outputs, exc=exc,
                       parent_uuid=parent_uuid)

        self._need_fieldnames = True
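The reader above keeps input_fields as a mapping from column index to variable name and pairs each row value with its name. The same pairing on a hypothetical row:

# Illustration of the index-to-name pairing used above (hypothetical fields and row).
input_fields = {2: 'comp.x', 3: 'comp.y'}
output_fields = {4: 'comp.f'}
row = ['2020-01-01', 'uuid-123', '1.5', '2.5', '7.75']

inputs = [(field, row[i]) for i, field in input_fields.items()]
outputs = [(field, row[i]) for i, field in output_fields.items()]
# inputs  -> [('comp.x', '1.5'), ('comp.y', '2.5')]  (dict order may vary)
# outputs -> [('comp.f', '7.75')]
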
Example #6
    def _get_cases(self):
        self.DOEgenerator.num_parameters = self.total_parameters()

        upper = self.get_upper_bounds()
        lower = self.get_lower_bounds()
        P = self.eval_parameters()
        M = (P - lower) / (upper - lower)

        for row in list(self.DOEgenerator) + [tuple(M)]:
            delta_low = P - lower
            k_low = 1.0 / (1.0 + (1 - self.beta) * delta_low)
            new_low = P - self.alpha * k_low * delta_low  #/(self.exec_count+1)

            delta_high = upper - P
            k_high = 1.0 / (1.0 + (1 - self.beta) * delta_high)
            new_high = P + self.alpha * k_high * delta_high  #/(self.exec_count+1)

            vals = new_low + (new_high - new_low) * row

            case = self.set_parameters(vals, Case(parent_uuid=self._case_id))
            # now add events
            for varname in self.get_events():

                case.add_input(varname, True)
            case.add_outputs(self.case_outputs)

            yield case
Example #7
    def _get_cases(self):
        """Generate each case."""
        self.DOEgenerator.num_parameters = self.total_parameters()
        record_doe = self.record_doe
        events = self.get_events()
        outputs = self.case_outputs
        case_filter = self.case_filter

        if record_doe:
            if not self.doe_filename:
                self.doe_filename = '%s.csv' % self.name
            self._csv_file = open(self.doe_filename, 'wb')
            csv_writer = csv.writer(self._csv_file)

        lower = self.get_lower_bounds()
        delta = self.get_upper_bounds() - lower

        for i, row in enumerate(self.DOEgenerator):
            if record_doe:
                csv_writer.writerow(['%.16g' % val for val in row])
            vals = lower + delta * row
            case = self.set_parameters(vals, Case(parent_uuid=self._case_id))
            # now add events
            for varname in events:
                case.add_input(varname, True)
            case.add_outputs(outputs)
            if case_filter is None or case_filter.select(i, case):
                yield case
Example #8
 def record(self, src, inputs, outputs, err, case_uuid, parent_uuid):
     in_names, out_names = self._name_map[src]
     inputs = zip(in_names, inputs)
     outputs = zip(out_names, outputs)
     self.cases.append(
         Case(inputs, outputs, case_uuid=case_uuid,
              parent_uuid=parent_uuid))
Example #9
    def _get_cases(self):
        """Generate each case."""
        params = self.get_parameters().values()
        self.DOEgenerator.num_parameters = len(params)
        record_doe = self.record_doe
        events = self.get_events()
        outputs = self.case_outputs
        case_filter = self.case_filter

        if record_doe:
            if not self.doe_filename:
                self.doe_filename = '%s.csv' % self.name
            self._csv_file = open(self.doe_filename, 'wb')
            csv_writer = csv.writer(self._csv_file)

        for i, row in enumerate(self.DOEgenerator):
            if record_doe:
                csv_writer.writerow(['%.16g' % val for val in row])
            vals = [
                p.low + (p.high - p.low) * val for p, val in zip(params, row)
            ]
            case = self.set_parameters(vals, Case(parent_uuid=self._case_id))
            # now add events
            for varname in events:
                case.add_input(varname, True)
            case.add_outputs(outputs)
            if case_filter is None or case_filter.select(i, case):
                yield case
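The vals line above maps each normalized DOE value in [0, 1] onto its parameter's [low, high] interval. A standalone illustration of the same arithmetic with made-up bounds:

# Standalone illustration of the DOE scaling used above (made-up numbers).
lows  = [0.0, 10.0]
highs = [1.0, 20.0]
row   = (0.25, 0.5)        # one normalized DOE sample

vals = [low + (high - low) * v for low, high, v in zip(lows, highs, row)]
# vals -> [0.25, 15.0]
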
Example #10
 def _case_from_values(self, values):
     return Case(inputs=[(n, v) for n, v in zip(
         self._names[0:self._split_idx], values[0:self._split_idx])],
                 outputs=[(n, v)
                          for n, v in zip(self._names[self._split_idx:],
                                          values[self._split_idx:])],
                 parent_uuid=self._parent_uuid)
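The slicing above relies on self._names listing all input names before the output names, with _split_idx marking the boundary. The same slicing on concrete, hypothetical data:

# Illustration of the split-index slicing used above (hypothetical names and values).
names = ['comp.x', 'comp.y', 'comp.f', 'comp.g']   # inputs first, then outputs
split_idx = 2
values = [1.0, 2.0, 10.0, -3.0]

inputs = [(n, v) for n, v in zip(names[:split_idx], values[:split_idx])]
outputs = [(n, v) for n, v in zip(names[split_idx:], values[split_idx:])]
# inputs  -> [('comp.x', 1.0), ('comp.y', 2.0)]
# outputs -> [('comp.f', 10.0), ('comp.g', -3.0)]
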
Example #11
    def _get_cases(self):
        params = self.get_parameters().values()
        self.DOEgenerator.num_parameters = len(params)

        M = []
        P = []

        for p in params:
            temp = p.evaluate()
            P.append(temp)
            M.append((temp - p.low) / (p.high - p.low))

        for row in list(self.DOEgenerator) + [tuple(M)]:
            vals = []
            for p, val, curval in zip(params, row, P):
                delta_low = curval - p.low
                k_low = 1.0 / (1.0 + (1 - self.beta) * delta_low)
                new_low = curval - self.alpha * k_low * delta_low  #/(self.exec_count+1)

                delta_high = p.high - curval
                k_high = 1.0 / (1.0 + (1 - self.beta) * delta_high)
                new_high = curval + self.alpha * k_high * delta_high  #/(self.exec_count+1)

                newval = new_low + (new_high - new_low) * val

                vals.append(newval)

            case = self.set_parameters(vals, Case(parent_uuid=self._case_id))
            # now add events
            for varname in self.get_events():

                case.add_input(varname, True)
            case.add_outputs(self.case_outputs)

            yield case
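The alpha/beta bookkeeping above shrinks the sampling interval around each parameter's current value, with the k factors damping how far the new bounds move when the current value sits far from a bound. Worked numbers for a single parameter, using made-up values for alpha and beta:

# Numeric walk-through of the adaptive bounds used above (made-up values).
low, high, curval = 0.0, 10.0, 4.0
alpha, beta = 0.5, 0.9

delta_low = curval - low                          # 4.0
k_low = 1.0 / (1.0 + (1 - beta) * delta_low)      # ~0.714
new_low = curval - alpha * k_low * delta_low      # ~2.57

delta_high = high - curval                        # 6.0
k_high = 1.0 / (1.0 + (1 - beta) * delta_high)    # 0.625
new_high = curval + alpha * k_high * delta_high   # 5.875

# A normalized DOE value of 0.5 then maps to the midpoint of [new_low, new_high].
newval = new_low + (new_high - new_low) * 0.5     # ~4.22
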
Example #12
 def test_ei_zero_division(self):
     ei = ExpectedImprovement()
     ei.best_case = CaseSet(Case(outputs=[("y", 1)]))
     ei.criteria = "y"
     ei.predicted_value = NormalDistribution(mu=1, sigma=0)
     ei.execute()
     self.assertEqual(0, ei.EI)
     self.assertEqual(0, ei.PI)
Example #13
    def _get_cases(self):
        """Iterator over the cases"""

        for row in self.distribution_generator:
            case = self.set_parameters(row, Case(parent_uuid=self._case_id))
            case.add_outputs(self.case_outputs)

            yield case
Example #14
 def test_ei(self):
     ei = ExpectedImprovement()
     ei.best_case = CaseSet(Case(outputs=[("y", 1)]))
     ei.criteria = "y"
     ei.predicted_value = NormalDistribution(mu=1, sigma=1)
     ei.execute()
     self.assertAlmostEqual([0.91], ei.EI, 2)
     self.assertAlmostEqual(0.5, ei.PI, 6)
Example #15
    def run_iteration(self, case_uuid=None):
        """Runs workflow."""
        wf = self.workflow
        if not wf._ordering:
            self._logger.warning("'%s': workflow is empty!" %
                                 self.get_pathname())

        if not wf._system.is_active():
            return

        self._stop = False
        self.workflow._exec_count += 1

        iterbase = wf._iterbase()

        if not case_uuid:
            # We record the case and are responsible for unique case ids.
            record_case = True
            case_uuid = Case.next_uuid()
        else:
            record_case = False

        err = None
        try:
            uvec = wf._system.vec['u']
            fvec = wf._system.vec['f']

            if wf._need_prescatter:
                wf._system.scatter('u', 'p')

            # save old value of u to compute resids
            for node in wf._cycle_vars:
                fvec[node][:] = uvec[node][:]

            wf._system.run(iterbase=iterbase, case_uuid=case_uuid)

            # update resid vector for cyclic vars
            for node in wf._cycle_vars:
                fvec[node][:] -= uvec[node][:]

            if self._stop:
                raise RunStopped('Stop requested')
        except Exception:
            err = sys.exc_info()

        if record_case and wf._rec_required:
            try:
                wf._record_case(case_uuid, err)
            except Exception as exc:
                if err is None:
                    err = sys.exc_info()
                self._logger.error("Can't record case: %s", exc)

        # reraise exception with proper traceback if one occurred
        if err is not None:
            # NOTE: cannot use 'raise err' here for some reason.  Must separate
            # the parts of the tuple.
            raise err[0], err[1], err[2]
Example #17
    def _next_case(self):
        """ Generator which returns Cases one at a time. """
        # figure out which selectors are for cases and which are for variables
        sql = ["SELECT * FROM cases"]
        if self.selectors is not None:
            for sel in self.selectors:
                rhs, rel, lhs = _query_split(sel)
                if rhs in _casetable_attrs:
                    if len(sql) == 1:
                        sql.append("WHERE %s%s%s" % (rhs, rel, lhs))
                    else:
                        sql.append("AND %s%s%s" % (rhs, rel, lhs))

        casecur = self._connection.cursor()
        casecur.execute(' '.join(sql))

        sql = [
            'SELECT var_id,name,case_id,sense,value from casevars WHERE case_id=%s'
        ]
        if self.selectors is not None:
            for sel in self.selectors:
                rhs, rel, lhs = _query_split(sel)
                if rhs in _vartable_attrs:
                    sql.append("AND %s%s%s" % (rhs, rel, lhs))
        combined = ' '.join(sql)
        varcur = self._connection.cursor()

        for cid, text_id, parent, msg, model_id, timeEnter in casecur:
            varcur.execute(combined % cid)
            inputs = []
            outputs = []
            for var_id, vname, case_id, sense, value in varcur:
                if isinstance(value, (float, int, str)):
                    pass
                elif value is None:  # Result when recorded value was NaN.
                    value = float('NaN')
                else:
                    try:
                        value = loads(str(value))
                    except UnpicklingError as err:
                        print 'value', type(value), repr(value)
                        raise UnpicklingError("can't unpickle value '%s' for"
                                              " case '%s' from database: %s" %
                                              (vname, text_id, str(err)))
                if sense == 'i':
                    inputs.append((vname, value))
                elif sense == 'o':
                    outputs.append((vname, value))
            if len(inputs) > 0 or len(outputs) > 0:
                exc = Exception(msg) if msg else None
                yield Case(inputs=inputs,
                           outputs=outputs,
                           exc=exc,
                           case_uuid=text_id,
                           parent_uuid=parent)
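The selector handling above folds case-level filters into the SQL text before any rows are fetched (variable-level selectors are appended to the per-case query the same way). A standalone sketch of the clause building, with a simplified stand-in for _query_split and a hypothetical attribute set:

# Sketch of the WHERE/AND clause building used above.
# _query_split and _casetable_attrs are replaced by simplified stand-ins here.
CASETABLE_ATTRS = set(['id', 'model_id', 'timeEnter'])

def build_case_query(selectors):
    sql = ["SELECT * FROM cases"]
    for sel in selectors or []:
        field, rel, value = sel.partition('=')     # crude split; only handles '='
        if field in CASETABLE_ATTRS:
            keyword = "WHERE" if len(sql) == 1 else "AND"
            sql.append("%s %s%s%s" % (keyword, field, rel, value))
    return ' '.join(sql)

print(build_case_query(['model_id=3']))
# SELECT * FROM cases WHERE model_id=3
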
Example #18
 def test_reset_y_star_event(self):
     ei = MultiObjExpectedImprovement(3)
     bests = CaseSet()
     list_of_cases = [Case(outputs=[("y1",1),("y2",1),("y3",1)])]
     for case in list_of_cases:
         bests.record(case)
     ei.best_cases = bests
     ei.criteria = ['y1','y2','y3']
     ei.predicted_values = [NormalDistribution(mu=1,sigma=1),
                            NormalDistribution(mu=1,sigma=1),
                            NormalDistribution(mu=1,sigma=1)]
     ei.execute()
     bests = CaseSet()
     list_of_cases = [Case(outputs=[("y1",2),("y2",2),("y3",2)])]
     for case in list_of_cases:
         bests.record(case)
     ei.best_cases = bests        
     ei.reset_y_star = True
     ei.execute()
     self.assertEqual(ei.y_star.all(),array([2,2,2]).all())
Example #19
 def test_ei_bad_criteria(self):
     ei = ExpectedImprovement()
     ei.best_case = CaseSet(Case(outputs=[("y", 1)]))
     ei.criteria = "x"
     ei.predicted_value = NormalDistribution(mu=1, sigma=1)
     try:
         ei.execute()
     except ValueError, err:
         self.assertEqual(
             str(err), ": best_case did not have an output which "
             "matched the criteria, 'x'")
Example #20
 def test_ei_nobj(self):
     ei = MultiObjExpectedImprovement()
     ei.target = array([[1, 1, 1]])
     list_of_cases = [Case(outputs=[("y1", 1), ("y2", 1), ("y3", 1)])]
     ei.criteria = ['y1', 'y2', 'y3']
     ei.current = [
         NormalDistribution(mu=1, sigma=1),
         NormalDistribution(mu=1, sigma=1),
         NormalDistribution(mu=1, sigma=1)
     ]
     ei.execute()
     self.assertAlmostEqual(0.875, ei.PI, 1)
Example #21
 def _get_cases(self):
     params = self.get_parameters().values()
     self.DOEgenerator.num_parameters = len(params)
     
     for row in self.DOEgenerator:
         vals = [p.low+(p.high-p.low)*val for p,val in zip(params,row)]
         case = self.set_parameters(vals, Case(parent_uuid=self._case_id))
         # now add events
         for varname in self.get_events(): 
             case.add_input(varname,True)
         case.add_outputs(self.case_outputs)    
         
         yield case
Example #22
 def test_ei_nobj(self):
     ei = MultiObjExpectedImprovement(3)
     bests = CaseSet()
     list_of_cases = [Case(outputs=[("y1",1),("y2",1),("y3",1)])]
     for case in list_of_cases:
         bests.record(case)
     ei.best_cases = bests
     ei.criteria = ['y1','y2','y3']
     ei.predicted_values = [NormalDistribution(mu=1,sigma=1),
                            NormalDistribution(mu=1,sigma=1),
                            NormalDistribution(mu=1,sigma=1)]
     ei.execute()
     self.assertAlmostEqual(0.875,ei.PI,1)
Example #23
    def test_2d_filter2(self):
        pf = ParetoFilter()
        x = [1,1,2,2,2,3,3,3,]
        y = [2,3,1,2,3,1,2,3]
        cases = []
        for x_0,y_0 in zip(x,y):
            cases.append(Case(outputs=[("x",x_0),("y",y_0)]))
        
        pf.case_sets = [ListCaseIterator(cases),]
        pf.criteria = ['x','y']
        pf.execute()

        x_p,y_p = zip(*[(case['x'],case['y']) for case in pf.pareto_set])
Example #24
 def test_1d_filter(self):
     pf = ParetoFilter()
     x = [1,2,3,4,5,6,7,8,9,10]
     cases = [Case(outputs=[("x",x_0)]) for x_0 in x]
     pf.case_sets = [ListCaseIterator(cases),]
     pf.criteria = ['x']
     pf.execute()
    
     x_p = [case['x'] for case in pf.pareto_set]
     x_dom = [case['x'] for case in pf.dominated_set]
     
     self.assertEqual([1],x_p)
     self.assertEqual([2,3,4,5,6,7,8,9,10],x_dom)
Example #25
    def _next_case(self):
        """ Generator which returns Cases one at a time. """
        # figure out which selectors are for cases and which are for variables
        sql = ["SELECT * FROM cases"]
        if self.selectors is not None:
            for sel in self.selectors:
                rhs, rel, lhs = _query_split(sel)
                if rhs in _casetable_attrs:
                    if len(sql) == 1:
                        sql.append("WHERE %s%s%s" % (rhs, rel, lhs))
                    else:
                        sql.append("AND %s%s%s" % (rhs, rel, lhs))

        casecur = self._connection.cursor()
        casecur.execute(' '.join(sql))

        sql = [
            'SELECT var_id,name,case_id,sense,value from casevars WHERE case_id=%s'
        ]
        if self.selectors is not None:
            for sel in self.selectors:
                rhs, rel, lhs = _query_split(sel)
                if rhs in _vartable_attrs:
                    sql.append("AND %s%s%s" % (rhs, rel, lhs))
        combined = ' '.join(sql)
        varcur = self._connection.cursor()

        for cid, text_id, parent, label, msg, retries, model_id, timeEnter in casecur:
            varcur.execute(combined % cid)
            inputs = []
            outputs = []
            for var_id, vname, case_id, sense, value in varcur:
                if not isinstance(value, (float, int, str)):
                    try:
                        value = loads(str(value))
                    except UnpicklingError as err:
                        raise UnpicklingError(
                            "can't unpickle value '%s' for case '%s' from database: %s"
                            % (vname, text_id, str(err)))
                if sense == 'i':
                    inputs.append((vname, value))
                else:
                    outputs.append((vname, value))
            if len(inputs) > 0 or len(outputs) > 0:
                yield Case(inputs=inputs,
                           outputs=outputs,
                           retries=retries,
                           msg=msg,
                           label=label,
                           case_uuid=text_id,
                           parent_uuid=parent)
Example #26
 def test_bad_criteria(self):
     ei = MultiObjExpectedImprovement(2)
     bests = CaseSet()
     list_of_cases = [Case(outputs=[("y1",1),("y2",1)])]
     for case in list_of_cases:
         bests.record(case)
     ei.best_cases = bests
     ei.criteria = ['y1','y3']
     ei.predicted_values = [NormalDistribution(mu=1,sigma=1),
                            NormalDistribution(mu=1,sigma=1)]
     try:
         ei.execute()
     except ValueError,err:
         self.assertEqual(str(err),": no cases in the provided case_set"
                             " had output matching the provided criteria, ['y1' 'y3']")
Example #27
 def test_bad_case_set(self):
     pf = ParetoFilter()
     x = [1,1,2,2,2,3,3,3,]
     y = [2,3,1,2,3,1,2,3]
     cases = []
     for x_0,y_0 in zip(x,y):
         cases.append(Case(outputs=[("x",x_0),("y",y_0)]))
         
     pf.case_sets = [ListCaseIterator(cases),]
     pf.criteria = ['z','w']
     try:
         pf.execute()
     except ValueError,err: 
         self.assertEqual(str(err),": no cases provided had all of the outputs "
              "matching the provided criteria, ['z', 'w']")
Example #28
 def test_ei_calc_switch(self):
     ei = MultiObjExpectedImprovement(3)
     bests = CaseSet()
     list_of_cases = [Case(outputs=[("y1",1),("y2",1),("y3",1)])]
     for case in list_of_cases:
         bests.record(case)
     ei.best_cases = bests
     ei.criteria = ['y1','y2','y3']
     ei.predicted_values = [NormalDistribution(mu=1,sigma=1),
                            NormalDistribution(mu=1,sigma=1),
                            NormalDistribution(mu=1,sigma=1)]
     ei.calc_switch = 'EI'
     try:
         ei.execute()
     except ValueError,err:
         self.assertEqual(str(err),': EI calculations not supported'
                                         ' for more than 2 objectives')
Example #29
    def configure(self):
        self.add('driver', CaseIteratorDriver())
        self.add('runner', PGrafSubComponent())
        self.driver.workflow.add('runner')
        self.driver.sequential = False
        # uncomment to keep simulation directories for debugging purposes
        #import os
        #os.environ['OPENMDAO_KEEPDIRS'] = '1'

        cases = []
        for num in range(4):
            cases.append(
                Case(inputs=[('runner.obj', PGrafObject(num)),
                             ('runner.num', num)],
                     outputs=['runner.result']))

        self.driver.iterator = ListCaseIterator(cases)
        self.driver.recorders = [ListCaseRecorder()]
Example #30
    def run(self, ffd_order=0, case_uuid=None):
        """ Run the Components in this Workflow. """
        self._stop = False
        self._exec_count += 1

        iterbase = self._iterbase()

        if case_uuid is None:
            # We record the case and are responsible for unique case ids.
            record_case = True
            case_uuid = Case.next_uuid()
        else:
            record_case = False

        err = None
        scope = self.scope
        try:
            for comp in self:
                # before the workflow runs each component, update that
                # component's inputs based on the graph
                scope.update_inputs(comp.name, graph=self._var_graph)
                if isinstance(comp, PseudoComponent):
                    comp.run(ffd_order=ffd_order)
                else:
                    comp.set_itername('%s-%s' % (iterbase, comp.name))
                    comp.run(ffd_order=ffd_order, case_uuid=case_uuid)
                if self._stop:
                    raise RunStopped('Stop requested')
        except Exception:
            err = sys.exc_info()

        if record_case and self._rec_required:
            try:
                self._record_case(case_uuid, err)
            except Exception as exc:
                if err is None:
                    err = sys.exc_info()
                self.parent._logger.error("Can't record case: %s", exc)

        # reraise exception with proper traceback if one occurred
        if err is not None:
            # NOTE: cannot use 'raise err' here for some reason.  Must separate
            # the parts of the tuple.
            raise err[0], err[1], err[2]
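The error handling above captures sys.exc_info() so the case can still be recorded after a failure, then re-raises with the original traceback using the Python 2 three-argument raise. A stripped-down sketch of the same pattern:

import sys

# Stripped-down sketch of the capture-record-reraise pattern used above (Python 2).
def run_and_record(work, record):
    err = None
    try:
        work()
    except Exception:
        err = sys.exc_info()              # keep (type, value, traceback)

    try:
        record(err)                       # recording happens even when work() failed
    except Exception:
        if err is None:
            err = sys.exc_info()

    if err is not None:
        raise err[0], err[1], err[2]      # re-raise with the original traceback
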
Example #32
    def run(self, ffd_order=0, case_uuid=None):
        """ Run the Components in this Workflow. """
        self._stop = False
        self._exec_count += 1

        iterbase = self._iterbase()

        if case_uuid is None:
            # We record the case and are responsible for unique case ids.
            record_case = True
            case_uuid = Case.next_uuid()
        else:
            record_case = False

        err = None
        scope = self.scope
        try:
            for comp in self:
                # before the workflow runs each component, update that
                # component's inputs based on the graph
                scope.update_inputs(comp.name, graph=self._var_graph)
                if isinstance(comp, PseudoComponent):
                    comp.run(ffd_order=ffd_order)
                else:
                    comp.set_itername('%s-%s' % (iterbase, comp.name))
                    comp.run(ffd_order=ffd_order, case_uuid=case_uuid)
                if self._stop:
                    raise RunStopped('Stop requested')
        except Exception as exc:
            err = TracedError(exc, format_exc())

        if record_case and self._rec_required:
            try:
                self._record_case(case_uuid, err)
            except Exception as exc:
                if err is None:
                    err = TracedError(exc, format_exc())
                self.parent._logger.error("Can't record case: %s", exc)

        if err is not None:
            err.reraise(with_traceback=False)
Example #33
    def _next_case(self):
        """ Generator which returns Cases one at a time. """

        # Default case label for external csv files that don't have labels.
        label = "External Case"

        retries = max_retries = 0
        parent_uuid = msg = ""
        retries_field = None
        if self.headers is None:
            input_fields = {}
        else:
            input_fields = self.headers
        output_fields = {}

        for row in self.data:

            # Get fieldnames from file
            if self.need_fieldnames:

                # OpenMDAO-style CSV file
                if row[1] == '/INPUTS':

                    input_fields, output_fields = self._parse_fieldnames(row)

                    self.label_field = 0
                    retries_field = row.index('/METADATA') + 1
                    max_retries_field = retries_field + 1
                    parent_uuid_field = retries_field + 2
                    msg_field = retries_field + 3

                # Read headers from file
                elif self.headers is None:
                    for i, field in enumerate(row):
                        if field == 'label':
                            self.label_field = i
                        else:
                            input_fields[i] = field

                self.need_fieldnames = False
                continue

            if self.label_field is not None:
                label = row[self.label_field]

            if retries_field is not None:
                retries = row[retries_field]
                max_retries = row[max_retries_field]
                parent_uuid = row[parent_uuid_field]
                msg = row[msg_field]

                # The Case default for these is None, so map empty strings back to None.
                if not retries:
                    retries = None
                if not max_retries:
                    max_retries = None

            inputs = []
            for i, field in input_fields.iteritems():
                inputs.append((field, row[i]))

            outputs = []
            for i, field in output_fields.iteritems():
                outputs.append((field, row[i]))

            yield Case(inputs=inputs, outputs=outputs, label=label, \
                       retries=retries, max_retries=max_retries, \
                       parent_uuid=parent_uuid, msg=msg)
Example #34
 def record(self, driver, inputs, outputs, exc, case_uuid, parent_uuid):
     """Store the case in our internal list."""
     in_names, out_names = self._cfg_map[driver]
     self.cases.append(
         Case(zip(in_names, inputs), zip(out_names, outputs), exc,
              case_uuid, parent_uuid))
Example #35
    def execute(self):
        """ General Newton's method. """

        if MPI:
            if self.workflow._system.mpi.comm == MPI.COMM_NULL:
                return

        system = self.workflow._system
        options = self.gradient_options
        fvec = system.vec['f']
        dfvec = system.vec['df']
        uvec = system.vec['u']
        iterbase = self.workflow._iterbase()
        nstring = 'NEWTON'

        # perform an initial run
        system.evaluate(iterbase, case_uuid=Case.next_uuid())

        f_norm = get_norm(fvec)

        f_norm0 = f_norm

        if self.iprint > 0:
            self.print_norm(nstring, 0, f_norm, f_norm0)

        itercount = 0
        alpha = self.alpha
        while itercount < self.max_iteration and f_norm > self.atol and \
              f_norm/f_norm0 > self.rtol:

            system.calc_newton_direction(options=options)

            #print "LS 1", uvec.array, '+', dfvec.array
            uvec.array += alpha*dfvec.array

            # Just evaluate the model with the new points
            system.evaluate(iterbase, case_uuid=Case.next_uuid())

            f_norm = get_norm(fvec)
            if self.iprint > 0:
                self.print_norm(nstring, itercount+1, f_norm, f_norm0)

            itercount += 1
            ls_itercount = 0

            # Backtracking Line Search
            while ls_itercount < self.ls_max_iteration and \
                  f_norm > self.ls_atol and \
                  f_norm/f_norm0 > self.ls_rtol:

                alpha *= 0.5
                uvec.array -= alpha*dfvec.array

                # Just evaluate the model with the new points
                system.evaluate(iterbase, case_uuid=Case.next_uuid())

                f_norm = get_norm(fvec)
                if self.iprint > 1:
                    self.print_norm('BK_TKG', itercount+1,
                                    f_norm, f_norm/f_norm0,
                                    indent=1, solver='LS')

                ls_itercount += 1

            # Reset backtracking
            alpha = self.alpha

        # Need to make sure the whole workflow is executed at the final
        # point, not just evaluated.
        self.pre_iteration()
        self.run_iteration()
        self.post_iteration()

        if self.iprint > 0:
            self.print_norm(nstring, itercount, f_norm, f_norm0, msg='Converged')
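The backtracking loop above takes a full step alpha*df, then walks back half of the remaining step each time the residual norm fails to drop, so after k backtracks the net step is alpha/2**k times the Newton direction. The same update on plain NumPy arrays, detached from the OpenMDAO system vectors:

import numpy as np

# Sketch of the Newton step plus backtracking used above (made-up data;
# plain arrays stand in for uvec.array and dfvec.array).
u = np.array([1.0, 2.0])        # current state
du = np.array([0.4, -0.2])      # Newton direction
alpha = 1.0

u += alpha * du                 # full step
for _ in range(3):              # pretend the residual norm did not improve
    alpha *= 0.5
    u -= alpha * du             # retreat half of the remaining step

# net step applied: (1 - 1/2 - 1/4 - 1/8) * du = du / 8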