Ejemplo n.º 1
0
 def _deserialize_or_create_runlist(self):
     """Return the runlist, reusing a previously serialized one when restarting.

     With ``self.use_restart`` enabled, a runlist saved in
     ``self.original_dir`` takes precedence; otherwise a fresh one is built
     from ``self._build_runlist()``. The result is re-serialized (restart
     mode only) so an interrupted run can resume from the same list.
     """
     cases = None
     if self.use_restart:
         cases = RestartRecorder.deserialize_runlist(self.original_dir)
     if not cases:
         cases = [list(entry) for entry in self._build_runlist()]
     if self.use_restart:
         RestartRecorder.serialize_runlist(
             self.original_dir, cases, self._num_par_doe)
     return cases
Ejemplo n.º 2
0
 def _deserialize_or_create_runlist(self):
     """Return the runlist, loading a serialized one when restarting.

     If ``self.use_restart`` is set, first try to deserialize a runlist
     from ``self.original_dir``; when none is found (or restart is off)
     build one from ``self._build_runlist()``. Before returning, the list
     is serialized again (restart mode only) for future restarts.
     """
     runlist = None
     if self.use_restart:
         runlist = RestartRecorder.deserialize_runlist(self.original_dir)
     if not runlist:
         runlist = [list(run) for run in self._build_runlist()]
     if self.use_restart:
         RestartRecorder.serialize_runlist(self.original_dir, runlist,
                                           self._num_par_doe)
     return runlist
Ejemplo n.º 3
0
    def add_recorders():
        """Instantiate recorders from ``mdao_config['recorders']`` and attach them to ``top.driver``.

        A name map from OpenMDAO unknown paths ("component.variable") to
        user-facing names is built from the driver's design variables,
        objectives, intermediate variables, IndepVarComp constants, and
        constraints, then one recorder is created per config entry.

        NOTE(review): the returned ``recorders`` list is never populated;
        callers appear to rely only on the ``top.driver.add_recorder``
        side effect — confirm before using the return value.
        """
        recorders = []
        design_var_map = {get_desvar_path(designVariable): designVariable for designVariable in driver['designVariables']}
        objective_map = {'{}.{}'.format(objective['source'][0], objective['source'][1]): objective_name for objective_name, objective in six.iteritems(driver['objectives'])}
        intermediate_var_map = {'{}.{}'.format(intermediate_var['source'][0], intermediate_var['source'][1]): intermediate_var_name for intermediate_var_name, intermediate_var in six.iteritems(driver.get('intermediateVariables', {}))}
        # Unknowns exposed by IndepVarComp components are recorded as constants.
        constants_map = {}
        for name, constant in (c for c in six.iteritems(mdao_config['components']) if c[1].get('type', 'TestBenchComponent') == 'IndepVarComp'):
            constants_map.update({'{}.{}'.format(name, unknown): unknown for unknown in constant['unknowns']})

        # All constraints that don't point back to design variables
        constraints_map = {'{}.{}'.format(constraint['source'][0], constraint['source'][1]): constraint_name for constraint_name, constraint in six.iteritems(driver.get('constraints', {})) if constraint['source'][0] not in mdao_config['drivers']}

        # Copy, so merging does not silently mutate design_var_map via aliasing.
        unknowns_map = dict(design_var_map)
        unknowns_map.update(objective_map)
        unknowns_map.update(intermediate_var_map)
        unknowns_map.update(constants_map)
        unknowns_map.update(constraints_map)
        for recorder in mdao_config.get('recorders', [{'type': 'DriverCsvRecorder', 'filename': 'output.csv'}]):
            if recorder['type'] == 'DriverCsvRecorder':
                # Append (instead of truncating) when restart data exists, so
                # a restarted run continues the same CSV file.
                append = RestartRecorder.is_restartable(original_dir)
                # csv needs binary mode on PY2, but text mode with newline=''
                # on PY3 ('wb' would make csv.writer raise TypeError there);
                # mirrors the PY2/PY3 handling of the newer add_recorders.
                if six.PY2:
                    mode, open_kwargs = ('ab' if append else 'wb'), {}
                else:
                    mode, open_kwargs = ('a' if append else 'w'), {'newline': ''}
                recorder = MappingCsvRecorder({}, unknowns_map, io.open(recorder['filename'], mode, **open_kwargs))
                if append:
                    # The header was already written by the interrupted run.
                    recorder._wrote_header = True
            elif recorder['type'] == 'AllCsvRecorder':
                if six.PY2:
                    recorder = CsvRecorder(out=open(recorder['filename'], 'wb'))
                else:
                    recorder = CsvRecorder(out=io.open(recorder['filename'], 'w', newline=''))
            elif recorder['type'] == 'CouchDBRecorder':
                recorder = CouchDBRecorder(recorder.get('url', 'http://localhost:5984/'), recorder['run_id'])
                recorder.options['record_params'] = True
                recorder.options['record_unknowns'] = True
                recorder.options['record_resids'] = False
                recorder.options['includes'] = list(unknowns_map.keys())
            else:
                # Dotted type name: import the module and instantiate the class.
                mod_name = '.'.join(recorder['type'].split('.')[:-1])
                class_name = recorder['type'].split('.')[-1]
                recorder = getattr(importlib.import_module(mod_name), class_name)()

            top.driver.add_recorder(recorder)
        return recorders
Ejemplo n.º 4
0
 def _setup_communicators(self, comm, parent_dir):
     """Delegate communicator setup to the base class, then bind a RestartRecorder to *comm*."""
     super(PredeterminedRunsDriver,
           self)._setup_communicators(comm, parent_dir)
     if self.use_restart:
         self.restart = RestartRecorder(self.original_dir, comm)
Ejemplo n.º 5
0
class PredeterminedRunsDriver(openmdao.api.PredeterminedRunsDriver):
    def __init__(self, original_dir, num_samples=None, *args, **kwargs):
        """Initialize the driver; only valid when called from a concrete subclass.

        original_dir -- directory used for restart bookkeeping; created here
        if it does not exist yet. num_samples is accepted for subclass use.
        """
        if type(self) == PredeterminedRunsDriver:
            raise Exception('PredeterminedRunsDriver is an abstract class')

        # Default the DOE parallelism to the MPI world size (1 without MPI).
        if MPI:
            kwargs.setdefault('num_par_doe', MPI.COMM_WORLD.Get_size())
        else:
            kwargs.setdefault('num_par_doe', 1)
        super(PredeterminedRunsDriver, self).__init__(*args, **kwargs)

        self.supports['gradients'] = False
        self.original_dir = original_dir

        # Ensure self.original_dir exists; when instantiated from a
        # subproblem it may not have been created yet.
        try:
            os.makedirs(self.original_dir)
        except OSError as err:
            already_there = (err.errno == errno.EEXIST
                             and os.path.isdir(self.original_dir))
            if not already_there:
                raise

        self.use_restart = True

    def _setup_communicators(self, comm, parent_dir):
        """Run base-class communicator setup, then attach a RestartRecorder bound to *comm*."""
        super(PredeterminedRunsDriver,
              self)._setup_communicators(comm, parent_dir)
        if self.use_restart:
            self.restart = RestartRecorder(self.original_dir, comm)

    def run(self, problem):
        """Build a runlist and execute the Problem for each set of generated parameters."""
        self.iter_count = 0

        if MPI and self._num_par_doe > 1:
            runlist = self._distrib_build_runlist()
        else:
            runlist = self._deserialize_or_create_runlist()

        total = len(runlist)
        testbenchexecutor.progress_service.update_progress(
            "Iteration 0/{} completed".format(total), 0, total)

        # Execute each case in turn, reporting progress after every one.
        for case in runlist:
            succeeded = self.run_one(problem, case)
            status = "completed" if succeeded else "failed"
            testbenchexecutor.progress_service.update_progress(
                "Iteration {}/{} {}".format(self.iter_count, total, status),
                self.iter_count, total)

        if self.use_restart:
            self.restart.close()

    def run_one(self, problem, run):
        """Apply one case's design-variable values, solve the model, and record.

        Returns False when the model raised AnalysisError, True otherwise.
        The failure is recorded in the iteration metadata instead of
        aborting the sweep.
        """
        success = True

        for name, value in run:
            self.set_desvar(name, value)

        metadata = create_local_meta(None, 'Driver')
        update_local_meta(metadata, (self.iter_count,))

        try:
            problem.root.solve_nonlinear(metadata=metadata)
        except AnalysisError:
            metadata['msg'] = traceback.format_exc()
            metadata['success'] = 0
            success = False

        self.recorders.record_iteration(problem.root, metadata)
        self.iter_count += 1
        if self.use_restart:
            self.restart.record_iteration()

        return success

    def _distrib_build_runlist(self):
        """
        Returns an iterator over only those cases meant to execute
        in the current rank as part of a parallel DOE. A latin hypercube,
        unlike some other DOE generators, is created in one rank and then
        the appropriate cases are scattered to the appropriate ranks.

        NOTE(review): this fragment appears truncated — the scatter of
        ``job_list`` to the other ranks and the per-case ``yield`` are not
        visible here.
        """
        comm = self._full_comm
        job_list = None
        if comm.rank == 0:
            debug('Parallel DOE using %d procs' % self._num_par_doe)
            # Materialize the (possibly restart-deserialized) runlist.
            run_list = [
                list(case) for case in self._deserialize_or_create_runlist()
            ]  # need to run iterator

            # Slice the full list into one contiguous chunk per DOE process.
            run_sizes, run_offsets = evenly_distrib_idxs(
                self._num_par_doe, len(run_list))
            job_list = [
                run_list[o:o + s] for o, s in zip(run_offsets, run_sizes)
            ]
Ejemplo n.º 6
0
    def add_recorders():
        """Create recorders from ``mdao_config['recorders']`` and attach them to ``top.driver``.

        Builds a map from OpenMDAO unknown paths ("component.variable") to
        one or more user-facing names, rewrites paths that point at
        subproblem outputs using ``subProblemOutputMeta``, then instantiates
        one recorder per config entry.

        NOTE(review): the local ``recorders`` list is returned but never
        appended to, so the return value is always [] — confirm callers only
        rely on the ``add_recorder`` side effect.
        """
        recorders = []
        # Design-variable unknown path -> design-variable name.
        design_var_map = {
            get_desvar_path(designVariable): designVariable
            for designVariable in driver['designVariables']
        }
        # "<component>.<variable>" source path -> objective name.
        objective_map = {
            '{}.{}'.format(objective['source'][0], objective['source'][1]):
            objective_name
            for objective_name, objective in six.iteritems(
                driver['objectives'])
        }
        # Same keying for intermediate variables (optional config section).
        intermediate_var_map = {
            '{}.{}'.format(intermediate_var['source'][0],
                           intermediate_var['source'][1]):
            intermediate_var_name
            for intermediate_var_name, intermediate_var in six.iteritems(
                driver.get('intermediateVariables', {}))
        }
        # Unknowns exposed by IndepVarComp components are recorded as constants.
        constants_map = {}
        for name, constant in (
                c for c in six.iteritems(mdao_config['components'])
                if c[1].get('type', 'TestBenchComponent') == 'IndepVarComp'):
            constants_map.update({
                '{}.{}'.format(name, unknown): unknown
                for unknown in constant['unknowns']
            })

        constraints_map = {
            '{}.{}'.format(constraint['source'][0], constraint['source'][1]):
            constraint_name
            for constraint_name, constraint in six.iteritems(
                driver.get('constraints', {}))
            if constraint['source'][0] not in mdao_config['drivers']
        }  # All constraints that don't point back to design variables

        # One unknown path can map to several names, so collect into lists.
        unknowns_map = defaultdict(list)

        def add_to_unknowns(map):
            # Merge *map* into unknowns_map, appending so duplicates survive.
            for key, val in six.iteritems(map):
                unknowns_map[key].append(val)

        add_to_unknowns(design_var_map)
        add_to_unknowns(objective_map)
        add_to_unknowns(intermediate_var_map)
        add_to_unknowns(constants_map)
        add_to_unknowns(constraints_map)

        new_unknowns_map = defaultdict(list)
        # Locate/fix any unknowns that point to subproblem outputs
        for unknown_path, unknown_names in six.iteritems(unknowns_map):
            for unknown_name in unknown_names:
                split_path = unknown_path.split('.')
                if split_path[0] in subProblemOutputMeta:
                    # Rewrite the second path segment through the
                    # subproblem's output metadata.
                    split_path[1] = subProblemOutputMeta[split_path[0]][
                        split_path[1]]
                    new_path = '.'.join(split_path)
                    new_unknowns_map[new_path].append(unknown_name)
                else:
                    new_unknowns_map[unknown_path].append(unknown_name)

        unknowns_map = new_unknowns_map

        for recorder in mdao_config.get('recorders', [{
                'type': 'DriverCsvRecorder',
                'filename': 'output.csv'
        }]):
            if recorder['type'] == 'DriverCsvRecorder':
                mode = 'w'
                exists = os.path.isfile(recorder['filename'])
                # Append when restarting or when the caller asked for it.
                if RestartRecorder.is_restartable(original_dir) or append_csv:
                    mode = 'a'
                # csv wants binary mode on PY2 but text + newline='' on PY3.
                if six.PY2:
                    mode += 'b'
                    open_kwargs = {}
                else:
                    open_kwargs = {'newline': ''}
                recorder = MappingCsvRecorder({},
                                              unknowns_map,
                                              io.open(recorder['filename'],
                                                      mode, **open_kwargs),
                                              include_id=recorder.get(
                                                  'include_id', False))
                # NOTE(review): on PY3 the append mode is 'a', never 'ab',
                # so a restartable run without append_csv does not mark the
                # header as already written here — confirm that is intended.
                if (append_csv and exists) or mode == 'ab':
                    recorder._wrote_header = True
            elif recorder['type'] == 'AllCsvRecorder':
                mode = 'w'
                if six.PY2:
                    mode += 'b'
                    open_kwargs = {}
                else:
                    open_kwargs = {'newline': ''}
                recorder = CsvRecorder(
                    out=io.open(recorder['filename'], mode, **open_kwargs))
            elif recorder['type'] == 'CouchDBRecorder':
                recorder = CouchDBRecorder(
                    recorder.get('url', 'http://localhost:5984/'),
                    recorder['run_id'])
                recorder.options['record_params'] = True
                recorder.options['record_unknowns'] = True
                recorder.options['record_resids'] = False
                recorder.options['includes'] = list(unknowns_map.keys())
            else:
                # Dotted type name: import the module, instantiate the class.
                mod_name = '.'.join(recorder['type'].split('.')[:-1])
                class_name = recorder['type'].split('.')[-1]
                recorder = getattr(importlib.import_module(mod_name),
                                   class_name)()

            top.driver.add_recorder(recorder)
        return recorders
Ejemplo n.º 7
0
 def _setup_communicators(self, comm, parent_dir):
     """Run base-class communicator setup, then create the restart recorder on *comm*."""
     super(PredeterminedRunsDriver, self)._setup_communicators(comm, parent_dir)
     if self.use_restart:
         self.restart = RestartRecorder(self.original_dir, comm)
Ejemplo n.º 8
0
class PredeterminedRunsDriver(openmdao.api.PredeterminedRunsDriver):
    """Abstract driver that executes a predetermined list of DOE cases.

    Extends the OpenMDAO driver of the same name with restart bookkeeping
    (via RestartRecorder) and parallel DOE scattering under MPI. Must be
    subclassed; the subclass supplies ``_build_runlist()``.
    """

    def __init__(self, original_dir, num_samples=5, *args, **kwargs):
        """Initialize; *original_dir* is where restart files are kept."""
        if type(self) == PredeterminedRunsDriver:
            raise Exception('PredeterminedRunsDriver is an abstract class')
        if MPI:
            comm = MPI.COMM_WORLD
            # Default the DOE parallelism to the MPI world size.
            kwargs.setdefault('num_par_doe', comm.Get_size())
        else:
            kwargs.setdefault('num_par_doe', 1)
        super(PredeterminedRunsDriver, self).__init__(*args, **kwargs)
        self.supports['gradients'] = False
        self.original_dir = original_dir
        self.use_restart = True

    def _setup_communicators(self, comm, parent_dir):
        """Run base-class setup, then bind a RestartRecorder to *comm*."""
        super(PredeterminedRunsDriver, self)._setup_communicators(comm, parent_dir)
        if self.use_restart:
            self.restart = RestartRecorder(self.original_dir, comm)

    def run(self, problem):
        """Build a runlist and execute the Problem for each set of generated parameters."""
        self.iter_count = 0

        if MPI and self._num_par_doe > 1:
            runlist = self._distrib_build_runlist()
        else:
            runlist = self._deserialize_or_create_runlist()

        # For each runlist entry, run the system and record the results
        for run in runlist:
            self.run_one(problem, run)

        if self.use_restart:
            self.restart.close()

    def run_one(self, problem, run):
        """Apply one case's design-variable values, solve the model, and record."""
        for dv_name, dv_val in run:
            self.set_desvar(dv_name, dv_val)

        metadata = create_local_meta(None, 'Driver')

        update_local_meta(metadata, (self.iter_count,))

        try:
            problem.root.solve_nonlinear(metadata=metadata)
        except AnalysisError:
            # A failed analysis is recorded (success=0), not fatal to the sweep.
            metadata['msg'] = traceback.format_exc()
            metadata['success'] = 0
        self.recorders.record_iteration(problem.root, metadata)
        self.iter_count += 1
        if self.use_restart:
            self.restart.record_iteration()

    def _distrib_build_runlist(self):
        """
        Returns an iterator over only those cases meant to execute
        in the current rank as part of a parallel DOE. A latin hypercube,
        unlike some other DOE generators, is created in one rank and then
        the appropriate cases are scattered to the appropriate ranks.
        """
        comm = self._full_comm
        job_list = None
        if comm.rank == 0:
            debug('Parallel DOE using %d procs' % self._num_par_doe)
            run_list = [list(case) for case in self._deserialize_or_create_runlist()]  # need to run iterator

            # Slice the full list into one contiguous chunk per DOE process.
            run_sizes, run_offsets = evenly_distrib_idxs(self._num_par_doe,
                                                         len(run_list))
            job_list = [run_list[o:o+s] for o, s in zip(run_offsets,
                                                        run_sizes)]

        # Every rank receives its own chunk (job_list exists only on rank 0).
        run_list = comm.scatter(job_list, root=0)
        debug('Number of DOE jobs: %s' % len(run_list))

        for case in run_list:
            yield case

    def _deserialize_or_create_runlist(self):
        """Load a serialized runlist when restarting, else build and serialize one."""
        runlist = None
        if self.use_restart:
            runlist = RestartRecorder.deserialize_runlist(self.original_dir)
        if not runlist:
            runlist = [list(run) for run in self._build_runlist()]
        if self.use_restart:
            RestartRecorder.serialize_runlist(self.original_dir, runlist, self._num_par_doe)
        return runlist