Code example #1
    def _setup_communicators(self, comm, parent_dir):
        """
        Assign a communicator to the root `System`.

        Args
        ----
        comm : an MPI communicator (real or fake)
            The communicator being offered by the Problem.

        parent_dir : str
            Absolute dir of parent `System`.
        """
        root = self.root
        if self._num_par_doe < 1:
            raise ValueError(
                "'%s': _num_par_doe must be >= 1 but value is %s." %
                (self.pathname, self._num_par_doe))
        if not MPI:
            self._num_par_doe = 1

        self._full_comm = comm

        # figure out which parallel DOE we are associated with
        if self._num_par_doe > 1:
            minprocs, maxprocs = root.get_req_procs()
            if self._load_balance:
                sizes, offsets = evenly_distrib_idxs(self._num_par_doe - 1,
                                                     comm.size - 1)
                sizes = [1] + list(sizes)
                offsets = [0] + [o + 1 for o in offsets]
            else:
                sizes, offsets = evenly_distrib_idxs(self._num_par_doe,
                                                     comm.size)

            # a 'color' is assigned to each subsystem, with
            # an entry for each processor it will be given
            # e.g. [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]
            color = []
            self._id_map = {}
            for i in range(self._num_par_doe):
                color.extend([i] * sizes[i])
                self._id_map[i] = (sizes[i], offsets[i])

            self._par_doe_id = color[comm.rank]

            # create a sub-communicator for each color and
            # get the one assigned to our color/process
            if trace:
                debug('%s: splitting comm, doe_id=%s' % ('.'.join(
                    (root.pathname, 'driver')), self._par_doe_id))
            comm = comm.Split(self._par_doe_id)

        root._setup_communicators(comm, parent_dir)
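
All of the examples on this page revolve around evenly_distrib_idxs(num_divisions, arr_size), which returns a (sizes, offsets) pair that splits arr_size items into num_divisions contiguous chunks whose sizes differ by at most one. A minimal stand-in with that assumed behavior (not the library's actual implementation) can help when reading the snippets without an OpenMDAO install:

import numpy as np

def evenly_distrib_idxs(num_divisions, arr_size):
    """Assumed-equivalent sketch: split arr_size items into num_divisions chunks."""
    base, leftover = divmod(arr_size, num_divisions)
    sizes = np.full(num_divisions, base, dtype=int)
    sizes[:leftover] += 1                      # the first `leftover` chunks get one extra item
    offsets = np.zeros(num_divisions, dtype=int)
    offsets[1:] = np.cumsum(sizes)[:-1]
    return sizes, offsets

# e.g. evenly_distrib_idxs(4, 10) -> sizes [3 3 2 2], offsets [0 3 6 8]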
Code example #2
    def _setup_communicators(self, comm, parent_dir):
        """
        Assign a communicator to the root `System`.

        Args
        ----
        comm : an MPI communicator (real or fake)
            The communicator being offered by the Problem.

        parent_dir : str
            Absolute dir of parent `System`.
        """
        root = self.root
        if self._num_par_doe < 1:
            raise ValueError("'%s': _num_par_doe must be >= 1 but value is %s." %
                              (self.pathname, self._num_par_doe))
        if not MPI:
            self._num_par_doe = 1

        self._full_comm = comm

        # figure out which parallel DOE we are associated with
        if self._num_par_doe > 1:
            minprocs, maxprocs = root.get_req_procs()
            if self._load_balance:
                sizes, offsets = evenly_distrib_idxs(self._num_par_doe-1,
                                                     comm.size-1)
                sizes = [1]+list(sizes)
                offsets = [0]+[o+1 for o in offsets]
            else:
                sizes, offsets = evenly_distrib_idxs(self._num_par_doe,
                                                     comm.size)

            # a 'color' is assigned to each subsystem, with
            # an entry for each processor it will be given
            # e.g. [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]
            color = []
            self._id_map = {}
            for i in range(self._num_par_doe):
                color.extend([i]*sizes[i])
                self._id_map[i] = (sizes[i], offsets[i])

            self._par_doe_id = color[comm.rank]

            # create a sub-communicator for each color and
            # get the one assigned to our color/process
            if trace:
                debug('%s: splitting comm, doe_id=%s' % ('.'.join(
                    (root.pathname, 'driver')), self._par_doe_id))
            comm = comm.Split(self._par_doe_id)

        root._setup_communicators(comm, parent_dir)
Code example #3
    def deserialize_runlist(cls, original_dir):
        # FIXME check mdao_config.json mtime and against these
        if cls.is_restartable(original_dir):
            with open(os.path.join(
                    original_dir,
                    RestartRecorder.RESTART_PROGRESS_FILENAME)) as restart_progress_file:
                restart_progress = json.load(restart_progress_file)
            runlist = []
            with open(os.path.join(
                    original_dir,
                    RestartRecorder.RESTART_RUNLIST_FILENAME), 'r') as restart_runlist:
                len_runlist = int(restart_runlist.readline())
                run_sizes, run_offsets = evenly_distrib_idxs(
                    len(restart_progress), len_runlist)
                already_done_ranges = [
                    (offset, offset + restart_progress[str(i)])
                    for i, offset in enumerate(run_offsets)
                ] + [(float("inf"), float("inf"))]
                reader = csv.reader(restart_runlist)
                header = next(iter(reader))
                for i, run in enumerate(reader):
                    while already_done_ranges[1][0] <= i:
                        already_done_ranges = already_done_ranges[1:]
                    if not already_done_ranges[0][0] <= i < already_done_ranges[0][1]:
                        runlist.append([(field, run[j])
                                        for j, field in enumerate(header)])
            return runlist
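
The already_done_ranges bookkeeping above is easier to follow with concrete numbers. A standalone trace with invented values (three workers, twelve total cases):

run_offsets = [0, 4, 8]                      # hypothetical offsets for 3 workers
restart_progress = {'0': 2, '1': 4, '2': 1}  # cases already completed per worker
already_done = [(off, off + restart_progress[str(i)])
                for i, off in enumerate(run_offsets)]
already_done += [(float('inf'), float('inf'))]   # sentinel keeps the while-loop bounded

remaining = []
for i in range(12):                          # 12 rows in the runlist CSV
    while already_done[1][0] <= i:
        already_done = already_done[1:]
    if not already_done[0][0] <= i < already_done[0][1]:
        remaining.append(i)
print(remaining)                             # -> [2, 3, 9, 10, 11]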
Code example #4
    def _distrib_build_runlist(self):
        """
        Returns an iterator over only those cases meant to execute
        in the current rank as part of a parallel DOE. A latin hypercube,
        unlike some other DOE generators, is created in one rank and then
        the appropriate cases are scattered to the appropriate ranks.
        """
        comm = self._full_comm

        # get the par_doe_id from every rank in the full comm so we know which
        # cases to scatter where
        doe_ids = comm.allgather(self._par_doe_id)

        job_list = None
        if comm.rank == 0:
            if trace:
                debug('Parallel DOE using %d procs' % self._num_par_doe)
            run_list = [list(case) for case in self._build_runlist()] # need to run iterator

            run_sizes, run_offsets = evenly_distrib_idxs(self._num_par_doe,
                                                         len(run_list))
            jobs = [run_list[o:o+s] for o, s in zip(run_offsets, run_sizes)]

            job_list = [jobs[i] for i in doe_ids]

        if trace: debug("scattering job_list: %s" % job_list)
        run_list = comm.scatter(job_list, root=0)
        if trace: debug('Number of DOE jobs: %s (scatter DONE)' % len(run_list))

        for case in run_list:
            yield case
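
The scatter in the snippet above needs job_list to hold exactly one entry per rank of the communicator, which is why it is indexed through the gathered doe_ids. A minimal mpi4py sketch of the same pattern (a hypothetical standalone script, run under mpirun):

from mpi4py import MPI

comm = MPI.COMM_WORLD
num_par_doe = 2
par_doe_id = comm.rank % num_par_doe          # stand-in for self._par_doe_id
doe_ids = comm.allgather(par_doe_id)          # one id per rank, known on every rank

job_list = None
if comm.rank == 0:
    cases = [[('x', float(i))] for i in range(10)]            # fake runlist
    chunk = (len(cases) + num_par_doe - 1) // num_par_doe
    jobs = [cases[c * chunk:(c + 1) * chunk] for c in range(num_par_doe)]
    job_list = [jobs[i] for i in doe_ids]                     # one bucket per rank

my_cases = comm.scatter(job_list, root=0)
print('rank %d runs %d cases' % (comm.rank, len(my_cases)))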
Code example #5
    def setup_distrib_idxs(self):
        """ component declares the local sizes and sets initial values
        for all distributed inputs and outputs. Returns a dict of
        index arrays keyed to variable names.
        """
        comm = self.comm
        rank = comm.rank

        sizes, offsets = evenly_distrib_idxs(comm.size, self.arr_size)
        start = offsets[rank]
        end = start + sizes[rank]

        for n, m in self._init_unknowns_dict.items():
            self.set_var_indices(n,
                                 val=numpy.ones(sizes[rank], float),
                                 src_indices=numpy.arange(start,
                                                          end,
                                                          dtype=int))

        for n, m in self._init_params_dict.items():
            self.set_var_indices(n,
                                 val=numpy.ones(sizes[rank], float),
                                 src_indices=numpy.arange(start,
                                                          end,
                                                          dtype=int))
Code example #6
File: parallel_fd_group.py    Project: samtx/OpenMDAO1
    def _setup_communicators(self, comm, parent_dir):
        """
        Assign communicator to this `Group` and all of its subsystems.

        Args
        ----
        comm : an MPI communicator (real or fake)
            The communicator being offered by the parent system.

        parent_dir : str
            Absolute dir of parent `System`.
        """
        if self._num_par_fds < 1:
            raise ValueError(
                "'%s': num_par_fds must be >= 1 but value is %s." %
                (self.pathname, self._num_par_fds))
        if not MPI:
            self._num_par_fds = 1

        self._full_comm = comm

        # figure out which parallel FD we are associated with
        if self._num_par_fds > 1:
            minprocs, maxprocs = super(ParallelFDGroup, self).get_req_procs()
            sizes, offsets = evenly_distrib_idxs(self._num_par_fds, comm.size)

            # a 'color' is assigned to each subsystem, with
            # an entry for each processor it will be given
            # e.g. [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]
            color = []
            for i in range(self._num_par_fds):
                color.extend([i] * sizes[i])

            self._par_fd_id = color[comm.rank]

            # create a sub-communicator for each color and
            # get the one assigned to our color/process
            if trace:
                debug('%s: splitting comm, fd_id=%s' %
                      (self.pathname, self._par_fd_id))
            comm = comm.Split(self._par_fd_id)

        self._local_subsystems = []

        self.comm = comm

        self._setup_dir(parent_dir)

        for sub in itervalues(self._subsystems):
            sub._setup_communicators(comm, self._sysdata.absdir)
            if self.is_active() and sub.is_active():
                self._local_subsystems.append(sub)
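
The 'color' list built above is the argument MPI uses to split a communicator: every rank passing the same color ends up in the same sub-communicator. A hedged standalone sketch of that mechanism with mpi4py (names and group count are invented):

from mpi4py import MPI

def split_into_groups(comm, num_groups):
    """Same coloring scheme as the snippets: group i owns sizes[i] consecutive ranks."""
    base, leftover = divmod(comm.size, num_groups)
    sizes = [base + (1 if i < leftover else 0) for i in range(num_groups)]
    color = []
    for i, s in enumerate(sizes):
        color.extend([i] * s)
    my_color = color[comm.rank]
    return my_color, comm.Split(my_color)

if __name__ == '__main__':
    world = MPI.COMM_WORLD
    group_id, subcomm = split_into_groups(world, 2)
    print('world rank %d -> group %d (sub rank %d of %d)'
          % (world.rank, group_id, subcomm.rank, subcomm.size))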
Code example #7
    def setup_distrib_idxs(self):
        # this is called at the beginning of _setup_variables, so we can
        # add new params/unknowns here.
        comm = self.comm
        rank = comm.rank

        self.sizes, self.offsets = evenly_distrib_idxs(comm.size, self.arr_size)
        start = self.offsets[rank]
        end = start + self.sizes[rank]

        #need to initialize the param to have the correct local size
        self.set_var_indices('invec', val=np.ones(self.sizes[rank], float),
                             src_indices=np.arange(start, end, dtype=int))
Code example #8
    def _setup_communicators(self, comm, parent_dir):
        """
        Assign communicator to this `Group` and all of its subsystems.

        Args
        ----
        comm : an MPI communicator (real or fake)
            The communicator being offered by the parent system.

        parent_dir : str
            Absolute dir of parent `System`.
        """
        if self._num_par_fds < 1:
            raise ValueError("'%s': num_par_fds must be >= 1 but value is %s." %
                              (self.pathname, self._num_par_fds))
        if not MPI:
            self._num_par_fds = 1

        self._full_comm = comm

        # figure out which parallel FD we are associated with
        if self._num_par_fds > 1:
            minprocs, maxprocs = super(ParallelFDGroup, self).get_req_procs()
            sizes, offsets = evenly_distrib_idxs(self._num_par_fds, comm.size)

            # a 'color' is assigned to each subsystem, with
            # an entry for each processor it will be given
            # e.g. [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]
            color = []
            for i in range(self._num_par_fds):
                color.extend([i]*sizes[i])

            self._par_fd_id = color[comm.rank]

            # create a sub-communicator for each color and
            # get the one assigned to our color/process
            if trace:
                debug('%s: splitting comm, fd_id=%s' % (self.pathname,
                                                        self._par_fd_id))
            comm = comm.Split(self._par_fd_id)

        self._local_subsystems = []

        self.comm = comm

        self._setup_dir(parent_dir)

        for sub in itervalues(self._subsystems):
            sub._setup_communicators(comm, self._sysdata.absdir)
            if self.is_active() and sub.is_active():
                self._local_subsystems.append(sub)
Code example #9
    def setup_distrib_idxs(self):
        # this is called at the beginning of _setup_variables, so we can
        # add new params/unknowns here.
        comm = self.comm
        rank = comm.rank

        self.sizes, self.offsets = evenly_distrib_idxs(comm.size,
                                                       self.arr_size)
        start = self.offsets[rank]
        end = start + self.sizes[rank]

        #need to initialize the param to have the correct local size
        self.set_var_indices('invec',
                             val=np.ones(self.sizes[rank], float),
                             src_indices=np.arange(start, end, dtype=int))
Code example #10
    def setup_distrib_idxs(self):
        """ component declares the local sizes and sets initial values
        for all distributed inputs and outputs. Returns a dict of
        index arrays keyed to variable names.
        """
        comm = self.comm
        rank = comm.rank

        self.sizes, self.offsets = evenly_distrib_idxs(comm.size,
                                                       self.arr_size)
        start = self.offsets[rank]
        end = start + self.sizes[rank]

        #need to initialize the variable to have the correct local size
        self.set_var_indices('invec', val=np.ones(self.sizes[rank], float),
                             src_indices=np.arange(start, end, dtype=int))
Code example #11
    def setup_distrib_idxs(self):
        """ component declares the local sizes and sets initial values
        for all distributed inputs and outputs. Returns a dict of
        index arrays keyed to variable names.
        """
        comm = self.comm
        rank = comm.rank

        sizes, offsets = evenly_distrib_idxs(comm.size, self.arr_size)
        start = offsets[rank]
        end = start + sizes[rank]

        for n, m in self._init_unknowns_dict.items():
            self.set_var_indices(n, val=numpy.ones(sizes[rank], float), src_indices=numpy.arange(start, end, dtype=int))

        for n, m in self._init_params_dict.items():
            self.set_var_indices(n, val=numpy.ones(sizes[rank], float), src_indices=numpy.arange(start, end, dtype=int))
Code example #12
    def setup_distrib_idxs(self):
        """ component declares the local sizes and sets initial values
        for all distributed inputs and outputs. Returns a dict of
        index arrays keyed to variable names.
        """
        comm = self.comm
        rank = comm.rank

        self.sizes, self.offsets = evenly_distrib_idxs(comm.size,
                                                       self.arr_size)
        start = self.offsets[rank]
        end = start + self.sizes[rank]

        #need to initialize the variable to have the correct local size
        self.set_var_indices('invec',
                             val=np.ones(self.sizes[rank], float),
                             src_indices=np.arange(start, end, dtype=int))
Code example #13
    def deserialize_runlist(cls, original_dir):
        # FIXME check mdao_config.json mtime and against these
        if cls.is_restartable(original_dir):
            with open(os.path.join(original_dir, RestartRecorder.RESTART_PROGRESS_FILENAME)) as restart_progress_file:
                restart_progress = json.load(restart_progress_file)
            runlist = []
            with open(os.path.join(original_dir, RestartRecorder.RESTART_RUNLIST_FILENAME), "r") as restart_runlist:
                len_runlist = int(restart_runlist.readline())
                run_sizes, run_offsets = evenly_distrib_idxs(len(restart_progress), len_runlist)
                already_done_ranges = [
                    (offset, offset + restart_progress[str(i)]) for i, offset in enumerate(run_offsets)
                ] + [(float("inf"), float("inf"))]
                reader = csv.reader(restart_runlist)
                header = next(iter(reader))
                for i, run in enumerate(reader):
                    while already_done_ranges[1][0] <= i:
                        already_done_ranges = already_done_ranges[1:]
                    if not already_done_ranges[0][0] <= i < already_done_ranges[0][1]:
                        runlist.append([(field, run[j]) for j, field in enumerate(header)])
            return runlist
Code example #14
    def setup_distrib(self):
        """
        specify the local sizes of the variables and which specific indices this specific
        distributed component will handle. Indices do NOT need to be sequential or
        contiguous!
        """
        comm = self.comm
        rank = comm.rank

        # NOTE: evenly_distrib_idxs is a helper function to split the array
        #       up as evenly as possible
        sizes, offsets = evenly_distrib_idxs(comm.size, self.size)
        local_size, local_offset = sizes[rank], offsets[rank]
        self.local_size = int(local_size)

        start = local_offset
        end = local_offset + local_size

        self.set_var_indices('x', val=np.zeros(local_size, float),
            src_indices=np.arange(start, end, dtype=int))
        self.set_var_indices('y', val=np.zeros(local_size, float),
            src_indices=np.arange(start, end, dtype=int))
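
To make the start/end arithmetic above concrete, here is a standalone illustration with invented sizes (10 entries spread over 4 ranks, no MPI required):

import numpy as np

arr_size, nprocs = 10, 4
base, leftover = divmod(arr_size, nprocs)            # same even split as evenly_distrib_idxs
sizes = [base + (1 if r < leftover else 0) for r in range(nprocs)]
offsets = [0] + list(np.cumsum(sizes)[:-1])
for rank in range(nprocs):
    start = offsets[rank]
    end = start + sizes[rank]
    print(rank, np.arange(start, end, dtype=int))    # rank 0 -> [0 1 2], rank 3 -> [8 9]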
Code example #15
    def _distrib_build_runlist(self):
        """
        Returns an iterator over only those cases meant to execute
        in the current rank as part of a parallel DOE. A latin hypercube,
        unlike some other DOE generators, is created in one rank and then
        the appropriate cases are scattered to the appropriate ranks.
        """
        comm = self._full_comm
        job_list = None
        if comm.rank == 0:
            debug('Parallel DOE using %d procs' % self._num_par_doe)
            run_list = [list(case) for case in self._build_runlist()] # need to run iterator

            run_sizes, run_offsets = evenly_distrib_idxs(self._num_par_doe,
                                                         len(run_list))
            job_list = [run_list[o:o+s] for o, s in zip(run_offsets,
                                                        run_sizes)]

        run_list = comm.scatter(job_list, root=0)
        debug('Number of DOE jobs: %s' % len(run_list))

        for case in run_list:
            yield case
Code example #16
    def setup_distrib(self):
        """
        specify the local sizes of the variables and which specific indices this specific
        distributed component will handle. Indices do NOT need to be sequential or
        contiguous!
        """
        comm = self.comm
        rank = comm.rank

        # NOTE: evenly_distrib_idxs is a helper function to split the array
        #       up as evenly as possible
        sizes, offsets = evenly_distrib_idxs(comm.size, self.size)
        local_size, local_offset = sizes[rank], offsets[rank]
        self.local_size = int(local_size)

        start = local_offset
        end = local_offset + local_size

        self.set_var_indices('x',
                             val=np.zeros(local_size, float),
                             src_indices=np.arange(start, end, dtype=int))
        self.set_var_indices('y',
                             val=np.zeros(local_size, float),
                             src_indices=np.arange(start, end, dtype=int))
Code example #17
    def _setup_communicators(self, comm, parent_dir):
        """
        Assign a communicator to the root `System`.

        Args
        ----
        comm : an MPI communicator (real or fake)
            The communicator being offered by the Problem.

        parent_dir : str
            Absolute dir of parent `System`.
        """
        root = self.root

        if self._num_par_doe <= 1:
            self._num_par_doe = 1
            self._load_balance = False

        self._full_comm = comm

        # figure out which parallel DOE we are associated with
        if MPI and self._num_par_doe > 1:
            minprocs, maxprocs = root.get_req_procs()
            if self._load_balance:
                sizes, offsets = evenly_distrib_idxs(self._num_par_doe-1,
                                                     comm.size-1)
                sizes = [1]+list(sizes)
                offsets = [0]+[o+1 for o in offsets]
            else:
                sizes, offsets = evenly_distrib_idxs(self._num_par_doe,
                                                     comm.size)

            # a 'color' is assigned to each subsystem, with
            # an entry for each processor it will be given
            # e.g. [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]
            color = []

            self._id_map = {}
            for i in range(self._num_par_doe):
                color.extend([i]*sizes[i])
                self._id_map[i] = (sizes[i], offsets[i])

            self._par_doe_id = color[comm.rank]

            if self._load_balance:
                self._casecomm = None
            else:
                casecolor = []
                for i in range(self._num_par_doe):
                    if sizes[i] > 0:
                        casecolor.append(1)
                        casecolor.extend([MPI.UNDEFINED]*(sizes[i]-1))

                # we need a comm that has all the 0 ranks of the subcomms so
                # we can gather multiple cases run as part of parallel DOE.
                if trace: # pragma: no cover
                    debug('%s: splitting casecomm, doe_id=%s' % ('.'.join(
                        (root.pathname, 'driver')), self._par_doe_id))
                self._casecomm = comm.Split(casecolor[comm.rank])
                if trace: # pragma: no cover
                    debug('%s: casecomm split done' % '.'.join(
                        (root.pathname, 'driver')))

                if self._casecomm == MPI.COMM_NULL:
                    self._casecomm = None

            # create a sub-communicator for each color and
            # get the one assigned to our color/process
            if trace: # pragma: no cover
                debug('%s: splitting comm, doe_id=%s' % ('.'.join(
                    (root.pathname, 'driver')), self._par_doe_id))
            comm = comm.Split(self._par_doe_id)
            if trace: # pragma: no cover
                debug('%s: comm split done' % '.'.join(
                    (root.pathname, 'driver')))
        else:
            self._casecomm = None

        # tell RecordingManager it needs to do a multicase gather
        self.recorders._casecomm = self._casecomm

        root._setup_communicators(comm, parent_dir)
Code example #18
    def _setup_communicators(self, comm, parent_dir):
        """
        Assign a communicator to the root `System`.

        Args
        ----
        comm : an MPI communicator (real or fake)
            The communicator being offered by the Problem.

        parent_dir : str
            Absolute dir of parent `System`.
        """
        root = self.root

        if not MPI or self._num_par_doe <= 1:
            self._num_par_doe = 1
            self._load_balance = False

        self._full_comm = comm

        # figure out which parallel DOE we are associated with
        if self._num_par_doe > 1:
            minprocs, maxprocs = root.get_req_procs()
            if self._load_balance:
                sizes, offsets = evenly_distrib_idxs(self._num_par_doe - 1,
                                                     comm.size - 1)
                sizes = [1] + list(sizes)
                offsets = [0] + [o + 1 for o in offsets]
            else:
                sizes, offsets = evenly_distrib_idxs(self._num_par_doe,
                                                     comm.size)

            # a 'color' is assigned to each subsystem, with
            # an entry for each processor it will be given
            # e.g. [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]
            color = []

            self._id_map = {}
            for i in range(self._num_par_doe):
                color.extend([i] * sizes[i])
                self._id_map[i] = (sizes[i], offsets[i])

            self._par_doe_id = color[comm.rank]

            if self._load_balance:
                self._casecomm = None
            else:
                casecolor = []
                for i in range(self._num_par_doe):
                    if sizes[i] > 0:
                        casecolor.append(1)
                        casecolor.extend([MPI.UNDEFINED] * (sizes[i] - 1))

                # we need a comm that has all the 0 ranks of the subcomms so
                # we can gather multiple cases run as part of parallel DOE.
                if trace:
                    debug('%s: splitting casecomm, doe_id=%s' % ('.'.join(
                        (root.pathname, 'driver')), self._par_doe_id))
                self._casecomm = comm.Split(casecolor[comm.rank])
                if trace:
                    debug('%s: casecomm split done' % '.'.join(
                        (root.pathname, 'driver')))

                if self._casecomm == MPI.COMM_NULL:
                    self._casecomm = None

            # create a sub-communicator for each color and
            # get the one assigned to our color/process
            if trace:
                debug('%s: splitting comm, doe_id=%s' % ('.'.join(
                    (root.pathname, 'driver')), self._par_doe_id))
            comm = comm.Split(self._par_doe_id)
            if trace:
                debug('%s: comm split done' % '.'.join(
                    (root.pathname, 'driver')))
        else:
            self._casecomm = None

        # tell RecordingManager it needs to do a multicase gather
        self.recorders._casecomm = self._casecomm

        root._setup_communicators(comm, parent_dir)
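
The casecolor list above passes MPI.UNDEFINED for every rank except the first rank of each color, so Split hands those ranks MPI.COMM_NULL and only the group leaders end up in casecomm. A minimal mpi4py sketch of that trick (standalone, with an assumed group size):

from mpi4py import MPI

comm = MPI.COMM_WORLD
group_size = 2                                      # assumed: colors come in blocks of 2 ranks
is_leader = (comm.rank % group_size == 0)
casecolor = 1 if is_leader else MPI.UNDEFINED
casecomm = comm.Split(casecolor)                    # non-leaders receive MPI.COMM_NULL
if casecomm == MPI.COMM_NULL:
    casecomm = None                                 # they take no part in multi-case gathers
print('rank %d in casecomm: %s' % (comm.rank, casecomm is not None))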
Code example #19
class PredeterminedRunsDriver(openmdao.api.PredeterminedRunsDriver):
    def __init__(self, original_dir, num_samples=None, *args, **kwargs):
        if type(self) == PredeterminedRunsDriver:
            raise Exception('PredeterminedRunsDriver is an abstract class')
        if MPI:
            comm = MPI.COMM_WORLD
            kwargs.setdefault('num_par_doe', comm.Get_size())
        else:
            kwargs.setdefault('num_par_doe', 1)
        super(PredeterminedRunsDriver, self).__init__(*args, **kwargs)
        self.supports['gradients'] = False
        self.original_dir = original_dir

        # Make sure self.original_dir exists (if this was instantiated from a subproblem, it might not)
        try:
            os.makedirs(self.original_dir)
        except OSError as e:
            if e.errno == errno.EEXIST and os.path.isdir(self.original_dir):
                pass
            else:
                raise

        self.use_restart = True

    def _setup_communicators(self, comm, parent_dir):
        super(PredeterminedRunsDriver,
              self)._setup_communicators(comm, parent_dir)
        if self.use_restart:
            self.restart = RestartRecorder(self.original_dir, comm)

    def run(self, problem):
        """Build a runlist and execute the Problem for each set of generated parameters."""
        self.iter_count = 0

        if MPI and self._num_par_doe > 1:
            runlist = self._distrib_build_runlist()
        else:
            runlist = self._deserialize_or_create_runlist()

        testbenchexecutor.progress_service.update_progress(
            "Iteration 0/{} completed".format(len(runlist)), 0, len(runlist))

        # For each runlist entry, run the system and record the results
        for run in runlist:
            run_success = self.run_one(problem, run)
            testbenchexecutor.progress_service.update_progress(
                "Iteration {}/{} {}".format(
                    self.iter_count, len(runlist),
                    "completed" if run_success else "failed"), self.iter_count,
                len(runlist))

        if self.use_restart:
            self.restart.close()

    def run_one(self, problem, run):
        run_success = True

        for dv_name, dv_val in run:
            self.set_desvar(dv_name, dv_val)

        metadata = create_local_meta(None, 'Driver')

        update_local_meta(metadata, (self.iter_count, ))

        try:
            problem.root.solve_nonlinear(metadata=metadata)
        except AnalysisError:
            metadata['msg'] = traceback.format_exc()
            metadata['success'] = 0
            run_success = False
        self.recorders.record_iteration(problem.root, metadata)
        self.iter_count += 1
        if self.use_restart:
            self.restart.record_iteration()

        return run_success

    def _distrib_build_runlist(self):
        """
        Returns an iterator over only those cases meant to execute
        in the current rank as part of a parallel DOE. A latin hypercube,
        unlike some other DOE generators, is created in one rank and then
        the appropriate cases are scattered to the appropriate ranks.
        """
        comm = self._full_comm
        job_list = None
        if comm.rank == 0:
            debug('Parallel DOE using %d procs' % self._num_par_doe)
            run_list = [
                list(case) for case in self._deserialize_or_create_runlist()
            ]  # need to run iterator

            run_sizes, run_offsets = evenly_distrib_idxs(
                self._num_par_doe, len(run_list))
            job_list = [
                run_list[o:o + s] for o, s in zip(run_offsets, run_sizes)
            ]
Code example #20
class LatinHypercubeDriver(PredeterminedRunsDriver):
    """Design-of-experiments Driver implementing the Latin Hypercube method.

    Args
    ----
    num_samples : int, optional
        The number of samples to run. Defaults to 1.

    seed : int or None, optional
        Random seed.  Defaults to None.

    num_par_doe : int, optional
        The number of DOE cases to run concurrently.  Defaults to 1.

    """
    def __init__(self, num_samples=1, seed=None, num_par_doe=1):
        super(LatinHypercubeDriver, self).__init__(num_par_doe=num_par_doe)
        self.num_samples = num_samples
        self.seed = seed

    def _build_runlist(self):
        """Build a runlist based on the Latin Hypercube method."""
        design_vars = self.get_desvar_metadata()
        self.num_design_vars = len(design_vars)
        if self.seed is not None:
            seed(self.seed)
            np.random.seed(self.seed)

        # Generate an LHC of the proper size
        rand_lhc = self._get_lhc()

        # Map LHC to buckets
        buckets = {}

        for j, (name, bounds) in enumerate(iteritems(design_vars)):
            design_var_buckets = self._get_buckets(bounds['lower'],
                                                   bounds['upper'])
            buckets[name] = [
                design_var_buckets[rand_lhc[i, j]]
                for i in range(self.num_samples)
            ]

        # Return random values in given buckets
        for i in range(self.num_samples):
            yield ((key, np.random.uniform(bounds[i][0], bounds[i][1]))
                   for key, bounds in iteritems(buckets))

    def _distrib_build_runlist(self):
        """
        Returns an iterator over only those cases meant to execute
        in the current rank as part of a parallel DOE. A latin hypercube,
        unlike some other DOE generators, is created in one rank and then
        the appropriate cases are scattered to the appropriate ranks.
        """
        comm = self._full_comm
        job_list = None
        if comm.rank == 0:
            debug('Parallel DOE using %d procs' % self._num_par_doe)
            run_list = [list(case) for case in self._build_runlist()
                        ]  # need to run iterator

            run_sizes, run_offsets = evenly_distrib_idxs(
                self._num_par_doe, len(run_list))
            job_list = [
                run_list[o:o + s] for o, s in zip(run_offsets, run_sizes)
            ]
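
For context, a hedged usage sketch of the driver above under OpenMDAO 1.x; the import location of LatinHypercubeDriver is an assumption, and the tiny model exists only to give the design variable something to feed:

from openmdao.api import Problem, Group, IndepVarComp, ExecComp
from openmdao.api import LatinHypercubeDriver      # assumed export location

top = Problem(root=Group())
top.root.add('p', IndepVarComp('x', 0.0))
top.root.add('comp', ExecComp('y = 2.0 * x'))
top.root.connect('p.x', 'comp.x')

top.driver = LatinHypercubeDriver(num_samples=4, seed=0, num_par_doe=1)
top.driver.add_desvar('p.x', lower=-10.0, upper=10.0)

top.setup(check=False)
top.run()        # executes the 4 sampled cases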
Code example #21
class LatinHypercubeDriver(PredeterminedRunsDriver):
    """Design-of-experiments Driver implementing the Latin Hypercube method.

    Args
    ----
    num_samples : int, optional
        The number of samples to run. Defaults to 1.

    seed : int or None, optional
        Random seed.  Defaults to None.

    num_par_doe : int, optional
        The number of DOE cases to run concurrently.  Defaults to 1.

    load_balance : bool, optional
        If True, use rank 0 as master and load balance cases among all of the
        other ranks. Defaults to False.

    """
    def __init__(self,
                 num_samples=1,
                 seed=None,
                 num_par_doe=1,
                 load_balance=False):
        super(LatinHypercubeDriver, self).__init__(num_par_doe=num_par_doe,
                                                   load_balance=load_balance)
        self.num_samples = num_samples
        self.seed = seed

    def _build_runlist(self):
        """Build a runlist based on the Latin Hypercube method."""
        design_vars = self.get_desvar_metadata()

        # Add up sizes
        self.num_design_vars = sum(meta['size']
                                   for meta in itervalues(design_vars))

        if self.seed is not None:
            seed(self.seed)
            np.random.seed(self.seed)

        # Generate an LHC of the proper size
        rand_lhc = self._get_lhc()

        # Map LHC to buckets
        buckets = OrderedDict()
        j = 0

        for (name, bounds) in iteritems(design_vars):
            buckets[name] = []

            # Support for array desvars
            val = self.root.unknowns._dat[name].val
            nval = bounds['size']

            for k in range(nval):

                lowb = bounds['lower']
                upb = bounds['upper']
                if isinstance(lowb, np.ndarray):
                    lowb = lowb[k]
                if isinstance(upb, np.ndarray):
                    upb = upb[k]

                design_var_buckets = self._get_buckets(lowb, upb)
                buckets[name].append([
                    design_var_buckets[rand_lhc[i, j]]
                    for i in range(self.num_samples)
                ])
                j += 1

        # Return random values in given buckets
        for i in range(self.num_samples):
            sample = []
            for key, bounds in iteritems(buckets):
                sample.append([
                    key,
                    np.array([
                        np.random.uniform(bounds[k][i][0], bounds[k][i][1])
                        for k in range(design_vars[key]['size'])
                    ])
                ])
            yield sample

    def _distrib_build_runlist(self):
        """
        Returns an iterator over only those cases meant to execute
        in the current rank as part of a parallel DOE. A latin hypercube,
        unlike some other DOE generators, is created in one rank and then
        the appropriate cases are scattered to the appropriate ranks.
        """
        comm = self._full_comm

        # get the par_doe_id from every rank in the full comm so we know which
        # cases to scatter where
        doe_ids = comm.allgather(self._par_doe_id)

        job_list = None
        if comm.rank == 0:
            if trace:
                debug('Parallel DOE using %d procs' % self._num_par_doe)
            run_list = [list(case) for case in self._build_runlist()
                        ]  # need to run iterator

            run_sizes, run_offsets = evenly_distrib_idxs(
                self._num_par_doe, len(run_list))
            jobs = [run_list[o:o + s] for o, s in zip(run_offsets, run_sizes)]

            job_list = [jobs[i] for i in doe_ids]