Example #1
    def get_distrib_idxs(self):
        """ component declares the local sizes and sets initial values
        for all distributed inputs and outputs. Returns a dict of
        index arrays keyed to variable names.
        """

        comm = self.mpi.comm
        rank = comm.rank

        self.sizes, self.offsets = evenly_distrib_idxs(comm.size,
                                                       self.arr_size)
        start = self.offsets[rank]
        end = start + self.sizes[rank]

        # Re-initialize the variable so it has the correct local size.
        self.invec = np.ones(self.sizes[rank], dtype=float)

        return { 'invec': make_idx_array(start, end) }
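
The snippet above relies on two OpenMDAO helpers, evenly_distrib_idxs and
make_idx_array. A minimal sketch of what they are assumed to compute is shown
below: the first splits arr_size entries as evenly as possible across the MPI
ranks and returns per-rank sizes and offsets, the second builds the local index
range. These re-implementations are illustrative assumptions, not the library
code.

import numpy as np

def evenly_distrib_idxs(num_divisions, arr_size):
    """Partition arr_size entries into num_divisions nearly equal chunks."""
    # Any remainder is assumed to be spread over the first ranks.
    base, leftover = divmod(arr_size, num_divisions)
    sizes = np.full(num_divisions, base, dtype=int)
    sizes[:leftover] += 1
    offsets = np.zeros(num_divisions, dtype=int)
    offsets[1:] = np.cumsum(sizes)[:-1]
    return sizes, offsets

def make_idx_array(start, end):
    """Index array covering the half-open range [start, end)."""
    return np.arange(start, end, dtype=int)

# For example, 10 entries over 3 ranks -> sizes [4 3 3], offsets [0 4 7],
# so rank 1 owns indices 4..6 and get_distrib_idxs would return
# {'invec': array([4, 5, 6])}.
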
Example #2
    def execute(self):
        """ Run each parameter set. """

        # 'color' selects which parallel sub-group of case evaluations this
        # process belongs to; processes outside every active sub-communicator
        # see MPI.UNDEFINED / MPI.COMM_NULL and skip the run.
        color = self._color[self.mpi.rank]

        if color == MPI.UNDEFINED or self.mpi.comm == MPI.COMM_NULL:
            return

        # Prepare parameters and responses.
        case_paths = {}
        inputs = []
        values = []

        for path in self.get_parameters():
            if isinstance(path, tuple):
                for target in path:
                    inputs.append(target)
                path = path[0]
            else:
                inputs.append(path)

            val = self.case_inputs.get(make_legal_path(path))

            values.append(val)

        if not inputs:
            return

        length = len(values[0])

        for path in self.get_responses():
            case_paths[path] = make_legal_path(path)

        # Split the 'length' cases evenly across the parallel sub-groups;
        # this process only evaluates cases in [start, end).
        sizes, offsets = evenly_distrib_idxs(self._num_parallel_subs, length)
        start = offsets[color]
        end = start + sizes[color]

        self.init_responses(length)

        # Run each parameter set.
        for i in range(start, end):

            # Set inputs.
            for j, path in enumerate(inputs):
                self.set_parameter_by_name(path, values[j][i])

            # Run workflow.
            with MPIContext():
                self.run_iteration()

            # Get outputs.
            for path in self.get_responses():
                cpath = case_paths[path]
                self.case_outputs.get(cpath)[i] = self.parent.get(path)

        if self._num_parallel_subs > 1:
            # Now, collect the results back from all parallel processes
            for path in self.get_responses():
                path = case_paths[path]
                vals = self.case_outputs.get(path)
                if self._resp_comm != MPI.COMM_NULL:
                    allvals = self._resp_comm.gather(vals, root=0)

                    if self._resp_comm.rank == 0:
                        for i in range(self._num_parallel_subs):
                            vals[offsets[i]:offsets[i] + sizes[i]] = \
                                allvals[i][offsets[i]:offsets[i] + sizes[i]]
                        junk = self.mpi.comm.bcast(vals, root=0)
                    else:
                        vals = self.mpi.comm.bcast(None, root=0)
                else:
                    vals = self.mpi.comm.bcast(vals, root=0)

                self.case_outputs.set(path, vals)
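
After every process has run its slice of the cases, the driver gathers the
partial response arrays to the root of the response communicator, stitches the
owned slices together, and broadcasts the merged array so each process ends up
with the full set of outputs. The standalone mpi4py sketch below illustrates
just that gather/broadcast pattern under assumed sizes and values; the file
name and the result contents are made up for illustration.

# Run with e.g. `mpiexec -n 4 python gather_bcast_sketch.py`.
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank, nprocs = comm.rank, comm.size

length = 10
# Same even split as in the examples: the remainder goes to the first ranks.
sizes = np.full(nprocs, length // nprocs, dtype=int)
sizes[:length % nprocs] += 1
offsets = np.zeros(nprocs, dtype=int)
offsets[1:] = np.cumsum(sizes)[:-1]
start, end = offsets[rank], offsets[rank] + sizes[rank]

# Each process fills only its own slice of the full-length result array.
vals = np.zeros(length)
vals[start:end] = np.arange(start, end) * 10.0

# The root collects every partial copy, keeps each sender's owned slice, and
# broadcasts the merged array so all ranks hold the complete results.
allvals = comm.gather(vals, root=0)
if rank == 0:
    for i in range(nprocs):
        vals[offsets[i]:offsets[i] + sizes[i]] = \
            allvals[i][offsets[i]:offsets[i] + sizes[i]]
vals = comm.bcast(vals, root=0)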