def _MPIVisualizer_visualize(d, U, **kwargs):
    d = mpi.get_object(d)
    if isinstance(U, tuple):
        U = tuple(mpi.get_object(u) for u in U)
    else:
        U = mpi.get_object(U)
    d.visualize(U, **kwargs)

def _MPIVectorAutoComm_dot(self, other):
    self = mpi.get_object(self)
    other = mpi.get_object(other)
    local_result = self.dot(other)
    assert local_result.dtype == np.float64
    results = np.empty((mpi.size,), dtype=np.float64) if mpi.rank0 else None
    mpi.comm.Gather(local_result, results, root=0)
    if mpi.rank0:
        return np.sum(results)

def _MPIVectorArrayAutoComm_pairwise_dot(self, other, ind=None, o_ind=None):
    self = mpi.get_object(self)
    other = mpi.get_object(other)
    local_results = self.pairwise_dot(other, ind=ind, o_ind=o_ind)
    assert local_results.dtype == np.float64
    results = np.empty((mpi.size,) + local_results.shape, dtype=np.float64) if mpi.rank0 else None
    mpi.comm.Gather(local_results, results, root=0)
    if mpi.rank0:
        return np.sum(results, axis=0)

def mpi_wrap_operator(obj_id, mpi_range, mpi_source, with_apply2=False,
                      pickle_local_spaces=True, space_type=MPIVectorSpace):
    """Wrap MPI distributed local |Operators| to a global |Operator| on rank 0.

    Given MPI distributed local |Operators| referred to by the
    :class:`~pymor.tools.mpi.ObjectId` `obj_id`, return a new |Operator|
    which manages these distributed operators from rank 0. This
    is done by instantiating :class:`MPIOperator`. Additionally, the
    structure of the wrapped operators is preserved. E.g. |LincombOperators|
    will be wrapped as a |LincombOperator| of :class:`MPIOperators <MPIOperator>`.

    Parameters
    ----------
    See :class:`MPIOperator`.

    Returns
    -------
    The wrapped |Operator|.
    """
    op = mpi.get_object(obj_id)
    if isinstance(op, LincombOperator):
        obj_ids = mpi.call(_mpi_wrap_operator_LincombOperator_manage_operators, obj_id)
        return LincombOperator([mpi_wrap_operator(o, mpi_range, mpi_source, with_apply2,
                                                  pickle_local_spaces, space_type)
                                for o in obj_ids], op.coefficients, name=op.name)
    elif isinstance(op, VectorArrayOperator):
        array_obj_id, local_spaces = mpi.call(_mpi_wrap_operator_VectorArrayOperator_manage_array,
                                              obj_id, pickle_local_spaces)
        if all(ls == local_spaces[0] for ls in local_spaces):
            local_spaces = (local_spaces[0],)
        return VectorArrayOperator(space_type(local_spaces).make_array(array_obj_id),
                                   adjoint=op.adjoint, name=op.name)
    else:
        return MPIOperator(obj_id, mpi_range, mpi_source, with_apply2, pickle_local_spaces, space_type)

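# Usage sketch (added for illustration, not part of the original module):
# `make_local_operator` is a hypothetical, argument-free factory that assembles the
# MPI-distributed local |Operator| on each rank.  Apart from that assumption, only
# calls that already appear in this module / in pymor.tools.mpi are used.
def _example_wrap_distributed_operator(make_local_operator):
    obj_id = mpi.call(mpi.function_call_manage, make_local_operator)  # build and register on all ranks
    op = mpi_wrap_operator(obj_id, mpi_range=True, mpi_source=True)   # global |Operator| on rank 0
    return op.apply(op.source.zeros())                                # used like any other |Operator|
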
def _mpi_wrap_operator_VectorArrayOperator_manage_array(obj_id):
    op = mpi.get_object(obj_id)
    array_obj_id = mpi.manage_object(op._array)
    subtypes = mpi.comm.gather(op._array.subtype, root=0)
    mpi.remove_object(obj_id)
    if mpi.rank0:
        return array_obj_id, tuple(subtypes)

def __init__(self, obj_id, mpi_range, mpi_source, with_apply2=False,
             pickle_local_spaces=True, space_type=MPIVectorSpace):
    assert mpi_source or mpi_range
    self.obj_id = obj_id
    self.mpi_source = mpi_source
    self.mpi_range = mpi_range
    self.op = op = mpi.get_object(obj_id)
    self.with_apply2 = with_apply2
    self.pickle_local_spaces = pickle_local_spaces
    self.space_type = space_type
    self.linear = op.linear
    self.name = op.name
    self.build_parameter_type(op)
    if mpi_source:
        local_spaces = mpi.call(_MPIOperator_get_local_spaces, obj_id, True, pickle_local_spaces)
        if all(ls == local_spaces[0] for ls in local_spaces):
            local_spaces = (local_spaces[0],)
        self.source = space_type(local_spaces)
    else:
        self.source = op.source
    if mpi_range:
        local_spaces = mpi.call(_MPIOperator_get_local_spaces, obj_id, False, pickle_local_spaces)
        if all(ls == local_spaces[0] for ls in local_spaces):
            local_spaces = (local_spaces[0],)
        self.range = space_type(local_spaces)
    else:
        self.range = op.range

def mpi_wrap_operator(obj_id, functional=False, vector=False, with_apply2=False,
                      pickle_subtypes=True, array_type=MPIVectorArray):
    """Wrap MPI distributed local |Operators| to a global |Operator| on rank 0.

    Given MPI distributed local |Operators| referred to by the
    `~pymor.tools.mpi.ObjectId` `obj_id`, return a new |Operator|
    which manages these distributed operators from rank 0. This
    is done by instantiating :class:`MPIOperator`. Additionally, the
    structure of the wrapped operators is preserved. E.g. |LincombOperators|
    will be wrapped as a |LincombOperator| of :class:`MPIOperators`.

    Parameters
    ----------
    See :class:`MPIOperator`.

    Returns
    -------
    The wrapped |Operator|.
    """
    op = mpi.get_object(obj_id)
    if isinstance(op, LincombOperator):
        obj_ids = mpi.call(_mpi_wrap_operator_LincombOperator_manage_operators, obj_id)
        return LincombOperator([mpi_wrap_operator(o, functional, vector, with_apply2,
                                                  pickle_subtypes, array_type)
                                for o in obj_ids], op.coefficients, name=op.name)
    elif isinstance(op, VectorArrayOperator):
        array_obj_id, subtypes = mpi.call(_mpi_wrap_operator_VectorArrayOperator_manage_array,
                                          obj_id, pickle_subtypes)
        if all(subtype == subtypes[0] for subtype in subtypes):
            subtypes = (subtypes[0],)
        return VectorArrayOperator(array_type(type(op._array), subtypes, array_obj_id),
                                   transposed=op.transposed, name=op.name)
    else:
        return MPIOperator(obj_id, functional, vector, with_apply2, pickle_subtypes, array_type)

def __init__(self, obj_id, functional=False, vector=False, with_apply2=False,
             pickle_subtypes=True, array_type=MPIVectorArray):
    assert not (functional and vector)
    self.obj_id = obj_id
    self.op = op = mpi.get_object(obj_id)
    self.functional = functional
    self.vector = vector
    self.with_apply2 = with_apply2
    self.pickle_subtypes = pickle_subtypes
    self.array_type = array_type
    self.linear = op.linear
    self.name = op.name
    self.build_parameter_type(inherits=(op,))
    if vector:
        self.source = NumpyVectorSpace(1)
        assert self.source == op.source
    else:
        subtypes = mpi.call(_MPIOperator_get_source_subtypes, obj_id, pickle_subtypes)
        if all(subtype == subtypes[0] for subtype in subtypes):
            subtypes = (subtypes[0],)
        self.source = VectorSpace(array_type, (op.source.type, subtypes))
    if functional:
        self.range = NumpyVectorSpace(1)
        assert self.range == op.range
    else:
        subtypes = mpi.call(_MPIOperator_get_range_subtypes, obj_id, pickle_subtypes)
        if all(subtype == subtypes[0] for subtype in subtypes):
            subtypes = (subtypes[0],)
        self.range = VectorSpace(array_type, (op.range.type, subtypes))

def mpi_wrap_discretization(obj_id, use_with=False, with_apply2=False, array_type=MPIVectorArray):
    """Wrap MPI distributed local |Discretizations| to a global |Discretization| on rank 0.

    Given MPI distributed local |Discretizations| referred to by the
    `~pymor.tools.mpi.ObjectId` `obj_id`, return a new |Discretization|
    which manages these distributed discretizations from rank 0. This
    is done by first wrapping all |Operators| of the |Discretization| using
    :func:`~pymor.operators.mpi.mpi_wrap_operator`.

    When `use_with` is `False`, an :class:`MPIDiscretization` is instantiated
    with the wrapped operators. A call to
    :meth:`~pymor.discretizations.interfaces.DiscretizationInterface.solve`
    will then use an MPI parallel call to the
    :meth:`~pymor.discretizations.interfaces.DiscretizationInterface.solve`
    methods of the wrapped local |Discretizations| to obtain the solution.
    This is usually what you want when the actual solve is performed by
    an implementation in the external solver.

    When `use_with` is `True`, :meth:`~pymor.core.interfaces.ImmutableInterface.with_`
    is called on the local |Discretization| on rank 0, to obtain a new
    |Discretization| with the wrapped MPI |Operators|. This is mainly useful
    when the local discretizations are generic |Discretizations| as in
    :mod:`pymor.discretizations.basic` and
    :meth:`~pymor.discretizations.interfaces.DiscretizationInterface.solve`
    is implemented directly in pyMOR via operations on the contained |Operators|.

    Parameters
    ----------
    obj_id
        :class:`~pymor.tools.mpi.ObjectId` of the local |Discretization|
        on each rank.
    use_with
        See above.
    with_apply2
        See :class:`~pymor.operators.mpi.MPIOperator`.
    array_type
        See :class:`~pymor.operators.mpi.MPIOperator`.
    """
    operators, functionals, vectors, products = \
        mpi.call(_mpi_wrap_discretization_manage_operators, obj_id)
    operators = {k: mpi_wrap_operator(v, with_apply2=with_apply2, array_type=array_type) if v else None
                 for k, v in operators.items()}
    functionals = {k: mpi_wrap_operator(v, functional=True, with_apply2=with_apply2, array_type=array_type)
                   if v else None
                   for k, v in functionals.items()}
    vectors = {k: mpi_wrap_operator(v, vector=True, with_apply2=with_apply2, array_type=array_type)
               if v else None
               for k, v in vectors.items()}
    products = {k: mpi_wrap_operator(v, with_apply2=with_apply2, array_type=array_type) if v else None
                for k, v in products.items()} if products else None

    if use_with:
        d = mpi.get_object(obj_id)
        visualizer = MPIVisualizer(obj_id)
        return d.with_(operators=operators, functionals=functionals, vector_operators=vectors,
                       products=products, visualizer=visualizer, cache_region=None)
    else:
        return MPIDiscretization(obj_id, operators, functionals, vectors, products, array_type=array_type)

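# Usage sketch for the two `use_with` modes described above (illustrative only;
# `obj_id` is assumed to already refer to the local |Discretizations| on each rank):
def _example_wrap_discretization(obj_id):
    # use_with=False: returns an MPIDiscretization; solve() dispatches one MPI-parallel
    # call to the local solve() implementations (typically inside an external solver).
    d_mpi = mpi_wrap_discretization(obj_id, use_with=False)
    # use_with=True: rank 0's discretization is rebuilt via with_() around MPI |Operators|,
    # so solve() runs pyMOR's own algorithms on the wrapped operators.
    d_with = mpi_wrap_discretization(obj_id, use_with=True)
    return d_mpi, d_with
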
def _apply_only(self, function, worker, *args, **kwargs):
    payload = mpi.get_object(self._payload)
    payload[0] = (function, args, kwargs)
    try:
        result = mpi.call(mpi.function_call, _single_worker_call_function, self._payload, worker)
    finally:
        payload[0] = None
    return result

def _MPIDiscretization_get_local_spaces(self, pickle_local_spaces):
    self = mpi.get_object(self)
    local_space = self.solution_space
    if not pickle_local_spaces:
        local_space = _register_local_space(local_space)
    local_spaces = mpi.comm.gather(local_space, root=0)
    if mpi.rank0:
        return tuple(local_spaces)

def _map(self, function, chunks, **kwargs):
    payload = mpi.get_object(self._payload)
    payload[0] = chunks
    try:
        result = mpi.call(mpi.function_call, _worker_map_function, self._payload, function, **kwargs)
    finally:
        payload[0] = None
    return result

def _MPIDiscretization_get_subtypes(self, pickle_subtypes):
    self = mpi.get_object(self)
    subtype = self.solution_space.subtype
    if not pickle_subtypes:
        subtype = _register_subtype(subtype)
    subtypes = mpi.comm.gather(subtype, root=0)
    if mpi.rank0:
        return tuple(subtypes)

def _MPIOperator_get_range_subtypes(self, pickle_subtypes):
    self = mpi.get_object(self)
    subtype = self.range.subtype
    if not pickle_subtypes:
        subtype = _register_subtype(subtype)
    subtypes = mpi.comm.gather(subtype, root=0)
    if mpi.rank0:
        return tuple(subtypes)

def _mpi_wrap_discretization_manage_operators(obj_id):
    d = mpi.get_object(obj_id)
    operators = {k: mpi.manage_object(v) if v else None for k, v in sorted(d.operators.items())}
    functionals = {k: mpi.manage_object(v) if v else None for k, v in sorted(d.functionals.items())}
    vectors = {k: mpi.manage_object(v) if v else None for k, v in sorted(d.vector_operators.items())}
    products = {k: mpi.manage_object(v) if v else None
                for k, v in sorted(d.products.items())} if d.products else None
    if mpi.rank0:
        return operators, functionals, vectors, products

def _MPIVectorArrayAutoComm_l2_norm2(self, ind=None):
    self = mpi.get_object(self)
    local_results = self.l2_norm2(ind=ind)
    assert local_results.dtype == np.float64
    results = np.empty((mpi.size,) + local_results.shape, dtype=np.float64) if mpi.rank0 else None
    mpi.comm.Gather(local_results, results, root=0)
    if mpi.rank0:
        return np.sum(results, axis=0)

def _MPIOperator_get_local_spaces(self, source, pickle_local_spaces):
    self = mpi.get_object(self)
    local_space = self.source if source else self.range
    if not pickle_local_spaces:
        local_space = _register_local_space(local_space)
    local_spaces = mpi.comm.gather(local_space, root=0)
    if mpi.rank0:
        return tuple(local_spaces)

def _MPIVectorAutoComm_l2_norm(self):
    self = mpi.get_object(self)
    local_result = self.l2_norm2()
    assert local_result.dtype == np.float64
    results = np.empty((mpi.size,), dtype=np.float64) if mpi.rank0 else None
    mpi.comm.Gather(local_result, results, root=0)
    if mpi.rank0:
        return np.sqrt(np.sum(results))

def _mpi_wrap_operator_VectorArrayOperator_manage_array(obj_id, pickle_subtypes):
    op = mpi.get_object(obj_id)
    array_obj_id = mpi.manage_object(op._array)
    subtype = op._array.subtype
    if not pickle_subtypes:
        subtype = _register_subtype(subtype)
    subtypes = mpi.comm.gather(subtype, root=0)
    mpi.remove_object(obj_id)
    if mpi.rank0:
        return array_obj_id, tuple(subtypes)

def _mpi_wrap_operator_VectorArrayOperator_manage_array(obj_id, pickle_local_spaces):
    op = mpi.get_object(obj_id)
    array_obj_id = mpi.manage_object(op._array)
    local_space = op._array.space
    if not pickle_local_spaces:
        local_space = _register_local_space(local_space)
    local_spaces = mpi.comm.gather(local_space, root=0)
    mpi.remove_object(obj_id)
    if mpi.rank0:
        return array_obj_id, tuple(local_spaces)

def __init__(self, obj_id, operators, products=None, pickle_local_spaces=True, space_type=MPIVectorSpace):
    d = mpi.get_object(obj_id)
    visualizer = MPIVisualizer(obj_id)
    super().__init__(operators=operators, products=products, visualizer=visualizer,
                     cache_region=None, name=d.name)
    self.obj_id = obj_id
    local_spaces = mpi.call(_MPIDiscretization_get_local_spaces, obj_id, pickle_local_spaces)
    if all(ls == local_spaces[0] for ls in local_spaces):
        local_spaces = (local_spaces[0],)
    self.solution_space = space_type(local_spaces)
    self.build_parameter_type(d)
    self.parameter_space = d.parameter_space

def _MPIVectorArrayAutoComm_amax(self, ind=None):
    self = mpi.get_object(self)
    local_inds, local_vals = self.amax(ind=ind)
    assert local_inds.dtype == np.int64
    assert local_vals.dtype == np.float64
    inds = np.empty((mpi.size,) + local_inds.shape, dtype=np.int64) if mpi.rank0 else None
    vals = np.empty((mpi.size,) + local_inds.shape, dtype=np.float64) if mpi.rank0 else None
    mpi.comm.Gather(local_inds, inds, root=0)
    mpi.comm.Gather(local_vals, vals, root=0)
    if mpi.rank0:
        return inds, vals

def __init__(self, obj_id, operators, functionals, vector_operators, products=None,
             pickle_subtypes=True, array_type=MPIVectorArray):
    d = mpi.get_object(obj_id)
    visualizer = MPIVisualizer(obj_id)
    super(MPIDiscretization, self).__init__(operators, functionals, vector_operators, products=products,
                                            visualizer=visualizer, cache_region=None, name=d.name)
    self.obj_id = obj_id
    subtypes = mpi.call(_MPIDiscretization_get_subtypes, obj_id, pickle_subtypes)
    if all(subtype == subtypes[0] for subtype in subtypes):
        subtypes = (subtypes[0],)
    self.solution_space = VectorSpace(array_type, (d.solution_space.type, subtypes))
    self.build_parameter_type(inherits=(d,))
    self.parameter_space = d.parameter_space

def assemble_lincomb(self, operators, coefficients, solver_options=None, name=None):
    if not all(isinstance(op, MPIOperator) for op in operators):
        return None
    assert solver_options is None
    operators = [op.obj_id for op in operators]
    obj_id = mpi.call(_MPIOperator_assemble_lincomb, operators, coefficients, name=name)
    op = mpi.get_object(obj_id)
    if op is None:
        mpi.call(mpi.remove_object, obj_id)
        return None
    else:
        return self.with_(obj_id=obj_id)

def _MPIVectorArrayAutoComm_components(self, offsets, component_indices, ind=None):
    self = mpi.get_object(self)
    offset = offsets[mpi.rank]
    dim = self.dim
    my_indices = np.logical_and(component_indices >= offset, component_indices < offset + dim)
    local_results = np.zeros((self.len_ind(ind), len(component_indices)))
    local_results[:, my_indices] = self.components(component_indices[my_indices] - offset, ind=ind)
    assert local_results.dtype == np.float64
    results = np.empty((mpi.size,) + local_results.shape, dtype=np.float64) if mpi.rank0 else None
    mpi.comm.Gather(local_results, results, root=0)
    if mpi.rank0:
        return np.sum(results, axis=0)

def _MPIVectorArrayAutoComm_amax(self):
    self = mpi.get_object(self)
    local_inds, local_vals = self.amax()
    assert local_inds.dtype == np.int64
    assert local_vals.dtype == np.float64
    inds = np.empty((mpi.size,) + local_inds.shape, dtype=np.int64) if mpi.rank0 else None
    vals = np.empty((mpi.size,) + local_inds.shape, dtype=np.float64) if mpi.rank0 else None
    mpi.comm.Gather(local_inds, inds, root=0)
    mpi.comm.Gather(local_vals, vals, root=0)
    if mpi.rank0:
        return inds, vals

def _MPIVectorArrayAutoComm_dofs(self, offsets, dof_indices):
    self = mpi.get_object(self)
    offset = offsets[mpi.rank]
    dim = self.dim
    my_indices = np.logical_and(dof_indices >= offset, dof_indices < offset + dim)
    local_results = np.zeros((len(self), len(dof_indices)))
    local_results[:, my_indices] = self.dofs(dof_indices[my_indices] - offset)
    assert local_results.dtype == np.float64
    results = np.empty((mpi.size,) + local_results.shape, dtype=np.float64) if mpi.rank0 else None
    mpi.comm.Gather(local_results, results, root=0)
    if mpi.rank0:
        return np.sum(results, axis=0)

def _mpi_wrap_discretization_manage_operators(obj_id):
    d = mpi.get_object(obj_id)
    operators = {k: mpi.manage_object(v) if v else None for k, v in sorted(d.operators.items())}
    functionals = {k: mpi.manage_object(v) if v else None for k, v in sorted(d.functionals.items())}
    vectors = {k: mpi.manage_object(v) if v else None for k, v in sorted(d.vector_operators.items())}
    products = {k: mpi.manage_object(v) if v else None
                for k, v in sorted(d.products.items())} if d.products else None
    if mpi.rank0:
        return operators, functionals, vectors, products

def _mpi_wrap_model_manage_operators(obj_id, mpi_spaces, use_with, base_type):
    m = mpi.get_object(obj_id)
    attributes_to_consider = m.with_arguments if use_with else base_type._init_arguments
    attributes = {k: getattr(m, k) for k in attributes_to_consider}

    def process_attribute(v):
        if isinstance(v, OperatorInterface):
            mpi_range = type(v.range) in mpi_spaces or v.range.id in mpi_spaces
            mpi_source = type(v.source) in mpi_spaces or v.source.id in mpi_spaces
            if mpi_range or mpi_source:
                return _OperatorToWrap(mpi.manage_object(v), mpi_range, mpi_source)
            else:
                return v
        else:
            return v

    managed_attributes = {k: _map_children(process_attribute, v)
                          for k, v in sorted(attributes.items())
                          if k not in {'cache_region', 'visualizer'}}
    if mpi.rank0:
        return managed_attributes

def _mpi_wrap_model_manage_operators(obj_id, mpi_spaces, use_with, base_type):
    m = mpi.get_object(obj_id)
    attributes_to_consider = m._init_arguments if use_with else base_type._init_arguments
    attributes = {k: getattr(m, k) for k in attributes_to_consider}

    def process_attribute(v):
        if isinstance(v, Operator):
            mpi_range = type(v.range) in mpi_spaces or v.range.id in mpi_spaces
            mpi_source = type(v.source) in mpi_spaces or v.source.id in mpi_spaces
            if mpi_range or mpi_source:
                return _OperatorToWrap(mpi.manage_object(v), mpi_range, mpi_source)
            else:
                return v
        else:
            return v

    managed_attributes = {k: _map_children(process_attribute, v)
                          for k, v in sorted(attributes.items())
                          if k not in {'cache_region', 'visualizer'}}
    if mpi.rank0:
        return managed_attributes

def __init__(self, obj_id, mpi_range, mpi_source, with_apply2=False,
             pickle_local_spaces=True, space_type=MPIVectorSpace):
    assert mpi_source or mpi_range
    self.__auto_init(locals())
    self.op = op = mpi.get_object(obj_id)
    self.linear = op.linear
    self.name = op.name
    self.build_parameter_type(op)
    if mpi_source:
        local_spaces = mpi.call(_MPIOperator_get_local_spaces, obj_id, True, pickle_local_spaces)
        if all(ls == local_spaces[0] for ls in local_spaces):
            local_spaces = (local_spaces[0],)
        self.source = space_type(local_spaces)
    else:
        self.source = op.source
    if mpi_range:
        local_spaces = mpi.call(_MPIOperator_get_local_spaces, obj_id, False, pickle_local_spaces)
        if all(ls == local_spaces[0] for ls in local_spaces):
            local_spaces = (local_spaces[0],)
        self.range = space_type(local_spaces)
    else:
        self.range = op.range
    self.solver_options = op.solver_options

def mpi_wrap_model(local_models, mpi_spaces=('STATE',), use_with=True, with_apply2=False,
                   pickle_local_spaces=True, space_type=MPIVectorSpace, base_type=None):
    """Wrap MPI distributed local |Models| to a global |Model| on rank 0.

    Given MPI distributed local |Models| referred to by the
    :class:`~pymor.tools.mpi.ObjectId` `local_models`, return a new |Model|
    which manages these distributed models from rank 0. This is done by first
    wrapping all |Operators| of the |Model| using
    :func:`~pymor.operators.mpi.mpi_wrap_operator`.

    Alternatively, `local_models` can be a callable (with no arguments)
    which is then called on each rank to instantiate the local |Models|.

    When `use_with` is `False`, an :class:`MPIModel` is instantiated
    with the wrapped operators. A call to
    :meth:`~pymor.models.interfaces.ModelInterface.solve`
    will then use an MPI parallel call to the
    :meth:`~pymor.models.interfaces.ModelInterface.solve`
    methods of the wrapped local |Models| to obtain the solution.
    This is usually what you want when the actual solve is performed by
    an implementation in the external solver.

    When `use_with` is `True`, :meth:`~pymor.core.interfaces.ImmutableInterface.with_`
    is called on the local |Model| on rank 0, to obtain a new |Model|
    with the wrapped MPI |Operators|. This is mainly useful when the local
    models are generic |Models| as in :mod:`pymor.models.basic` and
    :meth:`~pymor.models.interfaces.ModelInterface.solve`
    is implemented directly in pyMOR via operations on the contained |Operators|.

    Parameters
    ----------
    local_models
        :class:`~pymor.tools.mpi.ObjectId` of the local |Models|
        on each rank or a callable generating the |Models|.
    mpi_spaces
        List of types or ids of |VectorSpaces| which are MPI distributed
        and need to be wrapped.
    use_with
        See above.
    with_apply2
        See :class:`~pymor.operators.mpi.MPIOperator`.
    pickle_local_spaces
        See :class:`~pymor.operators.mpi.MPIOperator`.
    space_type
        See :class:`~pymor.operators.mpi.MPIOperator`.
    """
    assert use_with or isinstance(base_type, ModelInterface)

    if not isinstance(local_models, mpi.ObjectId):
        local_models = mpi.call(mpi.function_call_manage, local_models)

    attributes = mpi.call(_mpi_wrap_model_manage_operators, local_models, mpi_spaces, use_with, base_type)

    wrapped_attributes = {
        k: _map_children(lambda v: mpi_wrap_operator(*v, with_apply2=with_apply2,
                                                     pickle_local_spaces=pickle_local_spaces,
                                                     space_type=space_type)
                         if isinstance(v, _OperatorToWrap) else v, v)
        for k, v in attributes.items()
    }

    if use_with:
        m = mpi.get_object(local_models)
        if m.visualizer:
            wrapped_attributes['visualizer'] = MPIVisualizer(local_models)
        m = m.with_(**wrapped_attributes)
        m.disable_caching()
        return m
    else:

        class MPIWrappedModel(MPIModel, base_type):
            pass

        return MPIWrappedModel(local_models, **wrapped_attributes)

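# Usage sketch (illustrative; assumes the non-root ranks are executing pyMOR's MPI
# event loop (pymor.tools.mpi.event_loop) so that mpi.call reaches them, and that
# `build_fom` is a hypothetical argument-free factory creating the local |Model| on each rank):
def _example_wrap_model(build_fom):
    fom = mpi_wrap_model(build_fom, use_with=True)  # ordinary |Model| on rank 0 with MPI |Operators|
    return fom                                      # fom.solve(mu) now triggers MPI-parallel operator calls
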
def _MPIVectorArray_axpy(obj_id, alpha, x_obj_id):
    obj = mpi.get_object(obj_id)
    x = mpi.get_object(x_obj_id)
    obj.axpy(alpha, x)

def _MPIVectorSpace_check_local_spaces(local_spaces, obj_id):
    U = mpi.get_object(obj_id)
    local_space = _get_local_space(local_spaces)
    results = mpi.comm.gather(U in local_space, root=0)
    if mpi.rank0:
        return np.all(results)

def _MPIVectorAutoComm_dim(self):
    self = mpi.get_object(self)
    dims = mpi.comm.gather(self.dim, root=0)
    if mpi.rank0:
        return sum(dims)

def _MPIVectorArray_axpy(obj_id, alpha, x_obj_id, ind=None, x_ind=None):
    obj = mpi.get_object(obj_id)
    x = mpi.get_object(x_obj_id)
    obj.axpy(alpha, x, ind=ind, x_ind=x_ind)

def _MPIVectorArray_dim(obj_id):
    obj = mpi.get_object(obj_id)
    return obj.dim

def _mpi_wrap_operator_LincombOperator_manage_operators(obj_id):
    op = mpi.get_object(obj_id)
    obj_ids = [mpi.manage_object(o) for o in op.operators]
    mpi.remove_object(obj_id)
    if mpi.rank0:
        return obj_ids

def _MPIOperator_assemble_lincomb(operators, coefficients, name):
    operators = [mpi.get_object(op) for op in operators]
    return mpi.manage_object(operators[0].assemble_lincomb(operators, coefficients, name=name))

def mpi_wrap_discretization(local_discretizations, use_with=False, with_apply2=False,
                            pickle_subtypes=True, array_type=MPIVectorArray):
    """Wrap MPI distributed local |Discretizations| to a global |Discretization| on rank 0.

    Given MPI distributed local |Discretizations| referred to by the
    `~pymor.tools.mpi.ObjectId` `local_discretizations`, return a new |Discretization|
    which manages these distributed discretizations from rank 0. This
    is done by first wrapping all |Operators| of the |Discretization| using
    :func:`~pymor.operators.mpi.mpi_wrap_operator`.

    Alternatively, `local_discretizations` can be a callable (with no arguments)
    which is then called to instantiate the local |Discretizations| on each rank.

    When `use_with` is `False`, an :class:`MPIDiscretization` is instantiated
    with the wrapped operators. A call to
    :meth:`~pymor.discretizations.interfaces.DiscretizationInterface.solve`
    will then use an MPI parallel call to the
    :meth:`~pymor.discretizations.interfaces.DiscretizationInterface.solve`
    methods of the wrapped local |Discretizations| to obtain the solution.
    This is usually what you want when the actual solve is performed by
    an implementation in the external solver.

    When `use_with` is `True`, :meth:`~pymor.core.interfaces.ImmutableInterface.with_`
    is called on the local |Discretization| on rank 0, to obtain a new
    |Discretization| with the wrapped MPI |Operators|. This is mainly useful
    when the local discretizations are generic |Discretizations| as in
    :mod:`pymor.discretizations.basic` and
    :meth:`~pymor.discretizations.interfaces.DiscretizationInterface.solve`
    is implemented directly in pyMOR via operations on the contained |Operators|.

    Parameters
    ----------
    local_discretizations
        :class:`~pymor.tools.mpi.ObjectId` of the local |Discretizations|
        on each rank or a callable generating the |Discretizations|.
    use_with
        See above.
    with_apply2
        See :class:`~pymor.operators.mpi.MPIOperator`.
    pickle_subtypes
        See :class:`~pymor.operators.mpi.MPIOperator`.
    array_type
        See :class:`~pymor.operators.mpi.MPIOperator`.
    """
    if not isinstance(local_discretizations, mpi.ObjectId):
        local_discretizations = mpi.call(mpi.function_call_manage, local_discretizations)

    operators, functionals, vectors, products = \
        mpi.call(_mpi_wrap_discretization_manage_operators, local_discretizations)
    operators = {k: mpi_wrap_operator(v, with_apply2=with_apply2, pickle_subtypes=pickle_subtypes,
                                      array_type=array_type) if v else None
                 for k, v in operators.items()}
    functionals = {k: mpi_wrap_operator(v, functional=True, with_apply2=with_apply2,
                                        pickle_subtypes=pickle_subtypes, array_type=array_type)
                   if v else None
                   for k, v in functionals.items()}
    vectors = {k: mpi_wrap_operator(v, vector=True, with_apply2=with_apply2,
                                    pickle_subtypes=pickle_subtypes, array_type=array_type)
               if v else None
               for k, v in vectors.items()}
    products = {k: mpi_wrap_operator(v, with_apply2=with_apply2, pickle_subtypes=pickle_subtypes,
                                     array_type=array_type) if v else None
                for k, v in products.items()} if products else None

    if use_with:
        d = mpi.get_object(local_discretizations)
        visualizer = MPIVisualizer(local_discretizations)
        return d.with_(operators=operators, functionals=functionals, vector_operators=vectors,
                       products=products, visualizer=visualizer, cache_region=None)
    else:
        return MPIDiscretization(local_discretizations, operators, functionals, vectors, products,
                                 pickle_subtypes=pickle_subtypes, array_type=array_type)

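# Usage sketch mirroring the callable support described above (illustrative;
# `make_local_discretization` is a hypothetical argument-free factory that is
# called on every rank via mpi.function_call_manage):
def _example_wrap_discretization_from_factory(make_local_discretization):
    d = mpi_wrap_discretization(make_local_discretization, use_with=False)  # -> MPIDiscretization
    return d  # d.solve(mu) performs one MPI-parallel call to the local solve() methods
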
def _MPIVector_axpy(obj_id, alpha, x_obj_id):
    obj = mpi.get_object(obj_id)
    x = mpi.get_object(x_obj_id)
    obj.axpy(alpha, x)