Example #1
    def __init__(
        self,
        ary: np.ndarray,
        *,
        acenter: Optional[AttributeCenter] = None,
        name: Optional[str] = None,
    ):
        """
        :param ary: if this is an :class:`object` array, each entry is
            considered a different component and is stored as a separate
            :class:`DataItem`.
        """

        if ary.dtype.char == "O":
            from pytools import is_single_valued
            if not is_single_valued(iary.shape for iary in ary):
                raise ValueError("'ary' components must have the same size")

            items = tuple([
                _data_item_from_numpy(iary, name=f"{name}_{i}")
                for i, iary in enumerate(ary)
            ])
        else:
            items = (_data_item_from_numpy(ary, name=name), )

        super().__init__(items, name=name, acenter=acenter)
        self.ary = ary
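
For orientation, a minimal, hypothetical sketch of the object-array layout this constructor expects (assuming the surrounding class is pytools.xdmf's DataArray; the constructor call is left commented since the name is an assumption):

import numpy as np

# One entry per vector component; all components must share a shape,
# otherwise the constructor above raises ValueError.
vec = np.empty(3, dtype=object)
for i in range(3):
    vec[i] = np.linspace(0.0, 1.0, 128)

# Each component then becomes its own DataItem, named "velocity_0",
# "velocity_1", "velocity_2":
# ary = DataArray(vec, name="velocity")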
Example #2
    def map_min(self, expr, enclosing_prec, *args, **kwargs):
        from pytools import is_single_valued
        if is_single_valued(expr.children):
            return self.rec(expr.children[0], enclosing_prec)

        what = type(expr).__name__.lower()
        return self.format(r"\%s(%s)",
                what, self.join_rec(", ", expr.children, PREC_NONE, *args, **kwargs))
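
A short, hedged sketch of the intended rendering (assuming a pymbolic-style LaTeX mapper; the same body presumably also serves map_max via the type(expr).__name__ lookup):

# Hypothetical inputs and outputs for the mapper method above:
#   Min((x, y))  ->  r"\min(x, y)"
#   Min((x, x))  ->  "x"    (children are single-valued, so just recurse)
#   Max((x, y))  ->  r"\max(x, y)"
what = "Min".lower()
print(rf"\{what}(x, y)")    # -> \min(x, y)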
Example #3
def make_dict_of_named_arrays(data: Dict[str, Array]) -> DictOfNamedArrays:
    """Make a :class:`DictOfNamedArrays` object and ensure that all arrays
    share the same namespace.

    :param data: member keys and arrays
    """
    if not is_single_valued(ary.namespace for ary in data.values()):
        raise ValueError("arrays do not have same namespace")

    return DictOfNamedArrays(data)
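
All of these examples lean on the same pytools helper. Its semantics, for reference (a sketch based on recent pytools releases):

from pytools import is_single_valued

assert is_single_valued([1, 1, 1])
assert not is_single_valued([1, 2])
assert is_single_valued(len(s) for s in ("ab", "cd"))   # generators work too
# Note: an empty iterable raises ValueError instead of returning True.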
Example #4
    def combine(dtypes):
        # dtypes may just be a generator expr
        dtypes = list(dtypes)

        from loopy.types import LoopyType, NumpyType
        assert all(isinstance(dtype, LoopyType) for dtype in dtypes)

        if not all(isinstance(dtype, NumpyType) for dtype in dtypes):
            from pytools import is_single_valued, single_valued
            if not is_single_valued(dtypes):
                raise TypeInferenceFailure(
                        "Nothing known about operations between '%s'"
                        % ", ".join(str(dt) for dt in dtypes))

            return single_valued(dtypes)

        dtypes = [dtype.dtype for dtype in dtypes]

        result = dtypes.pop()
        while dtypes:
            other = dtypes.pop()

            if result.fields is None and other.fields is None:
                if (result, other) in [
                        (np.int32, np.float32), (np.float32, np.int32)]:
                    # numpy makes this a double. I disagree.
                    result = np.dtype(np.float32)
                else:
                    result = (
                            np.empty(0, dtype=result)
                            + np.empty(0, dtype=other)
                            ).dtype

            elif result.fields is None and other.fields is not None:
                # assume the non-native type takes over
                # (This is used for vector types.)
                result = other
            elif result.fields is not None and other.fields is None:
                # assume the non-native type takes over
                # (This is used for vector types.)
                pass
            else:
                if result is not other:
                    raise TypeInferenceFailure(
                            "nothing known about result of operation on "
                            "'%s' and '%s'" % (result, other))

        return NumpyType(result)
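
The special case for (np.int32, np.float32) exists because NumPy's own promotion widens that pair to float64, which the comment above disagrees with. A quick check:

import numpy as np

promoted = (np.empty(0, dtype=np.int32) + np.empty(0, dtype=np.float32)).dtype
print(promoted)     # float64 -- NumPy widens int32 + float32
# combine() above instead pins this combination to float32.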
Example #5
def merge_disjoint_meshes(meshes, skip_tests=False):
    if not meshes:
        raise ValueError("must pass at least one mesh")

    from pytools import is_single_valued
    if not is_single_valued(mesh.ambient_dim for mesh in meshes):
        raise ValueError("all meshes must share the same ambient dimension")

    # {{{ assemble combined vertex array

    ambient_dim = meshes[0].ambient_dim
    nvertices = sum(
            mesh.vertices.shape[-1]
            for mesh in meshes)

    vert_dtype = np.find_common_type(
            [mesh.vertices.dtype for mesh in meshes],
            [])
    vertices = np.empty(
            (ambient_dim, nvertices), vert_dtype)

    current_vert_base = 0
    vert_bases = []
    for mesh in meshes:
        mesh_nvert = mesh.vertices.shape[-1]
        vertices[:, current_vert_base:current_vert_base+mesh_nvert] = \
                mesh.vertices

        vert_bases.append(current_vert_base)
        current_vert_base += mesh_nvert

    # }}}

    # {{{ assemble new groups list

    new_groups = []

    for mesh, vert_base in zip(meshes, vert_bases):
        for group in mesh.groups:
            new_vertex_indices = group.vertex_indices + vert_base
            new_group = group.copy(vertex_indices=new_vertex_indices)
            new_groups.append(new_group)

    # }}}

    from meshmode.mesh import Mesh
    return Mesh(vertices, new_groups, skip_tests=skip_tests)
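
The core trick is that each mesh's vertex indices stay valid after concatenation as long as they are shifted by that mesh's starting offset. In isolation, with plain NumPy:

import numpy as np

verts_a = np.random.rand(2, 4)      # 4 vertices in 2D
verts_b = np.random.rand(2, 3)      # 3 more vertices
vertices = np.hstack([verts_a, verts_b])

tri_b = np.array([[0, 1, 2]])       # element indices local to the second mesh
tri_b_merged = tri_b + verts_a.shape[-1]    # shift by its vertex base, 4
assert (vertices[:, tri_b_merged] == verts_b[:, tri_b]).all()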
Example #6
    def map_is_shape_class(self, expr):
        discr = self.places.get_discretization(expr.dofdesc.geometry,
                                               expr.dofdesc.discr_stage)

        from pytools import is_single_valued
        if not is_single_valued(
                type(grp.mesh_el_group) for grp in discr.groups):
            # FIXME Conceivably, one could stick per-group bools into a DOFArray.
            raise NotImplementedError(
                "non-homogeneous element groups are not supported")

        from meshmode.mesh import _ModepyElementGroup
        meg = discr.groups[0].mesh_el_group
        if isinstance(meg, _ModepyElementGroup):
            return isinstance(meg._modepy_shape, expr.shape)
        else:
            raise TypeError(
                f"element type not supported: '{type(meg).__name__}'")
Example #7
def _join_data_items(items: Tuple[DataItem, ...],
                     *,
                     parent: Optional[Element] = None) -> DataItem:
    r"""Joins several :class:`DataItem`\ s using a :attr:`DataItemType.Function`
    as::

        JOIN($0, $1, ...)

    (Used for describing vectors from scalar data.)
    See the `Xdmf Function docs <https://www.xdmf.org/index.php/XDMF_Model_and_Format#Function>`__
    for more information.

    :returns: the newly created :class:`DataItem` that joins the input items.
    """     # noqa: E501

    if len(items) == 1:
        item = items[0]
    else:
        from pytools import is_single_valued
        if not is_single_valued(item.dimensions for item in items):
            raise ValueError("items must have the same dimension")

        dimensions = (len(items), ) + items[0].dimensions
        ids = ", ".join(f"${i}" for i in range(dimensions[0]))

        item = DataItem(
            dimensions=dimensions,
            itype=DataItemType.Function,
            ntype=None,
            precision=None,
            function=f"JOIN({ids})",
            endian=None,
            dformat=None,
        )

        for subitem in items:
            item.append(subitem)

    if parent is not None:
        parent.append(item)

    return item
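
The JOIN expression itself is plain string formatting over the item count; for three joined components:

dimensions = (3, 128)
ids = ", ".join(f"${i}" for i in range(dimensions[0]))
print(f"JOIN({ids})")   # JOIN($0, $1, $2)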
Example #8
def _find_array_context_from_args_in_context(context,
                                             supplied_array_context=None):
    from arraycontext import PyOpenCLArrayContext
    array_contexts = []
    if supplied_array_context is not None:
        if not isinstance(supplied_array_context, PyOpenCLArrayContext):
            raise TypeError(
                "first argument (if supplied) must be a PyOpenCLArrayContext, "
                f"got '{type(supplied_array_context).__name__}'")

        array_contexts.append(supplied_array_context)
    del supplied_array_context

    def look_for_array_contexts(ary):
        if isinstance(ary, DOFArray):
            if ary.array_context is not None:
                array_contexts.append(ary.array_context)
        elif isinstance(ary, np.ndarray) and ary.dtype.char == "O":
            for idx in np.ndindex(ary.shape):
                look_for_array_contexts(ary[idx])
        else:
            pass

    for val in context.values():
        look_for_array_contexts(val)

    if array_contexts:
        from pytools import is_single_valued
        if not is_single_valued(array_contexts):
            raise ValueError("arguments do not agree on an array context")

        array_context = array_contexts[0]
    else:
        array_context = None

    if not isinstance(array_context, PyOpenCLArrayContext):
        raise TypeError(
            "array context (derived from arguments) is not a "
            f"PyOpenCLArrayContext: '{type(array_context).__name__}'")

    return array_context
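
look_for_array_contexts descends into object arrays entry by entry; np.ndindex yields every index tuple, so arbitrarily nested object arrays are covered:

import numpy as np

ary = np.empty((2, 2), dtype=object)
print(list(np.ndindex(ary.shape)))      # [(0, 0), (0, 1), (1, 0), (1, 1)]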
Example #9
    def __call__(self,
                 array_context: Optional[ArrayContext] = None,
                 *,
                 profile_data=None,
                 log_quantities=None,
                 **context):
        """
        :arg array_context: only needs to be supplied if no instances of
            :class:`~meshmode.dof_array.DOFArray` with a
            :class:`~meshmode.array_context.ArrayContext`
            are supplied as part of *context*.
        """

        # {{{ figure array context

        array_contexts = []
        if array_context is not None:
            if not isinstance(array_context, ArrayContext):
                raise TypeError(
                    "first positional argument (if supplied) must be "
                    "an ArrayContext")

            array_contexts.append(array_context)
        del array_context

        def look_for_array_contexts(ary):
            if isinstance(ary, DOFArray):
                if ary.array_context is not None:
                    array_contexts.append(ary.array_context)
            elif isinstance(ary, np.ndarray) and ary.dtype.char == "O":
                for idx in np.ndindex(ary.shape):
                    look_for_array_contexts(ary[idx])
            else:
                pass

        for val in context.values():
            look_for_array_contexts(val)

        if array_contexts:
            from pytools import is_single_valued
            if not is_single_valued(array_contexts):
                raise ValueError("arguments do not agree on an array context")

            array_context = array_contexts[0]
        else:
            raise ValueError(
                "no array context given or available from arguments")

        # }}}

        # {{{ discrwb-scope evaluation

        if any((result_var.name not in
                self.discrwb._discr_scoped_subexpr_name_to_value)
               for result_var in self.discr_code.result):
            # need to do discrwb-scope evaluation
            discrwb_eval_context: Dict[str, ResultType] = {}
            self.discr_code.execute(
                self.exec_mapper_factory(array_context, discrwb_eval_context,
                                         self))

        # }}}

        return self.eval_code.execute(
            self.exec_mapper_factory(array_context, context, self),
            profile_data=profile_data,
            log_quantities=log_quantities)
Example #10
    def combine(dtype_sets):
        """
        :arg dtype_sets: A list of lists, where each of the inner lists
            consists of either zero or one type. An empty list is
            consistent with any type. A list with a type requires
            that an operation be valid in conjunction with that type.
        """
        dtype_sets = list(dtype_sets)

        from loopy.types import LoopyType, NumpyType
        assert all(
            all(isinstance(dtype, LoopyType) for dtype in dtype_set)
            for dtype_set in dtype_sets)
        assert all(0 <= len(dtype_set) <= 1 for dtype_set in dtype_sets)

        from pytools import is_single_valued

        dtypes = [dtype for dtype_set in dtype_sets for dtype in dtype_set]

        if not all(isinstance(dtype, NumpyType) for dtype in dtypes):
            if not is_single_valued(dtypes):
                raise TypeInferenceFailure(
                    "Nothing known about operations between '%s'" %
                    ", ".join(str(dtype) for dtype in dtypes))

            return [dtypes[0]]

        numpy_dtypes = [dtype.dtype for dtype in dtypes]

        if not numpy_dtypes:
            return []

        if is_single_valued(numpy_dtypes):
            return [dtypes[0]]

        result = numpy_dtypes.pop()
        while numpy_dtypes:
            other = numpy_dtypes.pop()

            if result.fields is None and other.fields is None:
                if (result, other) in [(np.int32, np.float32),
                                       (np.float32, np.int32)]:
                    # numpy makes this a double. I disagree.
                    result = np.dtype(np.float32)
                else:
                    result = (np.empty(0, dtype=result) +
                              np.empty(0, dtype=other)).dtype

            elif result.fields is None and other.fields is not None:
                # assume the non-native type takes over
                # (This is used for vector types.)
                result = other
            elif result.fields is not None and other.fields is None:
                # assume the non-native type takes over
                # (This is used for vector types.)
                pass
            else:
                if result is not other:
                    raise TypeInferenceFailure(
                        "nothing known about result of operation on "
                        "'%s' and '%s'" % (result, other))

        return [NumpyType(result)]
Example #11
File: fmmlib.py, Project: inducer/pytential
    def __init__(self, code, queue, geo_data, dtype,
            qbx_order, fmm_level_to_order,
            source_extra_kwargs,
            kernel_extra_kwargs):

        self.code = code
        self.queue = queue

        # FMMLib is CPU-only. This wrapper gets the geometry out of
        # OpenCL-land.
        self.geo_data = ToHostTransferredGeoDataWrapper(queue, geo_data)

        self.qbx_order = qbx_order

        # {{{ digest out_kernels

        from sumpy.kernel import (AxisTargetDerivative,
                DirectionalSourceDerivative, HelmholtzKernel, LaplaceKernel)

        k_names = []
        source_deriv_names = []

        def is_supported_helmknl(knl):
            if isinstance(knl, DirectionalSourceDerivative):
                source_deriv_name = knl.dir_vec_name
                knl = knl.inner_kernel
            else:
                source_deriv_name = None

            if isinstance(knl, HelmholtzKernel) and knl.dim in [2, 3]:
                k_names.append(knl.helmholtz_k_name)
                source_deriv_names.append(source_deriv_name)
                return True
            elif isinstance(knl, LaplaceKernel) and knl.dim in [2, 3]:
                k_names.append(None)
                source_deriv_names.append(source_deriv_name)
                return True

            return False

        ifgrad = False
        outputs = []
        for out_knl in self.code.out_kernels:
            if is_supported_helmknl(out_knl):
                outputs.append(())
            elif (isinstance(out_knl, AxisTargetDerivative)
                    and is_supported_helmknl(out_knl.inner_kernel)):
                outputs.append((out_knl.axis,))
                ifgrad = True
            else:
                raise NotImplementedError(
                        "only the 2/3D Laplace and Helmholtz kernel "
                        "and their derivatives are supported")

        from pytools import is_single_valued
        if not is_single_valued(source_deriv_names):
            raise ValueError("not all kernels passed are the same in "
                    "whether they represent a source derivative")

        source_deriv_name = source_deriv_names[0]
        self.outputs = outputs

        # }}}

        from pytools import single_valued
        k_name = single_valued(k_names)
        if k_name is None:
            helmholtz_k = 0
        else:
            helmholtz_k = kernel_extra_kwargs[k_name]

        dipole_vec = None
        if source_deriv_name is not None:
            dipole_vec = np.array([
                    d_i.get(queue=queue)
                    for d_i in source_extra_kwargs[source_deriv_name]],
                    order="F")

        def inner_fmm_level_to_nterms(tree, level):
            if helmholtz_k == 0:
                return fmm_level_to_order(
                        LaplaceKernel(tree.dimensions),
                        frozenset(), tree, level)
            else:
                return fmm_level_to_order(
                        HelmholtzKernel(tree.dimensions),
                        frozenset([("k", helmholtz_k)]), tree, level)

        super(QBXFMMLibExpansionWrangler, self).__init__(
                self.geo_data.tree(),

                helmholtz_k=helmholtz_k,
                dipole_vec=dipole_vec,
                dipoles_already_reordered=True,

                fmm_level_to_nterms=inner_fmm_level_to_nterms,

                ifgrad=ifgrad)
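
This example uses pytools' single_valued for k_names, which fuses the consistency check and the extraction that the source_deriv_names handling performs in two steps:

from pytools import single_valued

k_names = [None, None]
k_name = single_valued(k_names)     # returns None; fails if entries disagree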
Example #12
    def __init__(self,
                 code,
                 queue,
                 geo_data,
                 dtype,
                 qbx_order,
                 fmm_level_to_order,
                 source_extra_kwargs,
                 kernel_extra_kwargs,
                 _use_target_specific_qbx=None):
        self.code = code
        self.queue = queue

        # FMMLib is CPU-only. This wrapper gets the geometry out of
        # OpenCL-land.

        from pytential.qbx.utils import ToHostTransferredGeoDataWrapper
        geo_data = ToHostTransferredGeoDataWrapper(queue, geo_data)

        self.geo_data = geo_data
        self.qbx_order = qbx_order

        # {{{ digest target_kernels

        ifgrad = False
        outputs = []
        source_deriv_names = []
        k_names = []

        using_tsqbx = (
            _use_target_specific_qbx
            # None means use by default if possible
            or _use_target_specific_qbx is None)

        for out_knl in self.code.target_kernels:
            if not self.is_supported_helmknl_for_tsqbx(out_knl):
                if _use_target_specific_qbx:
                    raise ValueError("not all kernels passed support TSQBX")
                using_tsqbx = False

            if self.is_supported_helmknl(out_knl):
                outputs.append(())
                no_target_deriv_knl = out_knl

            elif (isinstance(out_knl, AxisTargetDerivative)
                  and self.is_supported_helmknl(out_knl.inner_kernel)):
                outputs.append((out_knl.axis, ))
                ifgrad = True
                no_target_deriv_knl = out_knl.inner_kernel

            else:
                raise ValueError("only the 2/3D Laplace and Helmholtz kernel "
                                 "and their derivatives are supported")

            source_deriv_names.append(
                no_target_deriv_knl.dir_vec_name if isinstance(
                    no_target_deriv_knl, DirectionalSourceDerivative
                ) else None)

            base_knl = out_knl.get_base_kernel()
            k_names.append(base_knl.helmholtz_k_name if isinstance(
                base_knl, HelmholtzKernel) else None)

        self.using_tsqbx = using_tsqbx
        self.outputs = outputs

        from pytools import is_single_valued

        if not is_single_valued(source_deriv_names):
            raise ValueError("not all kernels passed are the same in "
                             "whether they represent a source derivative")

        source_deriv_name = source_deriv_names[0]

        if not is_single_valued(k_names):
            raise ValueError("not all kernels passed have the same "
                             "Helmholtz parameter")

        k_name = k_names[0]

        if k_name is None:
            helmholtz_k = 0
        else:
            helmholtz_k = kernel_extra_kwargs[k_name]

        # }}}

        dipole_vec = None
        if source_deriv_name is not None:
            dipole_vec = np.array([
                d_i.get(queue=queue)
                for d_i in source_extra_kwargs[source_deriv_name]
                ], order="F")

        def inner_fmm_level_to_nterms(tree, level):
            if helmholtz_k == 0:
                return fmm_level_to_order(LaplaceKernel(tree.dimensions),
                                          frozenset(), tree, level)
            else:
                return fmm_level_to_order(HelmholtzKernel(tree.dimensions),
                                          frozenset([("k", helmholtz_k)]),
                                          tree, level)

        super().__init__(geo_data.tree(),
                         helmholtz_k=helmholtz_k,
                         dipole_vec=dipole_vec,
                         dipoles_already_reordered=True,
                         fmm_level_to_nterms=inner_fmm_level_to_nterms,
                         rotation_data=geo_data,
                         ifgrad=ifgrad)
Example #13
    def combine(dtype_sets):
        """
        :arg dtype_sets: A list of lists, where each of the inner lists
            consists of either zero or one type. An empty list is
            consistent with any type. A list with a type requires
            that an operation be valid in conjunction with that type.
        """
        dtype_sets = list(dtype_sets)

        from loopy.types import LoopyType, NumpyType
        assert all(
                all(isinstance(dtype, LoopyType) for dtype in dtype_set)
                for dtype_set in dtype_sets)
        assert all(
                0 <= len(dtype_set) <= 1
                for dtype_set in dtype_sets)

        from pytools import is_single_valued

        dtypes = [dtype
                for dtype_set in dtype_sets
                for dtype in dtype_set]

        if not all(isinstance(dtype, NumpyType) for dtype in dtypes):
            if not is_single_valued(dtypes):
                raise TypeInferenceFailure(
                        "Nothing known about operations between '%s'"
                        % ", ".join(str(dtype) for dtype in dtypes))

            return [dtypes[0]]

        numpy_dtypes = [dtype.dtype for dtype in dtypes]

        if not numpy_dtypes:
            return []

        if is_single_valued(numpy_dtypes):
            return [dtypes[0]]

        result = numpy_dtypes.pop()
        while numpy_dtypes:
            other = numpy_dtypes.pop()

            if result.fields is None and other.fields is None:
                if (result, other) in [
                        (np.int32, np.float32), (np.float32, np.int32)]:
                    # numpy makes this a double. I disagree.
                    result = np.dtype(np.float32)
                else:
                    result = (
                            np.empty(0, dtype=result)
                            + np.empty(0, dtype=other)
                            ).dtype

            elif result.fields is None and other.fields is not None:
                # assume the non-native type takes over
                # (This is used for vector types.)
                result = other
            elif result.fields is not None and other.fields is None:
                # assume the non-native type takes over
                # (This is used for vector types.)
                pass
            else:
                if result is not other:
                    raise TypeInferenceFailure(
                            "nothing known about result of operation on "
                            "'%s' and '%s'" % (result, other))

        return [NumpyType(result)]
Example #14
    def __init__(self, places, auto_where=None):
        """
        :arg places: a single geometry object, a tuple of geometry objects,
            or a mapping of symbolic names to geometry objects.
            Supported objects are
            :class:`~pytential.source.PotentialSource`,
            :class:`~pytential.target.TargetBase` and
            :class:`~meshmode.discretization.Discretization`. If this is
            a mapping, the keys that are strings must be valid Python identifiers.
        :arg auto_where: location identifier for each geometry object, used
            to denote specific discretizations, e.g. in the case where
            *places* is a :class:`~pytential.source.LayerPotentialSourceBase`.
            By default, we assume
            :class:`~pytential.symbolic.primitives.DEFAULT_SOURCE` and
            :class:`~pytential.symbolic.primitives.DEFAULT_TARGET` for
            sources and targets, respectively.
        """

        from pytential.target import TargetBase
        from pytential.source import PotentialSource
        from pytential.qbx import QBXLayerPotentialSource
        from meshmode.discretization import Discretization

        # {{{ construct dict

        self.places = {}
        self.caches = {}

        auto_source, auto_target = _prepare_auto_where(auto_where)
        if isinstance(places, QBXLayerPotentialSource):
            self.places[auto_source.geometry] = places
            auto_target = auto_source
        elif isinstance(places, TargetBase):
            self.places[auto_target.geometry] = places
            auto_source = auto_target

        if isinstance(places, (Discretization, PotentialSource)):
            self.places[auto_source.geometry] = places
            self.places[auto_target.geometry] = places
        elif isinstance(places, tuple):
            source_discr, target_discr = places
            self.places[auto_source.geometry] = source_discr
            self.places[auto_target.geometry] = target_discr
        else:
            self.places = places

        self.auto_where = (auto_source, auto_target)

        # }}}

        # {{{ validate

        # check allowed identifiers
        for name in self.places:
            if not isinstance(name, str):
                continue
            if not _is_valid_identifier(name):
                raise ValueError("`{}` is not a valid identifier".format(name))

        # check allowed types
        for p in self.places.values():
            if not isinstance(p,
                              (PotentialSource, TargetBase, Discretization)):
                raise TypeError(
                    "Values in 'places' must be discretization, targets "
                    "or layer potential sources.")

        # check ambient_dim
        from pytools import is_single_valued
        ambient_dims = [p.ambient_dim for p in self.places.values()]
        if not is_single_valued(ambient_dims):
            raise RuntimeError(
                "All 'places' must have the same ambient dimension.")

        self.ambient_dim = ambient_dims[0]
Example #15
    def __init__(self, places, auto_where=None):
        r"""
        :arg places: a single geometry object, a tuple of geometry objects,
            or a mapping of symbolic names to geometry objects.
            Supported objects are
            :class:`~pytential.source.PotentialSource`,
            :class:`~pytential.target.TargetBase` and
            :class:`~meshmode.discretization.Discretization`. If this is
            a mapping, the keys that are strings must be valid Python identifiers.
            The tuple should contain only two entries, denoting the source and
            target geometries for layer potential evaluation, identified by
            *auto_where*.

        :arg auto_where: a single or a tuple of two
            :class:`~pytential.symbolic.primitives.DOFDescriptor`\ s, or values
            that can be converted to one using
            :func:`~pytential.symbolic.primitives.as_dofdesc`. The two
            descriptors are used to define the default source and target
            geometries for layer potential evaluations.
            By default, they are set to
            :class:`~pytential.symbolic.primitives.DEFAULT_SOURCE` and
            :class:`~pytential.symbolic.primitives.DEFAULT_TARGET` for
            sources and targets, respectively.
        """

        from pytential.target import TargetBase
        from pytential.source import PotentialSource
        from pytential.qbx import QBXLayerPotentialSource
        from meshmode.discretization import Discretization

        # {{{ construct dict

        self.places = {}
        self.caches = {}

        auto_source, auto_target = _prepare_auto_where(auto_where)
        if isinstance(places, QBXLayerPotentialSource):
            self.places[auto_source.geometry] = places
            auto_target = auto_source
        elif isinstance(places, TargetBase):
            self.places[auto_target.geometry] = places
            auto_source = auto_target

        if isinstance(places, (Discretization, PotentialSource)):
            self.places[auto_source.geometry] = places
            self.places[auto_target.geometry] = places
        elif isinstance(places, tuple):
            source_discr, target_discr = places
            self.places[auto_source.geometry] = source_discr
            self.places[auto_target.geometry] = target_discr
        else:
            self.places = places

        self.auto_where = (auto_source, auto_target)

        # }}}

        # {{{ validate

        # check auto_where
        if auto_source.geometry not in self.places:
            raise ValueError("'auto_where' source geometry is not in the "
                             f"collection: '{auto_source.geometry}'")

        if auto_target.geometry not in self.places:
            raise ValueError("'auto_where' target geometry is not in the "
                             f"collection: '{auto_target.geometry}'")

        # check allowed identifiers
        for name in self.places:
            if not isinstance(name, str):
                continue
            if not _is_valid_identifier(name):
                raise ValueError(f"'{name}' is not a valid identifier")

        # check allowed types
        for p in self.places.values():
            if not isinstance(p,
                              (PotentialSource, TargetBase, Discretization)):
                raise TypeError(
                    "Values in 'places' must be discretization, targets "
                    f"or layer potential sources, got '{type(p).__name__}'")

        # check ambient_dim
        from pytools import is_single_valued
        ambient_dims = [p.ambient_dim for p in self.places.values()]
        if not is_single_valued(ambient_dims):
            raise RuntimeError(
                "All 'places' must have the same ambient dimension.")

        self.ambient_dim = ambient_dims[0]
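
A hedged sketch of the construction patterns the branches above accept (assuming the surrounding class is pytential's GeometryCollection; qbx and targets are illustrative names for a QBXLayerPotentialSource and a TargetBase):

# places = GeometryCollection(qbx)                   # single source
# places = GeometryCollection((qbx, targets))        # (source, target) pair
# places = GeometryCollection({"source": qbx,        # mapping with
#                              "targets": targets})  # identifier keys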
Example #16
    def __init__(self, code_container, queue, tree,
            near_field_table, dtype,
            fmm_level_to_order,
            quad_order,
            potential_kind=1,
            source_extra_kwargs=None,
            kernel_extra_kwargs=None,
            self_extra_kwargs=None,
            list1_extra_kwargs=None,
            *args, **kwargs):
        self.code = code_container
        self.queue = queue

        tree = tree.get(queue)
        self.tree = tree

        self.dtype = dtype
        self.quad_order = quad_order
        self.potential_kind = potential_kind

        # {{{ digest out_kernels

        ifgrad = False
        outputs = []
        source_deriv_names = []
        k_names = []

        for out_knl in self.code.out_kernels:

            if self.is_supported_helmknl(out_knl):
                outputs.append(())
                no_target_deriv_knl = out_knl

            elif (isinstance(out_knl, AxisTargetDerivative)
                    and self.is_supported_helmknl(out_knl.inner_kernel)):
                outputs.append((out_knl.axis,))
                ifgrad = True
                no_target_deriv_knl = out_knl.inner_kernel

            else:
                raise ValueError(
                        "only the 2/3D Laplace and Helmholtz kernel "
                        "and their derivatives are supported")

            source_deriv_names.append(no_target_deriv_knl.dir_vec_name
                    if isinstance(no_target_deriv_knl, DirectionalSourceDerivative)
                    else None)

            base_knl = out_knl.get_base_kernel()
            k_names.append(base_knl.helmholtz_k_name
                    if isinstance(base_knl, HelmholtzKernel)
                    else None)

        self.outputs = outputs

        from pytools import is_single_valued

        if not is_single_valued(source_deriv_names):
            raise ValueError("not all kernels passed are the same in "
                    "whether they represent a source derivative")

        source_deriv_name = source_deriv_names[0]

        if not is_single_valued(k_names):
            raise ValueError("not all kernels passed have the same "
                    "Helmholtz parameter")

        k_name = k_names[0]

        if k_name is None:
            helmholtz_k = 0
        else:
            helmholtz_k = kernel_extra_kwargs[k_name]

        # }}}

        # {{{ table setup
        # TODO put this part into the interface class

        self.near_field_table = {}
        # list of tables for a single out kernel
        if isinstance(near_field_table, list):
            assert len(self.code.out_kernels) == 1
            self.near_field_table[repr(self.code.out_kernels[0])] = \
                    near_field_table
            self.n_tables = len(near_field_table)

        # single table
        elif isinstance(near_field_table, NearFieldInteractionTable):
            assert len(self.code.out_kernels) == 1
            self.near_field_table[repr(self.code.out_kernels[0])] = [
                    near_field_table]
            self.n_tables = 1

        # dictionary of lists of tables
        elif isinstance(near_field_table, dict):
            self.n_tables = dict()
            for out_knl in self.code.out_kernels:
                if repr(out_knl) not in near_field_table:
                    raise RuntimeError(
                            "Missing nearfield table for %s." % repr(out_knl))
                if isinstance(near_field_table[repr(out_knl)],
                        NearFieldInteractionTable):
                    near_field_table[repr(out_knl)] = [
                            near_field_table[repr(out_knl)]]
                else:
                    assert isinstance(near_field_table[repr(out_knl)], list)

                self.n_tables[repr(out_knl)] = len(near_field_table[repr(out_knl)])

            self.near_field_table = near_field_table
        else:
            raise RuntimeError("Table type unrecognized.")

        # TODO: make all parameters table-specific (allow using inhomogeneous tables)
        kname = repr(self.code.out_kernels[0])
        self.root_table_source_box_extent = (
                self.near_field_table[kname][0].source_box_extent)
        table_starting_level = np.round(
            np.log(self.tree.root_extent / self.root_table_source_box_extent)
            / np.log(2)
            )
        for out_knl in self.code.out_kernels:
            kname = repr(out_knl)
            for lev, table in enumerate(self.near_field_table[kname]):
                assert table.quad_order == self.quad_order

                if not table.is_built:
                    raise RuntimeError(
                        "Near field interaction table needs to be built "
                        "prior to being used"
                    )

                table_root_extent = table.source_box_extent * 2 ** lev
                assert (
                    abs(self.root_table_source_box_extent - table_root_extent)
                    < 1e-15
                )

                # If the kernel cannot be scaled,
                # - tree_root_extent must be an integer multiple of
                #   table_root_extent
                # - n_tables must be sufficient
                if not isinstance(self.n_tables, dict) and self.n_tables > 1:
                    if abs(
                            int(self.tree.root_extent / table_root_extent)
                            * table_root_extent
                            - self.tree.root_extent) >= 1e-15:
                        raise RuntimeError(
                            "Incompatible list of tables: the tree's root "
                            "extent must be an integer multiple of the root "
                            "table's source_box_extent.")

            if not isinstance(self.n_tables, dict) and self.n_tables > 1:
                # this checks that the boxes at the highest level are covered
                if (tree.nlevels
                        > len(self.near_field_table[kname])
                        + table_starting_level):
                    raise RuntimeError(
                        "Insufficient list of tables: the finest level "
                        f"mesh cells at level {tree.nlevels} are not covered.")

                # the check that the boxes at the coarsest level are covered is
                # deferred until trav.target_boxes is passed when invoking
                # eval_direct

        if source_extra_kwargs is None:
            source_extra_kwargs = {}

        if kernel_extra_kwargs is None:
            kernel_extra_kwargs = {}

        if self_extra_kwargs is None:
            self_extra_kwargs = {}

        if list1_extra_kwargs is None:
            list1_extra_kwargs = {}

        self.list1_extra_kwargs = list1_extra_kwargs

        # }}} End table setup

        if not callable(fmm_level_to_order):
            raise TypeError("fmm_level_to_order not passed")

        dipole_vec = None
        if source_deriv_name is not None:
            dipole_vec = np.array([
                    d_i.get(queue=queue)
                    for d_i in source_extra_kwargs[source_deriv_name]],
                    order="F")

        def inner_fmm_level_to_nterms(tree, level):
            if helmholtz_k == 0:
                return fmm_level_to_order(
                        LaplaceKernel(tree.dimensions),
                        frozenset(), tree, level)
            else:
                return fmm_level_to_order(
                        HelmholtzKernel(tree.dimensions),
                        frozenset([("k", helmholtz_k)]), tree, level)

        rotation_data = None
        if 'traversal' in kwargs:
            # add rotation data if traversal is passed as a keyword argument
            from boxtree.pyfmmlib_integration import FMMLibRotationData
            rotation_data = FMMLibRotationData(self.queue, kwargs['traversal'])
        else:
            logger.warning("Rotation data is not utilized since traversal is "
                           "not known to FPNDFMMLibExpansionWrangler.")

        FMMLibExpansionWrangler.__init__(
                self, tree,

                helmholtz_k=helmholtz_k,
                dipole_vec=dipole_vec,
                dipoles_already_reordered=True,

                fmm_level_to_nterms=inner_fmm_level_to_nterms,
                rotation_data=rotation_data,

                ifgrad=ifgrad)
Example #17
def merge_disjoint_meshes(meshes, skip_tests=False, single_group=False):
    if not meshes:
        raise ValueError("must pass at least one mesh")

    from pytools import is_single_valued
    if not is_single_valued(mesh.ambient_dim for mesh in meshes):
        raise ValueError("all meshes must share the same ambient dimension")

    # {{{ assemble combined vertex array

    ambient_dim = meshes[0].ambient_dim
    nvertices = sum(mesh.vertices.shape[-1] for mesh in meshes)

    vert_dtype = np.find_common_type([mesh.vertices.dtype for mesh in meshes],
                                     [])
    vertices = np.empty((ambient_dim, nvertices), vert_dtype)

    current_vert_base = 0
    vert_bases = []
    for mesh in meshes:
        mesh_nvert = mesh.vertices.shape[-1]
        vertices[:, current_vert_base:current_vert_base+mesh_nvert] = \
                mesh.vertices

        vert_bases.append(current_vert_base)
        current_vert_base += mesh_nvert

    # }}}

    # {{{ assemble new groups list

    nodal_adjacency = None
    facial_adjacency_groups = None

    if single_group:
        grp_cls = None
        order = None
        unit_nodes = None
        nodal_adjacency = None
        facial_adjacency_groups = None

        for mesh in meshes:
            if mesh._nodal_adjacency is not None:
                nodal_adjacency = False
            if mesh._facial_adjacency_groups is not None:
                facial_adjacency_groups = False

            for group in mesh.groups:
                if grp_cls is None:
                    grp_cls = type(group)
                    order = group.order
                    unit_nodes = group.unit_nodes
                else:
                    assert type(group) == grp_cls
                    assert group.order == order
                    assert np.array_equal(unit_nodes, group.unit_nodes)

        vertex_indices = np.vstack([
            group.vertex_indices + vert_base
            for mesh, vert_base in zip(meshes, vert_bases)
            for group in mesh.groups
        ])
        nodes = np.hstack(
            [group.nodes for mesh in meshes for group in mesh.groups])

        if not nodes.flags.c_contiguous:
            # hstack stopped producing C-contiguous arrays in numpy 1.14
            nodes = nodes.copy(order="C")

        new_groups = [
            grp_cls(order, vertex_indices, nodes, unit_nodes=unit_nodes)
        ]

    else:
        new_groups = []
        nodal_adjacency = None
        facial_adjacency_groups = None

        for mesh, vert_base in zip(meshes, vert_bases):
            if mesh._nodal_adjacency is not None:
                nodal_adjacency = False
            if mesh._facial_adjacency_groups is not None:
                facial_adjacency_groups = False

            for group in mesh.groups:
                new_vertex_indices = group.vertex_indices + vert_base
                new_group = group.copy(vertex_indices=new_vertex_indices)
                new_groups.append(new_group)

    # }}}

    from meshmode.mesh import Mesh
    return Mesh(vertices,
                new_groups,
                skip_tests=skip_tests,
                nodal_adjacency=nodal_adjacency,
                facial_adjacency_groups=facial_adjacency_groups,
                is_conforming=all(mesh.is_conforming for mesh in meshes))
Example #18
    def __init__(self, cl_context, *,
            multipole_expansion_factory, local_expansion_factory,
            qbx_local_expansion_factory, target_kernels,
            _use_target_specific_qbx):
        self.cl_context = cl_context
        self.multipole_expansion_factory = multipole_expansion_factory
        self.local_expansion_factory = local_expansion_factory
        self.qbx_local_expansion_factory = qbx_local_expansion_factory

        kernel = target_kernels[0].get_base_kernel()
        self.target_kernels = target_kernels

        # {{{ digest target_kernels

        ifgrad = False
        outputs = []
        source_deriv_names = []
        k_names = []

        using_tsqbx = (
                _use_target_specific_qbx
                # None means use by default if possible
                or _use_target_specific_qbx is None)

        for out_knl in target_kernels:
            if not self.is_supported_helmknl_for_tsqbx(out_knl):
                if _use_target_specific_qbx:
                    raise ValueError("not all kernels passed support TSQBX")
                using_tsqbx = False

            if self.is_supported_helmknl(out_knl):
                outputs.append(())
                no_target_deriv_knl = out_knl

            elif (isinstance(out_knl, AxisTargetDerivative)
                    and self.is_supported_helmknl(out_knl.inner_kernel)):
                outputs.append((out_knl.axis,))
                ifgrad = True
                no_target_deriv_knl = out_knl.inner_kernel

            else:
                raise ValueError(
                        "only the 2/3D Laplace and Helmholtz kernel "
                        "and their derivatives are supported")

            source_deriv_names.append(no_target_deriv_knl.dir_vec_name
                    if isinstance(no_target_deriv_knl, DirectionalSourceDerivative)
                    else None)

            base_knl = out_knl.get_base_kernel()
            k_names.append(base_knl.helmholtz_k_name
                    if isinstance(base_knl, HelmholtzKernel)
                    else None)

        self.using_tsqbx = using_tsqbx
        self.outputs = outputs

        from pytools import is_single_valued

        if not is_single_valued(source_deriv_names):
            raise ValueError("not all kernels passed are the same in "
                    "whether they represent a source derivative")

        self.source_deriv_name = source_deriv_names[0]

        if not is_single_valued(k_names):
            raise ValueError("not all kernels passed have the same "
                    "Helmholtz parameter")

        self.k_name = k_names[0]

        # }}}

        super().__init__(kernel.dim, {
            LaplaceKernel: Kernel.LAPLACE,
            HelmholtzKernel: Kernel.HELMHOLTZ,
            }[type(kernel)],
            ifgrad=ifgrad)
Example #19
    def eval(self,
             context=None,
             timing_data=None,
             array_context: Optional[PyOpenCLArrayContext] = None):
        """Evaluate the expression in *self*, using the
        :class:`pyopencl.CommandQueue` *queue* and the
        input variables given in the dictionary *context*.

        :arg timing_data: A dictionary into which timing
            data will be inserted during evaluation.
            (experimental)
        :arg array_context: only needs to be supplied if no instances of
            :class:`~meshmode.dof_array.DOFArray` with a
            :class:`~meshmode.array_context.PyOpenCLArrayContext`
            are supplied as part of *context*.
        :returns: the value of the expression, as a scalar,
            :class:`pyopencl.array.Array`, or an object array of these.
        """

        if context is None:
            context = {}

        # {{{ figure array context

        array_contexts = []
        if array_context is not None:
            if not isinstance(array_context, PyOpenCLArrayContext):
                raise TypeError("first argument (if supplied) must be a "
                                "PyOpenCLArrayContext")

            array_contexts.append(array_context)
        del array_context

        def look_for_array_contexts(ary):
            if isinstance(ary, DOFArray):
                if ary.array_context is not None:
                    array_contexts.append(ary.array_context)
            elif isinstance(ary, np.ndarray) and ary.dtype.char == "O":
                for idx in np.ndindex(ary.shape):
                    look_for_array_contexts(ary[idx])
            else:
                pass

        for val in context.values():
            look_for_array_contexts(val)

        if array_contexts:
            from pytools import is_single_valued
            if not is_single_valued(array_contexts):
                raise ValueError("arguments do not agree on an array context")

            array_context = array_contexts[0]
        else:
            array_context = None

        # }}}

        exec_mapper = EvaluationMapper(self,
                                       array_context,
                                       context,
                                       timing_data=timing_data)
        return self.code.execute(exec_mapper)
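
A hedged usage sketch for eval (bound_op, sigma and actx are illustrative names for a bound expression, an input DOFArray and a PyOpenCLArrayContext):

# timings = {}
# result = bound_op.eval({"sigma": sigma}, timing_data=timings,
#                        array_context=actx)
# array_context may be omitted when some DOFArray in *context* already
# carries one, per the "figure array context" block above.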
Example #20
def merge_disjoint_meshes(meshes, skip_tests=False, single_group=False):
    if not meshes:
        raise ValueError("must pass at least one mesh")

    from pytools import is_single_valued
    if not is_single_valued(mesh.ambient_dim for mesh in meshes):
        raise ValueError("all meshes must share the same ambient dimension")

    # {{{ assemble combined vertex array

    ambient_dim = meshes[0].ambient_dim
    nvertices = sum(
            mesh.vertices.shape[-1]
            for mesh in meshes)

    vert_dtype = np.find_common_type(
            [mesh.vertices.dtype for mesh in meshes],
            [])
    vertices = np.empty(
            (ambient_dim, nvertices), vert_dtype)

    current_vert_base = 0
    vert_bases = []
    for mesh in meshes:
        mesh_nvert = mesh.vertices.shape[-1]
        vertices[:, current_vert_base:current_vert_base+mesh_nvert] = \
                mesh.vertices

        vert_bases.append(current_vert_base)
        current_vert_base += mesh_nvert

    # }}}

    # {{{ assemble new groups list

    nodal_adjacency = None
    facial_adjacency_groups = None

    if single_group:
        grp_cls = None
        order = None
        unit_nodes = None
        nodal_adjacency = None
        facial_adjacency_groups = None

        for mesh in meshes:
            if mesh._nodal_adjacency is not None:
                nodal_adjacency = False
            if mesh._facial_adjacency_groups is not None:
                facial_adjacency_groups = False

            for group in mesh.groups:
                if grp_cls is None:
                    grp_cls = type(group)
                    order = group.order
                    unit_nodes = group.unit_nodes
                else:
                    assert type(group) == grp_cls
                    assert group.order == order
                    assert np.array_equal(unit_nodes, group.unit_nodes)

        vertex_indices = np.vstack([
            group.vertex_indices + vert_base
            for mesh, vert_base in zip(meshes, vert_bases)
            for group in mesh.groups])
        nodes = np.hstack([
            group.nodes
            for mesh in meshes
            for group in mesh.groups])

        if not nodes.flags.c_contiguous:
            # hstack stopped producing C-contiguous arrays in numpy 1.14
            nodes = nodes.copy(order="C")

        new_groups = [
                grp_cls(order, vertex_indices, nodes, unit_nodes=unit_nodes)]

    else:
        new_groups = []
        nodal_adjacency = None
        facial_adjacency_groups = None

        for mesh, vert_base in zip(meshes, vert_bases):
            if mesh._nodal_adjacency is not None:
                nodal_adjacency = False
            if mesh._facial_adjacency_groups is not None:
                facial_adjacency_groups = False

            for group in mesh.groups:
                new_vertex_indices = group.vertex_indices + vert_base
                new_group = group.copy(vertex_indices=new_vertex_indices)
                new_groups.append(new_group)

    # }}}

    from meshmode.mesh import Mesh
    return Mesh(vertices, new_groups, skip_tests=skip_tests,
            nodal_adjacency=nodal_adjacency,
            facial_adjacency_groups=facial_adjacency_groups,
            is_conforming=all(
                mesh.is_conforming
                for mesh in meshes))