Example #1
def flat_norm(ary, ord=None) -> float:
    r"""Return an element-wise :math:`\ell^{\text{ord}}` norm of *ary*.

    :arg ary: may be a :class:`DOFArray` or a
        :class:`~arraycontext.ArrayContainer` containing them.
    """

    from numbers import Number
    if isinstance(ary, Number):
        return abs(ary)

    if ord is None:
        ord = 2

    from arraycontext import is_array_container, serialize_container

    import numpy.linalg as la
    if isinstance(ary, DOFArray):
        actx = ary.array_context
        return la.norm([
            actx.np.linalg.norm(actx.np.ravel(subary, order="A"), ord=ord)
            for _, subary in serialize_container(ary)
            ], ord=ord)

    elif is_array_container(ary):
        return la.norm([
            flat_norm(subary, ord=ord)
            for _, subary in serialize_container(ary)
            ], ord=ord)

    raise TypeError(
        f"unsupported array type passed to flat_norm: '{type(ary).__name__}'")
Example #2
    def _unflatten_like(_ary, _prototype):
        if isinstance(_prototype, DOFArray):
            group_shapes = [subary.shape for subary in _prototype]
            group_sizes = [subary.size for subary in _prototype]
            group_starts = np.cumsum([0] + group_sizes)

            return _unflatten_dof_array(actx,
                                        _ary,
                                        group_shapes,
                                        group_starts,
                                        strict=True)
        elif is_array_container(_prototype):
            assert type(_ary) is type(_prototype)

            return deserialize_container(_prototype, [
                (_same_key(key1, key2), _unflatten_like(subary, subprototype))
                for (key1, subary), (key2, subprototype) in zip(
                    serialize_container(_ary), serialize_container(_prototype))
            ])
        else:
            if strict:
                raise ValueError(
                    "cannot unflatten array "
                    f"with prototype '{type(_prototype).__name__}'; "
                    "use 'strict=False' to leave the array unchanged")

            assert type(_ary) is type(_prototype)
            return _ary
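
To illustrate the per-group bookkeeping that `group_shapes`, `group_sizes` and
`group_starts` drive, here is a small self-contained numpy sketch (the shapes
are made up, and this only mirrors conceptually what `_unflatten_dof_array`
does with these arguments):

import numpy as np

group_shapes = [(3, 4), (2, 5)]                # (nelements, nunit_dofs) per group
group_sizes = [np.prod(shape) for shape in group_shapes]
group_starts = np.cumsum([0] + group_sizes)    # [0, 12, 22]

flat = np.arange(group_starts[-1], dtype=np.float64)
per_group = tuple(
    flat[start:start + size].reshape(shape)
    for start, size, shape in zip(group_starts, group_sizes, group_shapes))

assert [grp.shape for grp in per_group] == group_shapes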
Example #3
    def _unflatten_from_numpy(subary):
        if isinstance(subary, np.ndarray) and subary.dtype.char != "O":
            subary = actx.from_numpy(subary)

        # FIXME: this does the recursion itself instead of just using
        # `rec_map_dof_array_container`, as `flatten_to_numpy` does, in order
        # to catch non-object ndarrays, which `is_array_container` considers
        # containers and tries to serialize.
        from arraycontext import map_array_container, is_array_container
        if is_array_container(subary):
            return map_array_container(_unflatten_from_numpy, subary)
        else:
            return _unflatten_dof_array(actx,
                                        subary,
                                        group_shapes,
                                        group_starts,
                                        strict=strict)
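
The `dtype.char != "O"` check separates numeric ndarrays, which are leaves to
be converted via `actx.from_numpy`, from object ndarrays, which are containers
to be recursed into. A standalone illustration of that distinction:

import numpy as np

leaf = np.zeros((3, 4))                # numeric ndarray: dtype.char is "d"
print(leaf.dtype.char != "O")          # True -> treated as a leaf and converted

container = np.empty(2, dtype=object)
container[0] = np.zeros((3, 4))
container[1] = np.zeros((2, 5))
print(container.dtype.char)            # "O" -> recursed into component-wise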
Example #4
    def __call__(self, ary):
        from meshmode.dof_array import DOFArray
        if is_array_container(ary) and not isinstance(ary, DOFArray):
            return map_array_container(self, ary)

        if not isinstance(ary, DOFArray):
            raise TypeError("non-array passed to discretization connection")

        if ary.shape != (len(self.from_discr.groups), ):
            raise ValueError("invalid shape of incoming resampling data")

        actx = ary.array_context

        @memoize_in(actx,
                    (DirectDiscretizationConnection, "resample_by_mat_knl"))
        def mat_knl():
            knl = make_loopy_program(
                """{[iel, idof, j]:
                    0<=iel<nelements and
                    0<=idof<n_to_nodes and
                    0<=j<n_from_nodes}""",
                "result[to_element_indices[iel], idof] \
                    = sum(j, resample_mat[idof, j] \
                    * ary[from_element_indices[iel], j])", [
                    lp.GlobalArg("result",
                                 None,
                                 shape="nelements_result, n_to_nodes",
                                 offset=lp.auto),
                    lp.GlobalArg("ary",
                                 None,
                                 shape="nelements_vec, n_from_nodes",
                                 offset=lp.auto),
                    lp.ValueArg("nelements_result", np.int32),
                    lp.ValueArg("nelements_vec", np.int32),
                    "...",
                ],
                name="resample_by_mat")

            return knl

        @memoize_in(actx,
                    (DirectDiscretizationConnection, "resample_by_picking_knl"))
        def pick_knl():
            knl = make_loopy_program(
                """{[iel, idof]:
                    0<=iel<nelements and
                    0<=idof<n_to_nodes}""",
                "result[to_element_indices[iel], idof] \
                    = ary[from_element_indices[iel], pick_list[idof]]", [
                    lp.GlobalArg("result",
                                 None,
                                 shape="nelements_result, n_to_nodes",
                                 offset=lp.auto),
                    lp.GlobalArg("ary",
                                 None,
                                 shape="nelements_vec, n_from_nodes",
                                 offset=lp.auto),
                    lp.ValueArg("nelements_result", np.int32),
                    lp.ValueArg("nelements_vec", np.int32),
                    lp.ValueArg("n_from_nodes", np.int32),
                    "...",
                ],
                name="resample_by_picking")

            return knl

        if self.is_surjective:
            result = self.to_discr.empty(actx, dtype=ary.entry_dtype)
        else:
            result = self.to_discr.zeros(actx, dtype=ary.entry_dtype)

        for i_tgrp, cgrp in enumerate(self.groups):
            for i_batch, batch in enumerate(cgrp.batches):
                if not len(batch.from_element_indices):
                    continue

                point_pick_indices = self._resample_point_pick_indices(
                    actx, i_tgrp, i_batch)

                if point_pick_indices is None:
                    actx.call_loopy(
                        mat_knl(),
                        resample_mat=self._resample_matrix(
                            actx, i_tgrp, i_batch),
                        result=result[i_tgrp],
                        ary=ary[batch.from_group_index],
                        from_element_indices=batch.from_element_indices,
                        to_element_indices=batch.to_element_indices)

                else:
                    actx.call_loopy(
                        pick_knl(),
                        pick_list=point_pick_indices,
                        result=result[i_tgrp],
                        ary=ary[batch.from_group_index],
                        from_element_indices=batch.from_element_indices,
                        to_element_indices=batch.to_element_indices)

        return result
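
A hypothetical usage sketch (not part of the snippet): `conn` is assumed to be
an existing DirectDiscretizationConnection (e.g. one built by
make_face_restriction) and `u` a DOFArray on `conn.from_discr`.

import numpy as np

# `conn` and `u` are assumed, pre-existing objects (see note above).
u_to = conn(u)                 # resample a single DOFArray

# Object ndarrays of DOFArrays are handled by the map_array_container
# branch at the top and resampled component-wise.
vel = np.empty(2, dtype=object)
vel[0] = u
vel[1] = u
vel_to = conn(vel)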
Example #5
    def __call__(self, ary):
        from meshmode.dof_array import DOFArray
        if is_array_container(ary) and not isinstance(ary, DOFArray):
            return map_array_container(self, ary)

        if not isinstance(ary, DOFArray):
            raise TypeError("non-array passed to discretization connection")

        if ary.shape != (len(self.from_discr.groups), ):
            raise ValueError("invalid shape of incoming resampling data")

        actx = ary.array_context

        @memoize_in(actx, (L2ProjectionInverseDiscretizationConnection,
                           "conn_projection_knl"))
        def kproj():
            return make_loopy_program(
                [
                    "{[iel_init]: 0 <= iel_init < n_to_elements}",
                    "{[idof_init]: 0 <= idof_init < n_to_nodes}",
                    "{[iel]: 0 <= iel < nelements}",
                    "{[i_quad]: 0 <= i_quad < n_to_nodes}",
                    "{[ibasis]: 0 <= ibasis < n_to_nodes}"
                ],
                """
                    result[iel_init, idof_init] = 0 {id=init}
                    ... gbarrier {id=barrier, dep=init}
                    result[to_element_indices[iel], ibasis] =               \
                        result[to_element_indices[iel], ibasis] +           \
                        sum(i_quad, ary[from_element_indices[iel], i_quad]  \
                                    * basis_tabulation[ibasis, i_quad]      \
                                    * weights[i_quad]) {dep=barrier}
                """, [
                    lp.GlobalArg("ary",
                                 None,
                                 shape=("n_from_elements", "n_from_nodes")),
                    lp.GlobalArg(
                        "result", None, shape=("n_to_elements", "n_to_nodes")),
                    lp.GlobalArg("basis_tabulation",
                                 None,
                                 shape=("n_to_nodes", "n_to_nodes")),
                    lp.GlobalArg("weights", None, shape="n_from_nodes"),
                    lp.ValueArg("n_from_elements", np.int32),
                    lp.ValueArg("n_from_nodes", np.int32),
                    lp.ValueArg("n_to_elements", np.int32),
                    lp.ValueArg("n_to_nodes", np.int32), "..."
                ],
                name="conn_projection_knl")

        # compute weights on each refinement of the reference element
        weights = self._batch_weights(actx)

        # perform dot product (on reference element) to get basis coefficients
        c_group_data = []
        for igrp, cgrp in enumerate(self.conn.groups):
            c_batch_data = []
            for ibatch, batch in enumerate(cgrp.batches):
                sgrp = self.from_discr.groups[batch.from_group_index]

                # Generate the basis tabulation matrix
                tabulations = []
                for basis_fn in sgrp.basis_obj().functions:
                    tabulations.append(
                        basis_fn(batch.result_unit_nodes).flatten())
                tabulations = actx.from_numpy(np.asarray(tabulations))

                # NOTE: batch.*_element_indices are swapped here because they
                # come from the original forward connection, while this
                # connection goes in the reverse direction. A bit confusing,
                # but it saves recreating the connection groups and batches.
                c_batch_data.append(
                    actx.call_loopy(
                        kproj(),
                        ary=ary[sgrp.index],
                        basis_tabulation=tabulations,
                        weights=weights[igrp, ibatch],
                        from_element_indices=batch.to_element_indices,
                        to_element_indices=batch.from_element_indices,
                        n_to_elements=self.to_discr.groups[igrp].nelements,
                        n_to_nodes=self.to_discr.groups[igrp].nunit_dofs,
                    )["result"])

            c_group_data.append(sum(c_batch_data))
        coefficients = DOFArray(actx, data=tuple(c_group_data))

        @keyed_memoize_in(actx, (L2ProjectionInverseDiscretizationConnection,
                                 "vandermonde_matrix"),
                          lambda grp: grp.discretization_key())
        def vandermonde_matrix(grp):
            from modepy import vandermonde
            vdm = vandermonde(grp.basis_obj().functions, grp.unit_nodes)
            return actx.from_numpy(vdm)

        return DOFArray(
            actx,
            data=tuple(
                actx.einsum("ij,ej->ei",
                            vandermonde_matrix(grp),
                            c_i,
                            arg_names=("vdm", "coeffs"),
                            tagged=(FirstAxisIsElementsTag(), ))
                for grp, c_i in zip(self.to_discr.groups, coefficients)))
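
For reference, a self-contained numpy sketch of what the final einsum computes
for a single element group; `vdm` and `coeffs` are random stand-ins for the
Vandermonde matrix and the per-element basis coefficients:

import numpy as np

nelements, nunit_dofs = 10, 4
vdm = np.random.rand(nunit_dofs, nunit_dofs)      # V[i, j] = phi_j(unit_node_i)
coeffs = np.random.rand(nelements, nunit_dofs)    # c[e, j] per element

# "ij,ej->ei": nodal values u[e, i] = sum_j V[i, j] * c[e, j]
nodal = np.einsum("ij,ej->ei", vdm, coeffs)
assert nodal.shape == (nelements, nunit_dofs)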