Example 1
    def __call__(self, expr, type_hints={}):
        typedict = TypeDict(type_hints)

        while True:
            typedict.change_flag = False

            def infer_for_expr(expr):
                tp = pymbolic.mapper.RecursiveMapper.__call__(
                    self, expr, typedict)
                typedict[expr] = tp

            # Numpy arrays occur either at the top level or in flux
            # expressions. This code handles the top level case.
            from pytools.obj_array import with_object_array_or_scalar
            with_object_array_or_scalar(infer_for_expr, expr)

            if not typedict.change_flag:
                # nothing has changed any more, type information has 'converged'
                break

        # check that type inference completed successfully
        for expr, tp in typedict.iteritems():
            if not isinstance(tp, type_info.FinalType):
                raise RuntimeError(
                    "type inference was unable to deduce "
                    "complete type information for '%s' (only '%s')" %
                    (expr, tp))

        return typedict
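Every example in this list revolves around the same helper: `with_object_array_or_scalar(f, x)` applies `f` to each entry of a numpy object array, or directly to `x` when it is a scalar. A minimal self-contained sketch of that contract (an illustration, not pytools' actual implementation):

import numpy as np

def apply_componentwise(f, ary):
    # Sketch of with_object_array_or_scalar's contract: map f over the
    # entries of an object array; pass anything else straight to f.
    if isinstance(ary, np.ndarray) and ary.dtype == object:
        result = np.empty(ary.shape, dtype=object)
        for idx in np.ndindex(ary.shape):
            result[idx] = f(ary[idx])
        return result
    return f(ary)

fields = np.empty(2, dtype=object)
fields[0] = np.arange(3)
fields[1] = np.arange(4)
doubled = apply_componentwise(lambda fld: 2 * fld, fields)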
Example 2
def write_structured_grid(file_name, mesh, cell_data=[], point_data=[]):
    grid = StructuredGrid(mesh)

    from pytools.obj_array import with_object_array_or_scalar

    def do_reshape(fld):
        return fld.T.copy().reshape(-1)

    for name, field in cell_data:
        reshaped_fld = with_object_array_or_scalar(do_reshape,
                                                   field,
                                                   obj_array_only=True)
        grid.add_pointdata(DataArray(name, reshaped_fld))

    for name, field in point_data:
        reshaped_fld = with_object_array_or_scalar(do_reshape,
                                                   field,
                                                   obj_array_only=True)
        grid.add_pointdata(DataArray(name, reshaped_fld))

    from os.path import exists
    if exists(file_name):
        raise RuntimeError("output file '%s' already exists" % file_name)

    outf = open(file_name, "w")
    AppendedDataXMLGenerator()(grid).write(outf)
    outf.close()
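The `do_reshape` helper above flattens each field by transposing first, so the leading mesh axis varies fastest in the output. A small plain-numpy illustration (the grid shape is arbitrary):

import numpy as np

fld = np.arange(6).reshape(2, 3)    # field values on a 2x3 structured grid
flat = fld.T.copy().reshape(-1)     # transpose, then flatten contiguously
# flat == [0, 3, 1, 4, 2, 5]: a column-major traversal of fld, which is the
# point ordering the structured VTK writer here expects.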
Example 3
def write_structured_grid(file_name, mesh, cell_data=[], point_data=[]):
    grid = StructuredGrid(mesh)

    from pytools.obj_array import with_object_array_or_scalar
    def do_reshape(fld):
        return fld.T.copy().reshape(-1)

    for name, field in cell_data:
        reshaped_fld = with_object_array_or_scalar(do_reshape, field,
                            obj_array_only=True)
        grid.add_pointdata(DataArray(name, reshaped_fld))

    for name, field in point_data:
        reshaped_fld = with_object_array_or_scalar(do_reshape, field,
                obj_array_only=True)
        grid.add_pointdata(DataArray(name, reshaped_fld))

    from os.path import exists
    if exists(file_name):
        raise RuntimeError("output file '%s' already exists"
                % file_name)

    outf = open(file_name, "w")
    AppendedDataXMLGenerator()(grid).write(outf)
    outf.close()
Example 4
    def __call__(self, expr, type_hints={}):
        typedict = TypeDict(type_hints)

        while True:
            typedict.change_flag = False

            def infer_for_expr(expr):
                tp = pymbolic.mapper.RecursiveMapper.__call__(self, expr, typedict)
                typedict[expr] = tp

            # Numpy arrays occur either at the top level or in flux
            # expressions. This code handles the top level case.
            from pytools.obj_array import with_object_array_or_scalar
            with_object_array_or_scalar(infer_for_expr, expr)

            if not typedict.change_flag:
                # nothing has changed any more, type information has 'converged'
                break

        # check that type inference completed successfully
        for expr, tp in typedict.iteritems():
            if not isinstance(tp, type_info.FinalType):
                raise RuntimeError("type inference was unable to deduce "
                        "complete type information for '%s' (only '%s')"
                        % (expr, tp))

        return typedict
Example 5
def separate_by_real_and_imag(data, real_only):
    for name, field in data:
        from pytools.obj_array import log_shape, is_obj_array
        ls = log_shape(field)

        if is_obj_array(field):
            assert len(ls) == 1
            from pytools.obj_array import (oarray_real_copy, oarray_imag_copy,
                                           with_object_array_or_scalar)

            if field[0].dtype.kind == "c":
                if real_only:
                    yield (name,
                           with_object_array_or_scalar(oarray_real_copy,
                                                       field))
                else:
                    yield (name + "_r",
                           with_object_array_or_scalar(oarray_real_copy,
                                                       field))
                    yield (name + "_i",
                           with_object_array_or_scalar(oarray_imag_copy,
                                                       field))
            else:
                yield (name, field)
        else:
            # ls == ()
            if field.dtype.kind == "c":
                yield (name + "_r", field.real.copy())
                yield (name + "_i", field.imag.copy())
            else:
                yield (name, field)
Example 6
def separate_by_real_and_imag(data, real_only):
    for name, field in data:
        from pytools.obj_array import log_shape, is_obj_array
        ls = log_shape(field)

        if is_obj_array(field):
            assert len(ls) == 1
            from pytools.obj_array import (
                    oarray_real_copy, oarray_imag_copy,
                    with_object_array_or_scalar)

            if field[0].dtype.kind == "c":
                if real_only:
                    yield (name,
                            with_object_array_or_scalar(oarray_real_copy, field))
                else:
                    yield (name+"_r",
                            with_object_array_or_scalar(oarray_real_copy, field))
                    yield (name+"_i",
                            with_object_array_or_scalar(oarray_imag_copy, field))
            else:
                yield (name, field)
        else:
            # ls == ()
            if field.dtype.kind == "c":
                yield (name+"_r", field.real.copy())
                yield (name+"_i", field.imag.copy())
            else:
                yield (name, field)
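For a single complex field, the non-object-array branch reduces to taking contiguous copies of the real and imaginary parts. A minimal numpy sketch (the field name "u" is illustrative):

import numpy as np

field = np.array([1+2j, 3-1j])
if field.dtype.kind == "c":
    pairs = [("u_r", field.real.copy()), ("u_i", field.imag.copy())]
else:
    pairs = [("u", field)]
# .copy() matters: field.real and field.imag are strided views into the
# complex buffer, and downstream writers generally want real, contiguous data.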
Example 7
    def _resample_and_get(self, queue, vec):
        from pytools.obj_array import with_object_array_or_scalar

        def resample_and_get_one(fld):
            return self.connection(queue, fld).get(queue=queue)

        return with_object_array_or_scalar(resample_and_get_one, vec)
Example 8
    def get_fmm_expansion_wrangler_extra_kwargs(
            self, queue, out_kernels, tree_user_source_ids, arguments, evaluator):
        # This contains things like the Helmholtz parameter k or
        # the normal directions for double layers.

        def reorder_sources(source_array):
            if isinstance(source_array, cl.array.Array):
                return (source_array
                        .with_queue(queue)
                        [tree_user_source_ids]
                        .with_queue(None))
            else:
                return source_array

        kernel_extra_kwargs = {}
        source_extra_kwargs = {}

        from sumpy.tools import gather_arguments, gather_source_arguments
        from pytools.obj_array import with_object_array_or_scalar
        for func, var_dict in [
                (gather_arguments, kernel_extra_kwargs),
                (gather_source_arguments, source_extra_kwargs),
                ]:
            for arg in func(out_kernels):
                var_dict[arg.name] = with_object_array_or_scalar(
                        reorder_sources,
                        evaluator(arguments[arg.name]))

        return kernel_extra_kwargs, source_extra_kwargs
Example 9
def _resample_arg(queue, source, x):
    """
    :arg queue: a :class:`pyopencl.CommandQueue`.
    :arg source: a :class:`pytential.source.LayerPotentialSourceBase` subclass.
        If it is not a layer potential source, no resampling is done.
    :arg x: a :class:`numpy.ndarray`.

    :return: a resampled :class:`numpy.ndarray` (see
        :method:`pytential.source.LayerPotentialSourceBase.resampler`).
    """

    from pytential.source import LayerPotentialSourceBase
    if not isinstance(source, LayerPotentialSourceBase):
        return x

    if not isinstance(x, np.ndarray):
        return x

    if len(x.shape) >= 2:
        raise RuntimeError("matrix variables in kernel arguments")

    def resample(y):
        return source.resampler(queue, cl.array.to_device(queue, y)).get(queue)

    from pytools.obj_array import with_object_array_or_scalar
    return with_object_array_or_scalar(resample, x)
Example 10
    def __call__(self, expr):
        # {{{ collect operators by operand

        from pytential.symbolic.mappers import OperatorCollector
        from pytential.symbolic.primitives import IntG

        operators = [
                op
                for op in OperatorCollector()(expr)
                if isinstance(op, IntG)]

        self.group_to_operators = {}
        for op in operators:
            features = self.op_group_features(op)
            self.group_to_operators.setdefault(features, set()).add(op)

        # }}}

        # Traverse the expression, generate code.

        result = IdentityMapper.__call__(self, expr)

        # Put the toplevel expressions into variables as well.

        from pytools.obj_array import with_object_array_or_scalar
        result = with_object_array_or_scalar(self.assign_to_new_var, result)

        return Code(self.code, result)
Example 11
    def __call__(self, expr):
        # {{{ collect operators by operand

        from pytential.symbolic.mappers import OperatorCollector
        from pytential.symbolic.primitives import IntG

        operators = [
            op for op in OperatorCollector()(expr) if isinstance(op, IntG)
        ]

        self.group_to_operators = {}
        for op in operators:
            features = self.op_group_features(op)
            self.group_to_operators.setdefault(features, set()).add(op)

        # }}}

        # Traverse the expression, generate code.

        result = IdentityMapper.__call__(self, expr)

        # Put the toplevel expressions into variables as well.

        from pytools.obj_array import with_object_array_or_scalar
        result = with_object_array_or_scalar(self.assign_to_new_var, result)

        return Code(self.code, result)
Example 12
    def inverse_mass(self, vec):
        if is_obj_array(vec):
            return with_object_array_or_scalar(
                lambda el: self.inverse_mass(el), vec)

        @memoize_in(self, "elwise_linear_knl")
        def knl():
            knl = lp.make_kernel("""{[k,i,j]:
                    0<=k<nelements and
                    0<=i<ndiscr_nodes_out and
                    0<=j<ndiscr_nodes_in}""",
                                 "result[k,i] = sum(j, mat[i, j] * vec[k, j])",
                                 default_offset=lp.auto,
                                 name="diff")

            knl = lp.split_iname(knl, "i", 16, inner_tag="l.0")
            return lp.tag_inames(knl, dict(k="g.0"))

        discr = self.volume_discr

        result = discr.empty(queue=vec.queue, dtype=vec.dtype)

        for grp in discr.groups:
            matrix = self.get_inverse_mass_matrix(grp, vec.dtype)

            knl()(vec.queue,
                  mat=matrix,
                  result=grp.view(result),
                  vec=grp.view(vec))

        return result / self.vol_jacobian()
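The loopy kernel above computes result[k, i] = sum(j, mat[i, j] * vec[k, j]) for each element group. As a correctness sketch, the same contraction in plain numpy (shapes are illustrative):

import numpy as np

nelements, n_out, n_in = 10, 4, 4
mat = np.random.rand(n_out, n_in)          # reference inverse mass matrix
vec = np.random.rand(nelements, n_in)      # per-element DOF vectors
result = np.einsum("ij,kj->ki", mat, vec)  # same contraction as the kernel
assert result.shape == (nelements, n_out)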
Example 13
def vector_from_device(queue, vec):
    from pytools.obj_array import with_object_array_or_scalar

    def from_dev(ary):
        return ary.get(queue=queue)

    return with_object_array_or_scalar(from_dev, vec)
Example 14
    def __call__(self, queue, profile_data=None, log_quantities=None, **context):
        import pyopencl.array as cl_array

        def replace_queue(a):
            if isinstance(a, cl_array.Array):
                return a.with_queue(queue)
            else:
                return a

        from pytools.obj_array import with_object_array_or_scalar

        # {{{ discrwb-scope evaluation

        if any(
                (result_var.name not in
                    self.discrwb._discr_scoped_subexpr_name_to_value)
                for result_var in self.discr_code.result):
            # need to do discrwb-scope evaluation
            discrwb_eval_context = {}
            self.discr_code.execute(
                    self.exec_mapper_factory(queue, discrwb_eval_context, self))

        # }}}

        new_context = {}
        for name, var in six.iteritems(context):
            new_context[name] = with_object_array_or_scalar(replace_queue, var)

        return self.eval_code.execute(
                self.exec_mapper_factory(queue, new_context, self),
                profile_data=profile_data,
                log_quantities=log_quantities)
Example 15
    def reorder_potentials(self, potentials):
        from pytools.obj_array import is_obj_array, with_object_array_or_scalar
        assert is_obj_array(potentials)

        def reorder(x):
            return x.with_queue(self.queue)[self.tree.sorted_target_ids]

        return with_object_array_or_scalar(reorder, potentials)
Example 16
    def execute(self, exec_mapper, pre_assign_check=None):
        """Execute the instruction stream, make all scheduling decisions
        dynamically.
        """

        context = exec_mapper.context

        done_insns = set()

        while True:
            insn = None
            discardable_vars = []

            # pick the next insn
            if insn is None:
                try:
                    insn, discardable_vars = self.get_next_step(
                            frozenset(list(context.keys())),
                            frozenset(done_insns))

                except self.NoInstructionAvailable:
                    # no available instructions: we're done
                    break
                else:
                    for name in discardable_vars:
                        del context[name]

                    done_insns.add(insn)
                    assignments, new_futures = (
                            insn.get_exec_function(exec_mapper)
                            (exec_mapper.queue, insn, exec_mapper.bound_expr,
                                exec_mapper))

            if insn is not None:
                for target, value in assignments:
                    if pre_assign_check is not None:
                        pre_assign_check(target, value)

                    context[target] = value

                assert not new_futures

        if len(done_insns) < len(self.instructions):
            print("Unreachable instructions:")
            for insn in set(self.instructions) - done_insns:
                print("    ", str(insn).replace("\n", "\n     "))
                from pymbolic import var
                print("     missing: ", ", ".join(
                        str(s) for s in
                        set(insn.get_dependencies())
                        - set(var(v) for v in six.iterkeys(context))))

            raise RuntimeError("not all instructions are reachable"
                    "--did you forget to pass a value for a placeholder?")

        from pytools.obj_array import with_object_array_or_scalar
        return with_object_array_or_scalar(exec_mapper, self.result)
Example 17
def vector_to_device(queue, vec):
    from pytools.obj_array import with_object_array_or_scalar

    from pyopencl.array import to_device

    def to_dev(ary):
        return to_device(queue, ary)

    return with_object_array_or_scalar(to_dev, vec)
Example 18
def componentwise(f, expr):
    """Apply function *f* componentwise to object arrays and
    :class:`MultiVector` instances. *expr* is also allowed to
    be a scalar.
    """

    if isinstance(expr, MultiVector):
        return expr.map(f)

    from pytools.obj_array import with_object_array_or_scalar
    return with_object_array_or_scalar(f, expr)
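A hedged usage sketch, assuming a pytools version that still exports `with_object_array_or_scalar` (newer releases rename this family of helpers, e.g. to `obj_array_vectorize`):

import numpy as np
from pytools.obj_array import make_obj_array, with_object_array_or_scalar

expr = make_obj_array([np.ones(3), np.zeros(3)])
squared = with_object_array_or_scalar(lambda c: c**2, expr)     # componentwise
assert with_object_array_or_scalar(lambda c: c**2, 3.0) == 9.0  # scalar case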
Example 19
    def _resample_and_get(self, queue, vec):
        from pytools.obj_array import with_object_array_or_scalar

        def resample_and_get_one(fld):
            from numbers import Number
            if isinstance(fld, Number):
                return np.ones(self.connection.to_discr.nnodes) * fld
            else:
                return self.connection(queue, fld).get(queue=queue)

        return with_object_array_or_scalar(resample_and_get_one, vec)
Example 20
        def drive_cost_model(wrangler, strengths, geo_data, kernel,
                             kernel_arguments):
            del strengths
            cost_model_result = (self.cost_model(wrangler, geo_data, kernel,
                                                 kernel_arguments))

            from pytools.obj_array import with_object_array_or_scalar
            output_placeholder = with_object_array_or_scalar(
                wrangler.finalize_potentials, wrangler.full_output_zeros())

            return output_placeholder, cost_model_result
Example 21
def vector_from_device(queue, vec):
    from pytools.obj_array import with_object_array_or_scalar

    def from_dev(ary):
        from numbers import Number
        if isinstance(ary, (np.number, Number)):
            # zero, most likely
            return ary

        return ary.get(queue=queue)

    return with_object_array_or_scalar(from_dev, vec)
Example 22
    def __call__(self, expr):
        from pytools.obj_array import with_object_array_or_scalar
        from hedge.tools import is_zero

        def bind_one(subexpr):
            if is_zero(subexpr):
                return subexpr
            else:
                from hedge.optemplate.primitives import OperatorBinding
                return OperatorBinding(self, subexpr)

        return with_object_array_or_scalar(bind_one, expr)
Example 23
    def execute(self, exec_mapper, pre_assign_check=None):
        """Execute the instruction stream, make all scheduling decisions
        dynamically.
        """

        context = exec_mapper.context

        done_insns = set()

        while True:
            discardable_vars = []
            insn = None

            try:
                insn, discardable_vars = self.get_next_step(
                    frozenset(list(context.keys())), frozenset(done_insns))

            except self.NoInstructionAvailable:
                # no available instructions: we're done
                break
            else:
                for name in discardable_vars:
                    del context[name]

                done_insns.add(insn)
                assignments = (self.get_exec_function(
                    insn, exec_mapper)(exec_mapper.queue, insn,
                                       exec_mapper.bound_expr, exec_mapper))

                assignees = insn.get_assignees()
                for target, value in assignments:
                    if pre_assign_check is not None:
                        pre_assign_check(target, value)

                    assert target in assignees
                    context[target] = value

        if len(done_insns) < len(self.instructions):
            print("Unreachable instructions:")
            for insn in set(self.instructions) - done_insns:
                print("    ", str(insn).replace("\n", "\n     "))
                from pymbolic import var
                print(
                    "     missing: ", ", ".join(
                        str(s) for s in set(insn.get_dependencies()) -
                        set(var(v) for v in six.iterkeys(context))))

            raise RuntimeError(
                "not all instructions are reachable"
                "--did you forget to pass a value for a placeholder?")

        from pytools.obj_array import with_object_array_or_scalar
        return with_object_array_or_scalar(exec_mapper, self.result)
Example 24
    def __call__(self, expr):
        from pytools.obj_array import with_object_array_or_scalar
        from grudge.tools import is_zero

        def bind_one(subexpr):
            if is_zero(subexpr):
                return subexpr
            else:
                from grudge.symbolic.primitives import OperatorBinding
                return OperatorBinding(self, subexpr)

        return with_object_array_or_scalar(bind_one, expr)
Example 25
    def map_whole_domain_flux(self, expr, typedict):
        repr_tag_cell = [type_info.no_type]

        def process_vol_flux_arg(flux_arg):
            typedict[flux_arg] = type_info.KnownInteriorFaces() \
                    .unify(repr_tag_cell[0], flux_arg)
            repr_tag_cell[0] = extract_representation(
                    self.rec(flux_arg, typedict))

        def process_bdry_flux_arg(flux_arg):
            typedict[flux_arg] = type_info.KnownBoundary(bpair.tag) \
                .unify(repr_tag_cell[0], flux_arg)

            repr_tag_cell[0] = extract_representation(
                    self.rec(flux_arg, typedict))

        from pytools.obj_array import with_object_array_or_scalar
        for int_flux_info in expr.interiors:
            with_object_array_or_scalar(process_vol_flux_arg,
                    int_flux_info.field_expr)

        for bdry_flux_info in expr.boundaries:
            bpair = bdry_flux_info.bpair
            with_object_array_or_scalar(process_vol_flux_arg, bpair.field)
            with_object_array_or_scalar(process_bdry_flux_arg, bpair.bfield)

        return type_info.VolumeVector(NodalRepresentation())
Example 26
    def get_next_step(self, available_names, done_insns):
        from pytools import all, argmax2
        available_insns = [
                (insn, insn.priority) for insn in self.instructions
                if insn not in done_insns
                and all(dep.name in available_names
                    for dep in insn.get_dependencies())]

        if not available_insns:
            raise self.NoInstructionAvailable

        needed_vars = set([
            dep.name
            for insn in self.instructions
            if insn not in done_insns
            for dep in insn.get_dependencies()
            ])
        discardable_vars = set(available_names) - needed_vars

        # {{{ make sure results do not get discarded
        from pytools.obj_array import with_object_array_or_scalar

        from pytential.symbolic.mappers import DependencyMapper
        dm = DependencyMapper(composite_leaves=False)

        def remove_result_variable(result_expr):
            # The extra dependency mapper run is necessary
            # because, for instance, subscripts can make it
            # into the result expression, which then does
            # not consist of just variables.

            for var in dm(result_expr):
                from pymbolic.primitives import Variable
                assert isinstance(var, Variable)
                discardable_vars.discard(var.name)

        with_object_array_or_scalar(remove_result_variable, self.result)
        # }}}

        return argmax2(available_insns), discardable_vars
Example 27
    def map_whole_domain_flux(self, expr, typedict):
        repr_tag_cell = [type_info.no_type]

        def process_vol_flux_arg(flux_arg):
            typedict[flux_arg] = type_info.KnownInteriorFaces() \
                    .unify(repr_tag_cell[0], flux_arg)
            repr_tag_cell[0] = extract_representation(
                self.rec(flux_arg, typedict))

        def process_bdry_flux_arg(flux_arg):
            typedict[flux_arg] = type_info.KnownBoundary(bpair.tag) \
                .unify(repr_tag_cell[0], flux_arg)

            repr_tag_cell[0] = extract_representation(
                self.rec(flux_arg, typedict))

        from pytools.obj_array import with_object_array_or_scalar
        for int_flux_info in expr.interiors:
            with_object_array_or_scalar(process_vol_flux_arg,
                                        int_flux_info.field_expr)

        for bdry_flux_info in expr.boundaries:
            bpair = bdry_flux_info.bpair
            with_object_array_or_scalar(process_vol_flux_arg, bpair.field)
            with_object_array_or_scalar(process_bdry_flux_arg, bpair.bfield)

        return type_info.VolumeVector(NodalRepresentation())
Example 28
def transform_val(val):
    from pyopencl.algorithm import BuiltList
    if isinstance(val, np.ndarray) and val.dtype == object:
        from pytools.obj_array import with_object_array_or_scalar
        return with_object_array_or_scalar(f, val)
    elif isinstance(val, list):
        return [transform_val(i) for i in val]
    elif isinstance(val, BuiltList):
        return BuiltList(count=val.count,
                         starts=f(val.starts),
                         lists=f(val.lists))
    else:
        return f(val)
Example 29
    def __call__(self, operand, *args, **kwargs):
        # If the call is handed an object array full of operands,
        # return an object array of the operator applied to each of the
        # operands.

        from pytools.obj_array import is_obj_array, with_object_array_or_scalar
        if is_obj_array(operand):
            def make_op(operand_i):
                return self(operand_i, *args, **kwargs)

            return with_object_array_or_scalar(make_op, operand)
        else:
            return var.__call__(self, operand, *args, **kwargs)
Example 30
    def __call__(self, expr):
        def bind_one(subexpr):
            if p.is_zero(subexpr):
                return subexpr
            else:
                return OperatorBinding(self, subexpr)

        from pymbolic.geometric_algebra import MultiVector
        if isinstance(expr, MultiVector):
            return expr.map(bind_one)

        from pytools.obj_array import with_object_array_or_scalar
        return with_object_array_or_scalar(bind_one, expr)
Example 31
def transform_val(val):
    from pyopencl.algorithm import BuiltList
    if isinstance(val, np.ndarray) and val.dtype == object:
        from pytools.obj_array import with_object_array_or_scalar
        return with_object_array_or_scalar(f, val)
    elif isinstance(val, list):
        return [transform_val(i) for i in val]
    elif isinstance(val, BuiltList):
        return BuiltList(
                count=val.count,
                starts=f(val.starts),
                lists=f(val.lists))
    else:
        return f(val)
Example 32
    def get_next_step(self, available_names, done_insns):
        from pytools import all, argmax2
        available_insns = [(insn, insn.priority) for insn in self.instructions
                           if insn not in done_insns and all(
                               dep.name in available_names
                               for dep in insn.get_dependencies())]

        if not available_insns:
            raise self.NoInstructionAvailable

        needed_vars = set([
            dep.name for insn in self.instructions if insn not in done_insns
            for dep in insn.get_dependencies()
        ])
        discardable_vars = set(available_names) - needed_vars

        # {{{ make sure results do not get discarded
        from pytools.obj_array import with_object_array_or_scalar

        from pytential.symbolic.mappers import DependencyMapper
        dm = DependencyMapper(composite_leaves=False)

        def remove_result_variable(result_expr):
            # The extra dependency mapper run is necessary
            # because, for instance, subscripts can make it
            # into the result expression, which then does
            # not consist of just variables.

            for var in dm(result_expr):
                from pymbolic.primitives import Variable
                assert isinstance(var, Variable)
                discardable_vars.discard(var.name)

        with_object_array_or_scalar(remove_result_variable, self.result)
        # }}}

        return argmax2(available_insns), discardable_vars
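Both versions of `get_next_step` lean on `pytools.argmax2`, which takes `(candidate, score)` pairs and returns the candidate with the highest score; here that picks the highest-priority runnable instruction. A stand-in that sketches the observable behavior (not pytools' source):

def argmax2(pairs):
    # Return the first element of the pair whose second element is largest.
    return max(pairs, key=lambda pair: pair[1])[0]

assert argmax2([("a", 1), ("b", 5), ("c", 3)]) == "b"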
Example 33
    def _transform_arrays(self, f):
        result = {}
        for field_name in self.__class__.fields:
            try:
                attr = getattr(self, field_name)
            except AttributeError:
                pass
            else:
                if isinstance(attr, np.ndarray) and attr.dtype == object:
                    from pytools.obj_array import with_object_array_or_scalar
                    result[field_name] = with_object_array_or_scalar(f, attr)
                else:
                    result[field_name] = f(attr)

        return self.copy(**result)
Example 34
def transform_val(val):
    from pyopencl.algorithm import BuiltList
    if isinstance(val, np.ndarray) and val.dtype == object:
        from pytools.obj_array import with_object_array_or_scalar
        return with_object_array_or_scalar(f, val)
    elif isinstance(val, list):
        return [transform_val(i) for i in val]
    elif isinstance(val, BuiltList):
        transformed_list = {}
        for field in val.__dict__:
            if field != 'count' and not field.startswith('_'):
                transformed_list[field] = f(getattr(val, field))
        return BuiltList(count=val.count, **transformed_list)
    else:
        return f(val)
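All three `transform_val` variants share one dispatch: vectorize over object arrays, recurse into lists, and apply `f` to leaves. A self-contained sketch of that skeleton, with the pyopencl `BuiltList` branch omitted:

import numpy as np

def transform_val(f, val):
    if isinstance(val, np.ndarray) and val.dtype == object:
        # componentwise, as with_object_array_or_scalar would do
        result = np.empty(val.shape, dtype=object)
        for idx in np.ndindex(val.shape):
            result[idx] = transform_val(f, val[idx])
        return result
    elif isinstance(val, list):
        return [transform_val(f, v) for v in val]
    else:
        return f(val)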
Example 35
    def face_mass(self, vec):
        if is_obj_array(vec):
            return with_object_array_or_scalar(lambda el: self.face_mass(el),
                                               vec)

        @memoize_in(self, "face_mass_knl")
        def knl():
            knl = lp.make_kernel(
                """{[k,i,f,j]:
                    0<=k<nelements and
                    0<=f<nfaces and
                    0<=i<nvol_nodes and
                    0<=j<nface_nodes}""",
                "result[k,i] = sum(f, sum(j, mat[i, f, j] * vec[f, k, j]))",
                default_offset=lp.auto,
                name="face_mass")

            knl = lp.split_iname(knl, "i", 16, inner_tag="l.0")
            return lp.tag_inames(knl, dict(k="g.0"))

        all_faces_conn = self.get_connection("vol", "all_faces")
        all_faces_discr = all_faces_conn.to_discr
        vol_discr = all_faces_conn.from_discr

        result = vol_discr.empty(queue=vec.queue, dtype=vec.dtype)

        fj = self.face_jacobian("all_faces")
        vec = vec * fj

        assert len(all_faces_discr.groups) == len(vol_discr.groups)

        for afgrp, volgrp in zip(all_faces_discr.groups, vol_discr.groups):
            nfaces = volgrp.mesh_el_group.nfaces

            matrix = self.get_local_face_mass_matrix(afgrp, volgrp, vec.dtype)

            input_view = afgrp.view(vec).reshape(nfaces, volgrp.nelements,
                                                 afgrp.nunit_nodes)
            knl()(vec.queue,
                  mat=matrix,
                  result=volgrp.view(result),
                  vec=input_view)

        return result
Example 36
def dd_axis(axis, ambient_dim, operand):
    """Return the derivative along (XYZ) axis *axis*
    (in *ambient_dim*-dimensional space) of *operand*.
    """
    from pytools.obj_array import is_obj_array, with_object_array_or_scalar
    if is_obj_array(operand):
        def dd_axis_comp(operand_i):
            return dd_axis(axis, ambient_dim, operand_i)

        return with_object_array_or_scalar(dd_axis_comp, operand)

    d = Derivative()

    unit_vector = np.zeros(ambient_dim)
    unit_vector[axis] = 1

    unit_mvector = MultiVector(unit_vector)

    return d.resolve(
            (unit_mvector.scalar_product(d.dnabla(ambient_dim)))
            * d(operand))
Example 37
    def tau(self, to_quad_op, state, mu=None):
        faceq_state = self.faceq_state()

        dimensions = self.dimensions

        # {{{ compute gradient of u ---------------------------------------
        # Use the product rule to compute the gradient of
        # u from the gradient of (rho u). This ensures we don't
        # compute the derivatives twice.

        from pytools.obj_array import with_object_array_or_scalar
        dq = with_object_array_or_scalar(to_quad_op, self.grad_of_state())

        q = cse(to_quad_op(state))

        du = numpy.zeros((dimensions, dimensions), dtype=object)
        for i in range(dimensions):
            for j in range(dimensions):
                du[i, j] = cse(
                    (dq[i + 2, j] - self.cse_u(q)[i] * dq[0, j]) / self.rho(q),
                    "du%d_d%s" % (i, AXES[j]))

        # }}}

        # {{{ put together viscous stress tau -----------------------------
        from pytools import delta

        if mu is None:
            mu = self.get_mu(q, to_quad_op)

        tau = numpy.zeros((dimensions, dimensions), dtype=object)
        for i in range(dimensions):
            for j in range(dimensions):
                tau[i, j] = cse(
                    mu *
                    cse(du[i, j] + du[j, i] -
                        2 / self.dimensions * delta(i, j) * numpy.trace(du)),
                    "tau_%d%d" % (i, j))

        return tau
Example 38
    def tau(self, to_quad_op, state, mu=None):
        faceq_state = self.faceq_state()

        dimensions = self.dimensions

        # {{{ compute gradient of u ---------------------------------------
        # Use the product rule to compute the gradient of
        # u from the gradient of (rho u). This ensures we don't
        # compute the derivatives twice.

        from pytools.obj_array import with_object_array_or_scalar
        dq = with_object_array_or_scalar(
                to_quad_op, self.grad_of_state())

        q = cse(to_quad_op(state))

        du = numpy.zeros((dimensions, dimensions), dtype=object)
        for i in range(dimensions):
            for j in range(dimensions):
                du[i,j] = cse(
                        (dq[i+2,j] - self.cse_u(q)[i] * dq[0,j]) / self.rho(q),
                        "du%d_d%s" % (i, AXES[j]))

        # }}}

        # {{{ put together viscous stress tau -----------------------------
        from pytools import delta

        if mu is None:
            mu = self.get_mu(q, to_quad_op)

        tau = numpy.zeros((dimensions, dimensions), dtype=object)
        for i in range(dimensions):
            for j in range(dimensions):
                tau[i,j] = cse(mu * cse(du[i,j] + du[j,i] -
                           2/self.dimensions * delta(i,j) * numpy.trace(du)),
                           "tau_%d%d" % (i, j))

        return tau
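Both tau variants assemble the deviatoric viscous stress tau_ij = mu * (du_ij + du_ji - (2/d) * delta_ij * tr(du)). A numeric sketch of just that formula (random stand-in values, not hedge code):

import numpy as np

d = 3
mu = 0.1
du = np.random.rand(d, d)   # stand-in for the velocity gradient du_i/dx_j
tau = mu * (du + du.T - 2.0/d * np.eye(d) * np.trace(du))
# the 2/d * trace term makes tau trace-free:
assert abs(np.trace(tau)) < 1e-12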
Example 39
def drive_fmm(expansion_wrangler, src_weights):
    """Top-level driver routine for the QBX fast multipole calculation.

    :arg geo_data: A :class:`QBXFMMGeometryData` instance.
    :arg expansion_wrangler: An object exhibiting the
        :class:`ExpansionWranglerInterface`.
    :arg src_weights: Source 'density/weights/charges'.
        Passed unmodified to *expansion_wrangler*.

    Returns the potentials computed by *expansion_wrangler*.

    See also :func:`boxtree.fmm.drive_fmm`.
    """
    wrangler = expansion_wrangler

    geo_data = wrangler.geo_data
    traversal = geo_data.traversal()
    tree = traversal.tree

    # Interface guidelines: Attributes of the tree are assumed to be known
    # to the expansion wrangler and should not be passed.

    logger.debug("start qbx fmm")

    logger.debug("reorder source weights")

    src_weights = wrangler.reorder_sources(src_weights)

    # {{{ construct local multipoles

    logger.debug("construct local multipoles")

    mpole_exps = wrangler.form_multipoles(
            traversal.source_boxes,
            src_weights)

    # }}}

    # {{{ propagate multipoles upward

    logger.debug("propagate multipoles upward")

    for lev in range(tree.nlevels-1, -1, -1):
        start_parent_box, end_parent_box = \
                traversal.level_start_source_parent_box_nrs[lev:lev+2]
        wrangler.coarsen_multipoles(
                traversal.source_parent_boxes[start_parent_box:end_parent_box],
                mpole_exps)

    # }}}

    # {{{ direct evaluation from neighbor source boxes ("list 1")

    logger.debug("direct evaluation from neighbor source boxes ('list 1')")

    non_qbx_potentials = wrangler.eval_direct(
            traversal.target_boxes,
            traversal.neighbor_source_boxes_starts,
            traversal.neighbor_source_boxes_lists,
            src_weights)

    # }}}

    # {{{ translate separated siblings' ("list 2") mpoles to local

    logger.debug("translate separated siblings' ('list 2') mpoles to local")

    local_exps = wrangler.multipole_to_local(
            traversal.target_or_target_parent_boxes,
            traversal.sep_siblings_starts,
            traversal.sep_siblings_lists,
            mpole_exps)

    # }}}

    # {{{ evaluate sep. smaller mpoles ("list 3") at particles

    logger.debug("evaluate sep. smaller mpoles at particles ('list 3 far')")

    # (the point of aiming this stage at particles is specifically to keep its
    # contribution *out* of the downward-propagating local expansions)

    non_qbx_potentials = non_qbx_potentials + wrangler.eval_multipoles(
            traversal.target_boxes,
            traversal.sep_smaller_starts,
            traversal.sep_smaller_lists,
            mpole_exps)

    # assert that list 3 close has been merged into list 1
    assert traversal.sep_close_smaller_starts is None

    # }}}

    # {{{ form locals for separated bigger mpoles ("list 4")

    logger.debug("form locals for separated bigger mpoles ('list 4 far')")

    local_exps = local_exps + wrangler.form_locals(
            traversal.target_or_target_parent_boxes,
            traversal.sep_bigger_starts,
            traversal.sep_bigger_lists,
            src_weights)

    # assert that list 4 close has been merged into list 1
    assert traversal.sep_close_bigger_starts is None

    # }}}

    # {{{ propagate local_exps downward

    logger.debug("propagate local_exps downward")

    for lev in range(1, tree.nlevels):
        start_box, end_box = \
                traversal.level_start_target_or_target_parent_box_nrs[lev:lev+2]
        wrangler.refine_locals(
                traversal.target_or_target_parent_boxes[start_box:end_box],
                local_exps)

    # }}}

    # {{{ evaluate locals

    logger.debug("evaluate locals")

    non_qbx_potentials = non_qbx_potentials + wrangler.eval_locals(
            traversal.target_boxes,
            local_exps)

    # }}}

    # {{{ wrangle qbx expansions

    logger.debug("form global qbx expansions from list 1")
    qbx_expansions = wrangler.form_global_qbx_locals(
            traversal.neighbor_source_boxes_starts,
            traversal.neighbor_source_boxes_lists,
            src_weights)

    logger.debug("translate from list 3 multipoles to qbx local expansions")
    qbx_expansions = qbx_expansions + \
            wrangler.translate_box_multipoles_to_qbx_local(mpole_exps)

    logger.debug("translate from box local expansions to contained "
            "qbx local expansions")
    qbx_expansions = qbx_expansions + \
            wrangler.translate_box_local_to_qbx_local(local_exps)

    logger.debug("evaluate qbx local expansions")
    qbx_potentials = wrangler.eval_qbx_expansions(
            qbx_expansions)

    # }}}

    # {{{ reorder potentials

    logger.debug("reorder potentials")

    nqbtl = geo_data.non_qbx_box_target_lists()

    from pytools.obj_array import make_obj_array
    all_potentials_in_tree_order = make_obj_array([
            cl.array.zeros(
                wrangler.queue,
                tree.ntargets,
                dtype=wrangler.dtype)
            for k in wrangler.code.out_kernels])

    for ap_i, nqp_i in zip(all_potentials_in_tree_order, non_qbx_potentials):
        ap_i[nqbtl.unfiltered_from_filtered_target_indices] = nqp_i

    all_potentials_in_tree_order += qbx_potentials

    def reorder_potentials(x):
        return x[tree.sorted_target_ids]

    from pytools.obj_array import with_object_array_or_scalar
    result = with_object_array_or_scalar(
            reorder_potentials, all_potentials_in_tree_order)

    # }}}

    logger.debug("qbx fmm complete")

    return result
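The final reordering step is a plain numpy gather: `x[tree.sorted_target_ids]` builds an array whose i-th entry is `x[sorted_target_ids[i]]`. A sketch with stand-in values (the exact tree-versus-user ordering convention is boxtree's, not shown here):

import numpy as np

x = np.array([10.0, 20.0, 30.0, 40.0])  # potentials in tree target order
ids = np.array([2, 0, 3, 1])            # stand-in for tree.sorted_target_ids
reordered = x[ids]                      # reordered[i] == x[ids[i]]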
Example 40
    def interp(self, src, tgt, vec):
        if is_obj_array(vec):
            return with_object_array_or_scalar(
                lambda el: self.interp(src, tgt, el), vec)

        return self.get_connection(src, tgt)(vec.queue, vec)
Example 41
def interior_trace_pair(discr, vec):
    i = discr.interp("vol", "int_faces", vec)
    e = with_object_array_or_scalar(
        lambda el: discr.opposite_face_connection()(el.queue, el), i)
    return TracePair("int_faces", i, e)
Example 42
def with_queue(queue, ary):
    return with_object_array_or_scalar(lambda x: x.with_queue(queue), ary)
Example 43
    def exec_layer_potential_insn_fmm(self, queue, insn, bound_expr, evaluate):
        # {{{ build list of unique target discretizations used

        # map (name, qbx_side) to number in list
        tgt_name_and_side_to_number = {}
        # list of tuples (discr, qbx_side)
        target_discrs_and_qbx_sides = []

        for o in insn.outputs:
            key = (o.target_name, o.qbx_forced_limit)
            if key not in tgt_name_and_side_to_number:
                tgt_name_and_side_to_number[key] = \
                        len(target_discrs_and_qbx_sides)

                target_discr = bound_expr.places[o.target_name]
                if isinstance(target_discr, LayerPotentialSource):
                    target_discr = target_discr.density_discr

                target_discrs_and_qbx_sides.append(
                        (target_discr, o.qbx_forced_limit))

        target_discrs_and_qbx_sides = tuple(target_discrs_and_qbx_sides)

        # }}}

        geo_data = self.qbx_fmm_geometry_data(target_discrs_and_qbx_sides)

        # FIXME Exert more positive control over geo_data attribute lifetimes using
        # geo_data.<method>.clear_cache(geo_data).

        # FIXME Synthesize "bad centers" around corners and edges that have
        # inadequate QBX coverage.

        # FIXME don't compute *all* output kernels on all targets--respect that
        # some target discretizations may only be asking for derivatives (e.g.)

        strengths = (evaluate(insn.density).with_queue(queue)
                * self.weights_and_area_elements())

        # {{{ get expansion wrangler

        base_kernel = None
        out_kernels = []

        from sumpy.kernel import AxisTargetDerivativeRemover
        for knl in insn.kernels:
            candidate_base_kernel = AxisTargetDerivativeRemover()(knl)

            if base_kernel is None:
                base_kernel = candidate_base_kernel
            else:
                assert base_kernel == candidate_base_kernel

        out_kernels = tuple(knl for knl in insn.kernels)

        if base_kernel.is_complex_valued or strengths.dtype.kind == "c":
            value_dtype = self.complex_dtype
        else:
            value_dtype = self.real_dtype

        # {{{ build extra_kwargs dictionaries

        # This contains things like the Helmholtz parameter k or
        # the normal directions for double layers.

        def reorder_sources(source_array):
            if isinstance(source_array, cl.array.Array):
                return (source_array
                        .with_queue(queue)
                        [geo_data.tree().user_point_source_ids]
                        .with_queue(None))
            else:
                return source_array

        kernel_extra_kwargs = {}
        source_extra_kwargs = {}

        from sumpy.tools import gather_arguments, gather_source_arguments
        from pytools.obj_array import with_object_array_or_scalar
        for func, var_dict in [
                (gather_arguments, kernel_extra_kwargs),
                (gather_source_arguments, source_extra_kwargs),
                ]:
            for arg in func(out_kernels):
                var_dict[arg.name] = with_object_array_or_scalar(
                        reorder_sources,
                        evaluate(insn.kernel_arguments[arg.name]))

        # }}}

        wrangler = self.expansion_wrangler_code_container(
                base_kernel, out_kernels).get_wrangler(
                        queue, geo_data, value_dtype,
                        source_extra_kwargs=source_extra_kwargs,
                        kernel_extra_kwargs=kernel_extra_kwargs)

        # }}}

        #geo_data.plot()

        if len(geo_data.global_qbx_centers()) != geo_data.center_info().ncenters:
            raise NotImplementedError("geometry has centers requiring local QBX")

        from pytential.qbx.geometry import target_state
        if (geo_data.user_target_to_center().with_queue(queue)
                == target_state.FAILED).get().any():
            raise RuntimeError("geometry has failed targets")

        # {{{ execute global QBX

        from pytential.qbx.fmm import drive_fmm
        all_potentials_on_every_tgt = drive_fmm(wrangler, strengths)

        # }}}

        result = []

        for o in insn.outputs:
            tgt_side_number = tgt_name_and_side_to_number[
                    o.target_name, o.qbx_forced_limit]
            tgt_slice = slice(*geo_data.target_info().target_discr_starts[
                    tgt_side_number:tgt_side_number+2])

            result.append(
                    (o.name,
                        all_potentials_on_every_tgt[o.kernel_index][tgt_slice]))

        return result, []
Example 44
def apply_op(field):
    from hedge.tools import with_object_array_or_scalar
    return with_object_array_or_scalar(lambda f: bound_op(f=f), field)
Example 45
def evaluate_wrapper(expr):
    value = evaluate(expr)
    return with_object_array_or_scalar(lambda x: x, value)
Example 46
    def __call__(self, expr):
        from pytools.obj_array import with_object_array_or_scalar
        from functools import partial
        return with_object_array_or_scalar(
                partial(pymbolic.primitives.Expression.__call__, self),
                expr)
Example 47
    def map_operator_binding(self, expr, typedict):
        from hedge.optemplate.operators import (
            NodalReductionOperator, DiffOperatorBase,
            ReferenceDiffOperatorBase, MassOperatorBase,
            ReferenceMassOperatorBase, ElementwiseMaxOperator,
            BoundarizeOperator, FluxExchangeOperator, FluxOperatorBase,
            QuadratureGridUpsampler, QuadratureBoundaryGridUpsampler,
            QuadratureInteriorFacesGridUpsampler, MassOperator,
            ReferenceMassOperator, ReferenceQuadratureMassOperator,
            StiffnessTOperator, ReferenceStiffnessTOperator,
            ReferenceQuadratureStiffnessTOperator, ElementwiseLinearOperator)

        if isinstance(expr.op, NodalReductionOperator):
            typedict[expr.field] = type_info.KnownVolume()
            self.rec(expr.field, typedict)
            return type_info.Scalar()

        elif isinstance(expr.op, (ReferenceQuadratureStiffnessTOperator,
                                  ReferenceQuadratureMassOperator)):
            typedict[expr.field] = type_info.VolumeVector(
                QuadratureRepresentation(expr.op.quadrature_tag))
            self.rec(expr.field, typedict)
            return type_info.VolumeVector(NodalRepresentation())

        elif isinstance(expr.op,
                        (ReferenceStiffnessTOperator, StiffnessTOperator)):
            # stiffness_T can be specialized for quadrature by OperatorSpecializer
            typedict[expr.field] = type_info.KnownVolume()
            self.rec(expr.field, typedict)
            return type_info.VolumeVector(NodalRepresentation())

        elif isinstance(expr.op, (ReferenceMassOperator, MassOperator)):
            # mass can be specialized for quadrature by OperatorSpecializer
            typedict[expr.field] = type_info.KnownVolume()
            self.rec(expr.field, typedict)
            return type_info.VolumeVector(NodalRepresentation())

        elif isinstance(expr.op,
                        (DiffOperatorBase, ReferenceDiffOperatorBase,
                         MassOperatorBase, ReferenceMassOperatorBase)):
            # all other operators are purely nodal
            typedict[expr.field] = type_info.VolumeVector(
                NodalRepresentation())
            self.rec(expr.field, typedict)
            return type_info.VolumeVector(NodalRepresentation())

        elif isinstance(expr.op, ElementwiseMaxOperator):
            typedict[expr.field] = typedict[expr].unify(
                type_info.KnownVolume(), expr.field)
            return self.rec(expr.field, typedict)

        elif isinstance(expr.op, BoundarizeOperator):
            # upward propagation: argument has same rep tag as result
            typedict[expr.field] = type_info.KnownVolume().unify(
                extract_representation(type_info), expr.field)

            self.rec(expr.field, typedict)

            # downward propagation: result has same rep tag as argument
            return type_info.KnownBoundary(expr.op.tag).unify(
                extract_representation(typedict[expr.field]), expr)

        elif isinstance(expr.op, FluxExchangeOperator):
            raise NotImplementedError

        elif isinstance(expr.op, FluxOperatorBase):
            from pytools.obj_array import with_object_array_or_scalar
            from hedge.optemplate.primitives import BoundaryPair

            repr_tag_cell = [type_info.no_type]

            def process_vol_flux_arg(flux_arg):
                typedict[flux_arg] = type_info.KnownInteriorFaces() \
                        .unify(repr_tag_cell[0], flux_arg)
                repr_tag_cell[0] = extract_representation(
                    self.rec(flux_arg, typedict))

            if isinstance(expr.field, BoundaryPair):

                def process_bdry_flux_arg(flux_arg):
                    typedict[flux_arg] = type_info.KnownBoundary(bpair.tag) \
                        .unify(repr_tag_cell[0], flux_arg)

                    repr_tag_cell[0] = extract_representation(
                        self.rec(flux_arg, typedict))

                bpair = expr.field
                with_object_array_or_scalar(process_vol_flux_arg, bpair.field)
                with_object_array_or_scalar(process_bdry_flux_arg,
                                            bpair.bfield)
            else:
                with_object_array_or_scalar(process_vol_flux_arg, expr.field)

            return type_info.VolumeVector(NodalRepresentation())

        elif isinstance(expr.op, QuadratureGridUpsampler):
            typedict[expr.field] = extract_domain(typedict[expr])
            self.rec(expr.field, typedict)
            return type_info.KnownRepresentation(
                    QuadratureRepresentation(expr.op.quadrature_tag))\
                            .unify(extract_domain(typedict[expr.field]), expr)

        elif isinstance(expr.op, QuadratureInteriorFacesGridUpsampler):
            typedict[expr.field] = type_info.VolumeVector(
                NodalRepresentation())
            self.rec(expr.field, typedict)
            return type_info.InteriorFacesVector(
                QuadratureRepresentation(expr.op.quadrature_tag))

        elif isinstance(expr.op, QuadratureBoundaryGridUpsampler):
            typedict[expr.field] = type_info.BoundaryVector(
                expr.op.boundary_tag, NodalRepresentation())
            self.rec(expr.field, typedict)
            return type_info.BoundaryVector(
                expr.op.boundary_tag,
                QuadratureRepresentation(expr.op.quadrature_tag))

        elif isinstance(expr.op, ElementwiseLinearOperator):
            typedict[expr.field] = type_info.VolumeVector(
                NodalRepresentation())
            self.rec(expr.field, typedict)
            return type_info.VolumeVector(NodalRepresentation())

        else:
            raise RuntimeError("TypeInferrer doesn't know how to handle '%s'" %
                               expr.op)
Example 48
    def op_template(self, with_sensor=False):
        from hedge.optemplate import \
                Field, \
                make_vector_field, \
                BoundaryPair, \
                get_flux_operator, \
                make_nabla, \
                InverseMassOperator, \
                BoundarizeOperator

        d = self.dimensions

        w = make_vector_field("w", d + 1)
        u = w[0]
        v = w[1:]

        from hedge.tools import join_fields
        c = Field("c")
        flux_w = join_fields(c, w)

        # {{{ boundary conditions
        from hedge.flux import make_normal
        normal = make_normal(d)

        from hedge.tools import join_fields

        # Dirichlet
        dir_c = BoundarizeOperator(self.dirichlet_tag) * c
        dir_u = BoundarizeOperator(self.dirichlet_tag) * u
        dir_v = BoundarizeOperator(self.dirichlet_tag) * v

        dir_bc = join_fields(dir_c, -dir_u, dir_v)

        # Neumann
        neu_c = BoundarizeOperator(self.neumann_tag) * c
        neu_u = BoundarizeOperator(self.neumann_tag) * u
        neu_v = BoundarizeOperator(self.neumann_tag) * v

        neu_bc = join_fields(neu_c, neu_u, -neu_v)

        # Radiation
        from hedge.optemplate import make_normal
        rad_normal = make_normal(self.radiation_tag, d)

        rad_c = BoundarizeOperator(self.radiation_tag) * c
        rad_u = BoundarizeOperator(self.radiation_tag) * u
        rad_v = BoundarizeOperator(self.radiation_tag) * v

        rad_bc = join_fields(
            rad_c,
            0.5 * (rad_u - self.time_sign * numpy.dot(rad_normal, rad_v)),
            0.5 * rad_normal *
            (numpy.dot(rad_normal, rad_v) - self.time_sign * rad_u))

        # }}}

        # {{{ diffusion -------------------------------------------------------
        from pytools.obj_array import with_object_array_or_scalar

        def make_diffusion(arg):
            if with_sensor or (self.diffusion_coeff is not None
                               and self.diffusion_coeff != 0):
                if self.diffusion_coeff is None:
                    diffusion_coeff = 0
                else:
                    diffusion_coeff = self.diffusion_coeff

                if with_sensor:
                    diffusion_coeff += Field("sensor")

                from hedge.second_order import SecondDerivativeTarget

                # strong_form here allows reuse of the value of grad u.
                grad_tgt = SecondDerivativeTarget(
                    self.dimensions, strong_form=True, operand=arg)

                self.diffusion_scheme.grad(
                    grad_tgt,
                    bc_getter=None,
                    dirichlet_tags=[],
                    neumann_tags=[])

                div_tgt = SecondDerivativeTarget(
                    self.dimensions,
                    strong_form=False,
                    operand=diffusion_coeff * grad_tgt.minv_all)

                self.diffusion_scheme.div(
                    div_tgt,
                    bc_getter=None,
                    dirichlet_tags=[],
                    neumann_tags=[])

                return div_tgt.minv_all
            else:
                return 0

        # }}}
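        # Note on the block above: the diffusion term is assembled in two
        # passes over SecondDerivativeTarget -- a strong-form gradient of
        # arg, then a weak-form divergence of diffusion_coeff * grad --
        # with minv_all holding the inverse-mass-applied result of each
        # pass.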

        # entire operator -----------------------------------------------------
        nabla = make_nabla(d)
        flux_op = get_flux_operator(self.flux())

        return (
            -join_fields(
                -self.time_sign * c * numpy.dot(nabla, v) - make_diffusion(u),
                -self.time_sign * c *
                (nabla * u) - with_object_array_or_scalar(make_diffusion, v)) +
            InverseMassOperator() * (flux_op(flux_w) + flux_op(
                BoundaryPair(flux_w, dir_bc, self.dirichlet_tag)) + flux_op(
                    BoundaryPair(flux_w, neu_bc, self.neumann_tag)) + flux_op(
                        BoundaryPair(flux_w, rad_bc, self.radiation_tag))))
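The radiation boundary value above keeps only the outgoing
characteristic: with s = time_sign, it prescribes u_bc = (u - s*n.v)/2
and v_bc = n*(n.v - s*u)/2. A quick numpy check (normal and trace
values chosen arbitrarily) that this combination is idempotent, i.e. a
genuine projection:

import numpy as np

n = np.array([1.0, 0.0])            # unit boundary normal (assumed)
s = 1.0                             # stands in for time_sign
u, v = 0.7, np.array([0.3, -0.2])   # arbitrary boundary traces

u_bc = 0.5 * (u - s * np.dot(n, v))
v_bc = 0.5 * n * (np.dot(n, v) - s * u)

# applying the same combination to (u_bc, v_bc) reproduces them
assert np.isclose(u_bc, 0.5 * (u_bc - s * np.dot(n, v_bc)))
assert np.allclose(v_bc, 0.5 * n * (np.dot(n, v_bc) - s * u_bc))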
Example no. 57
    def op_template(self, with_sensor=False):
        from hedge.optemplate import \
                Field, \
                make_sym_vector, \
                BoundaryPair, \
                get_flux_operator, \
                make_nabla, \
                InverseMassOperator, \
                BoundarizeOperator

        d = self.dimensions

        w = make_sym_vector("w", d+1)
        u = w[0]
        v = w[1:]

        from hedge.tools import join_fields
        flux_w = join_fields(self.c, w)

        # {{{ boundary conditions

        # Dirichlet
        dir_c = BoundarizeOperator(self.dirichlet_tag) * self.c
        dir_u = BoundarizeOperator(self.dirichlet_tag) * u
        dir_v = BoundarizeOperator(self.dirichlet_tag) * v

        dir_bc = join_fields(dir_c, -dir_u, dir_v)

        # Neumann
        neu_c = BoundarizeOperator(self.neumann_tag) * self.c
        neu_u = BoundarizeOperator(self.neumann_tag) * u
        neu_v = BoundarizeOperator(self.neumann_tag) * v

        neu_bc = join_fields(neu_c, neu_u, -neu_v)

        # Radiation
        from hedge.optemplate import make_normal
        rad_normal = make_normal(self.radiation_tag, d)

        rad_c = BoundarizeOperator(self.radiation_tag) * self.c
        rad_u = BoundarizeOperator(self.radiation_tag) * u
        rad_v = BoundarizeOperator(self.radiation_tag) * v

        rad_bc = join_fields(
                rad_c,
                0.5*(rad_u - self.time_sign*np.dot(rad_normal, rad_v)),
                0.5*rad_normal*(np.dot(rad_normal, rad_v) - self.time_sign*rad_u)
                )

        # }}}

        # {{{ diffusion -------------------------------------------------------
        from pytools.obj_array import with_object_array_or_scalar

        def make_diffusion(arg):
            if with_sensor or (
                    self.diffusion_coeff is not None and self.diffusion_coeff != 0):
                if self.diffusion_coeff is None:
                    diffusion_coeff = 0
                else:
                    diffusion_coeff = self.diffusion_coeff

                if with_sensor:
                    diffusion_coeff += Field("sensor")

                from hedge.second_order import SecondDerivativeTarget

                # strong_form here allows reuse of the value of grad u.
                grad_tgt = SecondDerivativeTarget(
                        self.dimensions, strong_form=True,
                        operand=arg)

                self.diffusion_scheme.grad(grad_tgt, bc_getter=None,
                        dirichlet_tags=[], neumann_tags=[])

                div_tgt = SecondDerivativeTarget(
                        self.dimensions, strong_form=False,
                        operand=diffusion_coeff*grad_tgt.minv_all)

                self.diffusion_scheme.div(div_tgt,
                        bc_getter=None,
                        dirichlet_tags=[], neumann_tags=[])

                return div_tgt.minv_all
            else:
                return 0

        # }}}

        # entire operator -----------------------------------------------------
        nabla = make_nabla(d)
        flux_op = get_flux_operator(self.flux())

        return (
                - join_fields(
                    - self.time_sign*self.c*np.dot(nabla, v) - make_diffusion(u)
                    + self.source,

                    -self.time_sign*self.c*(nabla*u) - with_object_array_or_scalar(
                        make_diffusion, v)
                    )
                +
                InverseMassOperator() * (
                    flux_op(flux_w)
                    + flux_op(BoundaryPair(flux_w, dir_bc, self.dirichlet_tag))
                    + flux_op(BoundaryPair(flux_w, neu_bc, self.neumann_tag))
                    + flux_op(BoundaryPair(flux_w, rad_bc, self.radiation_tag))
                    ))
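Both variants rely on join_fields from hedge.tools to prepend the wave
speed to the state vector. It is assumed to flatten its arguments --
scalars, symbolic expressions, object arrays -- into one flat numpy
object array; a hedged sketch:

import numpy

def join_fields_sketch(*args):
    # hypothetical stand-in for hedge.tools.join_fields
    components = []
    for arg in args:
        if isinstance(arg, numpy.ndarray) and arg.dtype.char == "O":
            components.extend(arg)       # splice object arrays in place
        else:
            components.append(arg)       # keep scalars/expressions whole
    result = numpy.empty(len(components), dtype=object)
    for i, c in enumerate(components):
        result[i] = c
    return result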
Example no. 58
    def map_operator_binding(self, expr, typedict):
        from hedge.optemplate.operators import (
                NodalReductionOperator,

                DiffOperatorBase,
                ReferenceDiffOperatorBase,

                MassOperatorBase,
                ReferenceMassOperatorBase,

                ElementwiseMaxOperator,
                BoundarizeOperator, FluxExchangeOperator,
                FluxOperatorBase,
                QuadratureGridUpsampler, QuadratureBoundaryGridUpsampler,
                QuadratureInteriorFacesGridUpsampler,

                MassOperator,
                ReferenceMassOperator,
                ReferenceQuadratureMassOperator,

                StiffnessTOperator,
                ReferenceStiffnessTOperator,
                ReferenceQuadratureStiffnessTOperator,

                ElementwiseLinearOperator)

        if isinstance(expr.op, NodalReductionOperator):
            typedict[expr.field] = type_info.KnownVolume()
            self.rec(expr.field, typedict)
            return type_info.Scalar()

        elif isinstance(expr.op,
                (ReferenceQuadratureStiffnessTOperator,
                    ReferenceQuadratureMassOperator)):
            typedict[expr.field] = type_info.VolumeVector(
                    QuadratureRepresentation(expr.op.quadrature_tag))
            self.rec(expr.field, typedict)
            return type_info.VolumeVector(NodalRepresentation())

        elif isinstance(expr.op,
                (ReferenceStiffnessTOperator, StiffnessTOperator)):
            # stiffness_T can be specialized for quadrature by OperatorSpecializer
            typedict[expr.field] = type_info.KnownVolume()
            self.rec(expr.field, typedict)
            return type_info.VolumeVector(NodalRepresentation())

        elif isinstance(expr.op,
                (ReferenceMassOperator, MassOperator)):
            # mass can be specialized for quadrature by OperatorSpecializer
            typedict[expr.field] = type_info.KnownVolume()
            self.rec(expr.field, typedict)
            return type_info.VolumeVector(NodalRepresentation())

        elif isinstance(expr.op, (
                DiffOperatorBase,
                ReferenceDiffOperatorBase,
                MassOperatorBase,
                ReferenceMassOperatorBase)):
            # all other operators are purely nodal
            typedict[expr.field] = type_info.VolumeVector(NodalRepresentation())
            self.rec(expr.field, typedict)
            return type_info.VolumeVector(NodalRepresentation())

        elif isinstance(expr.op, ElementwiseMaxOperator):
            typedict[expr.field] = typedict[expr].unify(
                    type_info.KnownVolume(), expr.field)
            return self.rec(expr.field, typedict)

        elif isinstance(expr.op, BoundarizeOperator):
            # upward propagation: argument has same rep tag as result
            typedict[expr.field] = type_info.KnownVolume().unify(
                    extract_representation(typedict[expr]), expr.field)

            self.rec(expr.field, typedict)

            # downward propagation: result has same rep tag as argument
            return type_info.KnownBoundary(expr.op.tag).unify(
                    extract_representation(typedict[expr.field]), expr)

        elif isinstance(expr.op, FluxExchangeOperator):
            raise NotImplementedError

        elif isinstance(expr.op, FluxOperatorBase):
            from pytools.obj_array import with_object_array_or_scalar
            from hedge.optemplate.primitives import BoundaryPair

            repr_tag_cell = [type_info.no_type]

            def process_vol_flux_arg(flux_arg):
                typedict[flux_arg] = type_info.KnownInteriorFaces() \
                        .unify(repr_tag_cell[0], flux_arg)
                repr_tag_cell[0] = extract_representation(
                        self.rec(flux_arg, typedict))

            if isinstance(expr.field, BoundaryPair):
                def process_bdry_flux_arg(flux_arg):
                    typedict[flux_arg] = type_info.KnownBoundary(bpair.tag) \
                        .unify(repr_tag_cell[0], flux_arg)

                    repr_tag_cell[0] = extract_representation(
                            self.rec(flux_arg, typedict))

                bpair = expr.field
                with_object_array_or_scalar(process_vol_flux_arg, bpair.field)
                with_object_array_or_scalar(process_bdry_flux_arg, bpair.bfield)
            else:
                with_object_array_or_scalar(process_vol_flux_arg, expr.field)

            return type_info.VolumeVector(NodalRepresentation())

        elif isinstance(expr.op, QuadratureGridUpsampler):
            typedict[expr.field] = extract_domain(typedict[expr])
            self.rec(expr.field, typedict)
            return type_info.KnownRepresentation(
                    QuadratureRepresentation(expr.op.quadrature_tag))\
                            .unify(extract_domain(typedict[expr.field]), expr)

        elif isinstance(expr.op, QuadratureInteriorFacesGridUpsampler):
            typedict[expr.field] = type_info.VolumeVector(
                    NodalRepresentation())
            self.rec(expr.field, typedict)
            return type_info.InteriorFacesVector(
                    QuadratureRepresentation(expr.op.quadrature_tag))

        elif isinstance(expr.op, QuadratureBoundaryGridUpsampler):
            typedict[expr.field] = type_info.BoundaryVector(
                    expr.op.boundary_tag, NodalRepresentation())
            self.rec(expr.field, typedict)
            return type_info.BoundaryVector(
                    expr.op.boundary_tag,
                    QuadratureRepresentation(expr.op.quadrature_tag))

        elif isinstance(expr.op, ElementwiseLinearOperator):
            typedict[expr.field] = type_info.VolumeVector(NodalRepresentation())
            self.rec(expr.field, typedict)
            return type_info.VolumeVector(NodalRepresentation())

        else:
            raise RuntimeError("TypeInferrer doesn't know how to handle '%s'"
                    % expr.op)
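A side note on repr_tag_cell in the flux branch above: the nested
closures must mutate state in the enclosing scope, and this code
predates reliance on Python 3's nonlocal, so a one-element list serves
as a mutable cell. The same pattern in isolation:

def make_accumulator():
    total_cell = [0]            # mutable cell standing in for nonlocal

    def add(x):
        total_cell[0] += x      # mutating the cell works; rebinding
        return total_cell[0]    # total_cell itself would not

    return add

acc = make_accumulator()
assert acc(3) == 3 and acc(4) == 7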
Example no. 59
    def evaluate_wrapper(expr):
        value = evaluate(expr)
        return with_object_array_or_scalar(oversample, value)
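This last example is only a fragment: evaluate and oversample come from
surrounding code that is not shown. A self-contained sketch of the same
wrapper pattern, with hypothetical stand-ins for both:

import numpy
from pytools.obj_array import with_object_array_or_scalar

def evaluate(expr):
    # hypothetical stand-in: would evaluate a symbolic expression to a
    # scalar or an object array of per-component fields
    result = numpy.empty(2, dtype=object)
    result[0], result[1] = 1.0, 2.0
    return result

def oversample(fld):
    # hypothetical stand-in: would interpolate fld to a finer grid
    return fld

def evaluate_wrapper(expr):
    value = evaluate(expr)
    return with_object_array_or_scalar(oversample, value)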