Example #1
    def __call__(self, expr, type_hints={}):
        # Put the result expressions into variables as well.
        from hedge.optemplate import make_common_subexpression as cse
        expr = cse(expr, "_result")

        from hedge.optemplate.mappers.type_inference import TypeInferrer
        self.typedict = TypeInferrer()(expr, type_hints)

        # {{{ flux batching
        # Fluxes can be evaluated faster in batches. Here, we find flux
        # batches that we can evaluate together.

        # For each FluxRecord, find the other fluxes its flux depends on.
        flux_queue = self.get_contained_fluxes(expr)
        for fr in flux_queue:
            fr.dependencies = set(sf.flux_expr
                    for sf in self.get_contained_fluxes(fr.flux_expr)) \
                            - set([fr.flux_expr])

        # Then figure out batches of fluxes to evaluate
        self.flux_batches = []
        admissible_deps = set()
        while flux_queue:
            present_batch = set()
            i = 0
            while i < len(flux_queue):
                fr = flux_queue[i]
                if fr.dependencies <= admissible_deps:
                    present_batch.add(fr)
                    flux_queue.pop(i)
                else:
                    i += 1

            if present_batch:
                # bin batched operators by representative operator
                batches_by_repr_op = {}
                for fr in present_batch:
                    batches_by_repr_op[fr.repr_op] = \
                            batches_by_repr_op.get(fr.repr_op, set()) \
                            | set([fr.flux_expr])

                for repr_op, batch in batches_by_repr_op.iteritems():
                    self.flux_batches.append(
                        self.FluxBatch(repr_op=repr_op,
                                       flux_exprs=list(batch)))

                admissible_deps |= set(fr.flux_expr for fr in present_batch)
            else:
                raise RuntimeError("cannot resolve flux evaluation order")

        # }}}

        # Used for diff batching

        self.diff_ops = self.collect_diff_ops(expr)

        # Flux exchange also works better when batched.
        self.flux_exchange_ops = self.collect_flux_exchange_ops(expr)

        # Finally, walk the expression and build the code.
        result = IdentityMapper.__call__(self, expr)

        return Code(self.aggregate_assignments(self.code, result), result)
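The dependency-resolution loop in the middle of this example is a greedy, level-by-level topological grouping: each round pulls out every flux whose dependencies have already been scheduled, bins those fluxes by their representative operator, and raises an error if a round makes no progress (which signals a dependency cycle). Below is a minimal standalone sketch of that logic; the record and function names are illustrative stand-ins, not hedge's FluxRecord/FluxBatch API.

from collections import namedtuple

Record = namedtuple("Record", "flux_expr dependencies repr_op")

def batch_fluxes(records):
    batches = []          # list of (repr_op, [flux_exprs]) pairs
    admissible = set()    # expressions scheduled in earlier rounds
    queue = list(records)
    while queue:
        # Pull out every record whose dependencies are already available.
        ready = [r for r in queue if r.dependencies <= admissible]
        if not ready:
            raise RuntimeError("cannot resolve flux evaluation order")
        queue = [r for r in queue if r not in ready]
        # Bin the ready records by representative operator.
        by_op = {}
        for r in ready:
            by_op.setdefault(r.repr_op, set()).add(r.flux_expr)
        for op, exprs in by_op.items():
            batches.append((op, sorted(exprs)))
        admissible |= {r.flux_expr for r in ready}
    return batches

# "b" depends on "a", so it cannot land in the first round of batches.
recs = [Record("a", set(), "op1"),
        Record("b", {"a"}, "op1"),
        Record("c", set(), "op2")]
print(batch_fluxes(recs))
# [('op1', ['a']), ('op2', ['c']), ('op1', ['b'])]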
Example #2
    def __call__(self, expr, type_hints={}):
        from hedge.optemplate.mappers.type_inference import TypeInferrer
        self.typedict = TypeInferrer()(expr, type_hints)

        # {{{ flux batching
        # Fluxes can be evaluated faster in batches. Here, we find flux 
        # batches that we can evaluate together.

        # For each FluxRecord, find the other fluxes its flux depends on.
        flux_queue = self.get_contained_fluxes(expr)
        for fr in flux_queue:
            fr.dependencies = set(sf.flux_expr
                    for sf in self.get_contained_fluxes(fr.flux_expr)) \
                            - set([fr.flux_expr])

        # Then figure out batches of fluxes to evaluate
        self.flux_batches = []
        admissible_deps = set()
        while flux_queue:
            present_batch = set()
            i = 0
            while i < len(flux_queue):
                fr = flux_queue[i]
                if fr.dependencies <= admissible_deps:
                    present_batch.add(fr)
                    flux_queue.pop(i)
                else:
                    i += 1

            if present_batch:
                # bin batched operators by representative operator
                batches_by_repr_op = {}
                for fr in present_batch:
                    batches_by_repr_op[fr.repr_op] = \
                            batches_by_repr_op.get(fr.repr_op, set()) \
                            | set([fr.flux_expr])

                for repr_op, batch in batches_by_repr_op.iteritems():
                    self.flux_batches.append(
                            self.FluxBatch(
                                repr_op=repr_op, 
                                flux_exprs=list(batch)))

                admissible_deps |= set(fr.flux_expr for fr in present_batch)
            else:
                raise RuntimeError("cannot resolve flux evaluation order")

        # }}}

        # Once flux batching is figured out, we also need to know which
        # derivatives are going to be needed, because once the
        # rst-derivatives are available, it's best to calculate the
        # xyz ones and throw the rst ones out. It's naturally good if
        # we can avoid computing (or storing) some of the xyz ones.
        # So figure out which XYZ derivatives of what are needed.

        self.diff_ops = self.collect_diff_ops(expr)

        # Flux exchange also works better when batched.
        self.flux_exchange_ops = self.collect_flux_exchange_ops(expr)

        # Finally, walk the expression and build the code.
        result = IdentityMapper.__call__(self, expr)

        # Then, put the toplevel expressions into variables as well.
        from hedge.tools import with_object_array_or_scalar
        result = with_object_array_or_scalar(self.assign_to_new_var, result)
        return Code(self.aggregate_assignments(self.code, result), result)
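Unlike Example #1, this variant does not wrap the whole input in a common subexpression up front; instead it defers the result naming to the end, mapping assign_to_new_var over each component of the (possibly vector-valued) result via hedge.tools.with_object_array_or_scalar. The following is only a rough sketch of that "apply to each component of an object array, or to the scalar directly" pattern, not the helper's exact implementation.

import numpy as np

def with_object_array_or_scalar(f, field):
    # Vector-valued results are held as numpy object arrays, one entry per
    # component; anything else is treated as a scalar and passed to f directly.
    if isinstance(field, np.ndarray) and field.dtype == object:
        result = np.empty(field.shape, dtype=object)
        for i in np.ndindex(field.shape):
            result[i] = f(field[i])
        return result
    return f(field)

# Mirror the assign_to_new_var step by tagging each component (illustration only).
components = np.array(["flux_0", "flux_1", "flux_2"], dtype=object)
print(with_object_array_or_scalar(lambda x: "var <- " + x, components))
# ['var <- flux_0' 'var <- flux_1' 'var <- flux_2']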
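Example #2's comment about collecting the differentiation operators up front reflects the usual mapped-element chain rule: physical (xyz) derivatives are linear combinations of reference (rst) derivatives, so once the rst derivatives exist it is cheap to form exactly the xyz derivatives that are actually needed and discard the rest. A hedged sketch of that arithmetic, with illustrative names and shapes rather than hedge's API (for affinely mapped simplices the Jacobian factors are per-element constants, as assumed here):

import numpy as np

def xyz_derivatives(rst_derivs, drst_dxyz, needed_axes):
    """rst_derivs: list of arrays du/dr_j, one per reference axis.
    drst_dxyz[j, i]: the factor dr_j/dx_i for the element.
    needed_axes: which physical axes to compute, via the chain rule
        du/dx_i = sum_j dr_j/dx_i * du/dr_j
    """
    return {i: sum(drst_dxyz[j, i] * rst_derivs[j]
                   for j in range(len(rst_derivs)))
            for i in needed_axes}

# 2D example: only the x-derivative (axis 0) is requested.
du_dr = np.array([1.0, 2.0])
du_ds = np.array([3.0, 4.0])
jac = np.array([[2.0, 0.0],   # dr/dx, dr/dy
                [1.0, 1.0]])  # ds/dx, ds/dy
print(xyz_derivatives([du_dr, du_ds], jac, needed_axes=[0]))
# {0: array([5., 8.])}   i.e. du/dx = 2*du/dr + 1*du/ds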