Example #1
0
    def integrate_in_time(self, insn):
        """Integrate one component over [insn.start, insn.end] of the macro-step.

        Combines the component's own derivative history with the
        cross-coupling history using Adams-Bashforth coefficients obtained
        from the stepper, then stores the integrated state under
        ``insn.result_name`` in ``self.context`` as a zero-argument callable
        and records its time level in ``self.var_time_level``.
        """
        from hedge.timestep.multirate_ab.methods import CO_FAST
        from hedge.timestep.multirate_ab.methods import HIST_F2F, HIST_S2F, HIST_F2S, HIST_S2S

        # Select the self-coupling and cross-coupling histories for the
        # component being integrated.
        if insn.component == CO_FAST:
            self_hn, cross_hn = HIST_F2F, HIST_S2F
        else:
            self_hn, cross_hn = HIST_S2S, HIST_F2S

        start_time_level = self.eval_expr(insn.start)
        end_time_level = self.eval_expr(insn.end)

        self_coefficients = self.stepper.get_coefficients(
            self.stepper.hist_is_fast[self_hn],
            self.hist_head_time_level[self_hn],
            start_time_level,
            end_time_level,
            self.stepper.orders[self_hn],
        )
        cross_coefficients = self.stepper.get_coefficients(
            self.stepper.hist_is_fast[cross_hn],
            self.hist_head_time_level[cross_hn],
            start_time_level,
            end_time_level,
            self.stepper.orders[cross_hn],
        )

        # At the start of a macro-step (or before the result variable
        # exists) integrate from the last committed state; otherwise
        # continue from the previously integrated value.
        if start_time_level == 0 or (insn.result_name not in self.context):
            my_y = self.last_y[insn.component]
            assert start_time_level == 0
        else:
            my_y = self.context[insn.result_name]()
            assert start_time_level == self.var_time_level[insn.result_name]

        hists = self.stepper.histories
        self_history = hists[self_hn][:]
        cross_history = hists[cross_hn][:]

        # BUGFIX: removed an unreachable `if False:` branch that wrapped
        # this same computation in memoize(); it was dead code.
        my_new_y = my_y + self.stepper.large_dt * (
            _linear_comb(self_coefficients, self_history)
            + _linear_comb(cross_coefficients, cross_history)
        )
        my_integrated_y = lambda: my_new_y

        self.context[insn.result_name] = my_integrated_y
        self.var_time_level[insn.result_name] = end_time_level

        MRABProcessor.integrate_in_time(self, insn)
Example #2
0
    def integrate_in_time(self, insn):
        """Integrate one component over [insn.start, insn.end] of the macro-step.

        Combines the component's own derivative history with the
        cross-coupling history using Adams-Bashforth coefficients obtained
        from the stepper, then stores the integrated state under
        insn.result_name in self.context as a zero-argument callable and
        records its time level in self.var_time_level.
        """
        from hedge.timestep.multirate_ab.methods import CO_FAST
        from hedge.timestep.multirate_ab.methods import \
                HIST_F2F, HIST_S2F, HIST_F2S, HIST_S2S

        # Select the self-coupling and cross-coupling histories for the
        # component being integrated.
        if insn.component == CO_FAST:
            self_hn, cross_hn = HIST_F2F, HIST_S2F
        else:
            self_hn, cross_hn = HIST_S2S, HIST_F2S

        start_time_level = self.eval_expr(insn.start)
        end_time_level = self.eval_expr(insn.end)

        self_coefficients = self.stepper.get_coefficients(
            self.stepper.hist_is_fast[self_hn],
            self.hist_head_time_level[self_hn], start_time_level,
            end_time_level, self.stepper.orders[self_hn])
        cross_coefficients = self.stepper.get_coefficients(
            self.stepper.hist_is_fast[cross_hn],
            self.hist_head_time_level[cross_hn], start_time_level,
            end_time_level, self.stepper.orders[cross_hn])

        # At the start of a macro-step (or before the result variable
        # exists) integrate from the last committed state; otherwise
        # continue from the previously integrated value.
        if start_time_level == 0 or (insn.result_name not in self.context):
            my_y = self.last_y[insn.component]
            assert start_time_level == 0
        else:
            my_y = self.context[insn.result_name]()
            assert start_time_level == \
                    self.var_time_level[insn.result_name]

        hists = self.stepper.histories
        self_history = hists[self_hn][:]
        cross_history = hists[cross_hn][:]

        # BUGFIX: removed an unreachable `if False:` branch that wrapped
        # this same computation in memoize(); it was dead code.
        my_new_y = my_y + self.stepper.large_dt * (
            _linear_comb(self_coefficients, self_history) +
            _linear_comb(cross_coefficients, cross_history))
        my_integrated_y = lambda: my_new_y

        self.context[insn.result_name] = my_integrated_y
        self.var_time_level[insn.result_name] = end_time_level

        MRABProcessor.integrate_in_time(self, insn)
Example #3
0
                    + result)

        if self.saw_double:
            result = (
                    "#pragma OPENCL EXTENSION cl_khr_fp64: enable\n"
                    "#define PYOPENCL_DEFINE_CDOUBLE\n"
                    + result)

        return result

# Choose a memoization strategy for match_dtype_to_c_struct: when numpy
# dtypes are hashable on this platform they can serve directly as cache
# keys; otherwise build a hashable surrogate key via _dtype_to_key.
if _dtype_hashable:
    _memoize_match_dtype_to_c_struct = memoize
else:
    # NOTE(review): _json appears unused in this span — presumably
    # consumed by _dtype_to_key elsewhere in the file; confirm before
    # removing.
    import json as _json
    _memoize_match_dtype_to_c_struct = memoize(
        key=lambda device, name, dtype, context=None:
        (device, name, _dtype_to_key(dtype), context))

@_memoize_match_dtype_to_c_struct
def match_dtype_to_c_struct(device, name, dtype, context=None):
    """Return a tuple `(dtype, c_decl)` such that the C struct declaration
    in `c_decl` and the structure :class:`numpy.dtype` instance `dtype`
    have the same memory layout.

    Note that *dtype* may be modified from the value that was passed in,
    for example to insert padding.

    (As a remark on implementation, this routine runs a small kernel on
    the given *device* to ensure that :mod:`numpy` and C offsets and
    sizes match.)
Example #4
0
        if self.saw_complex:
            result = ("#include <pyopencl-complex.h>\n\n" + result)

        if self.saw_double:
            result = ("#pragma OPENCL EXTENSION cl_khr_fp64: enable\n"
                      "#define PYOPENCL_DEFINE_CDOUBLE\n" + result)

        return result


# Choose a memoization strategy for match_dtype_to_c_struct: when numpy
# dtypes are hashable on this platform they can serve directly as cache
# keys; otherwise build a hashable surrogate key via _dtype_to_key.
if _dtype_hashable:
    _memoize_match_dtype_to_c_struct = memoize
else:
    # NOTE(review): _json appears unused in this span — presumably
    # consumed by _dtype_to_key elsewhere in the file; confirm before
    # removing.
    import json as _json
    _memoize_match_dtype_to_c_struct = memoize(
        key=lambda device, name, dtype, context=None:
        (device, name, _dtype_to_key(dtype), context))


@_memoize_match_dtype_to_c_struct
def match_dtype_to_c_struct(device, name, dtype, context=None):
    """Return a tuple `(dtype, c_decl)` such that the C struct declaration
    in `c_decl` and the structure :class:`numpy.dtype` instance `dtype`
    have the same memory layout.

    Note that *dtype* may be modified from the value that was passed in,
    for example to insert padding.

    (As a remark on implementation, this routine runs a small kernel on
    the given *device* to ensure that :mod:`numpy` and C offsets and
    sizes match.)