Code Example #1
File: hackathon.py  Project: umarbrowser/tvb-hpc
def make_knl():

    target = NumbaTarget()

    # build individual kernels
    osc = model.Kuramoto()
    osc.dt = 1.0
    osc.const['omega'] = 10.0 * 2.0 * np.pi / 1e3
    osc_knl = osc.kernel(target)

    cfun = coupling.Kuramoto(osc)
    cfun.param['a'] = pm.parse('a')
    net = network.Network(osc, cfun)
    net_knl = net.kernel(target)

    scm = scheme.EulerStep(osc.dt)
    scm_knl = scm.kernel(target)
    scm_knl = lp.fix_parameters(scm_knl, nsvar=len(osc.state_sym))

    # fuse kernels
    knls = osc_knl, net_knl, scm_knl
    data_flow = [('input', 1, 0), ('diffs', 0, 2), ('drift', 0, 2),
                 ('state', 2, 0)]
    knl = lp.fuse_kernels(knls, data_flow=data_flow)

    # and time step
    knl = lp.to_batched(knl, 'nstep', [], 'i_step', sequential=True)
    knl = lp.fix_parameters(knl,
                            i_time=pm.parse('(i_step + i_step_0) % ntime'))
    knl.args.append(lp.ValueArg('i_step_0', np.uintc))
    knl = lp.add_dtypes(knl, {'i_step_0': np.uintc})

    return knl, osc
Code Example #2
def network_time_step(
        model: model.BaseKernel,
        coupling: coupling.BaseCoupling,
        scheme: scheme.TimeStepScheme,
        target: lp.target.TargetBase = None,
        ):
    target = target or utils.default_target()
    # fuse kernels
    kernels = [
        model.kernel(target),
        network.Network(model, coupling).kernel(target),
        lp.fix_parameters(scheme.kernel(target), nsvar=len(model.state_sym)),
    ]
    data_flow = [
        ('input', 1, 0),
        ('diffs', 0, 2),
        ('drift', 0, 2),
        ('state', 2, 0)
    ]
    knl = lp.fuse_kernels(kernels, data_flow=data_flow)
    # time step
    knl = lp.to_batched(knl, 'nstep', [], 'i_step', sequential=True)
    new_i_time = pm.parse('(i_step + i_step_0) % ntime')
    knl = lp.fix_parameters(knl, i_time=new_i_time)
    knl.args.append(lp.ValueArg('i_step_0', np.uintc))
    knl = lp.add_dtypes(knl, {'i_step_0': np.uintc})
    return knl
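
For orientation, here is a hedged usage sketch of the helper above. It simply reuses the objects constructed in Code Example #1; the `from tvb_hpc import ...` path is an assumption (the examples only show the module names model, coupling, and scheme), and the omega constant is copied from that example.

# Hypothetical usage of network_time_step(), mirroring Code Example #1.
# The "tvb_hpc" import path is an assumption; adjust to the actual package layout.
import numpy as np
from tvb_hpc import model, coupling, scheme

osc = model.Kuramoto()
osc.dt = 1.0
osc.const['omega'] = 10.0 * 2.0 * np.pi / 1e3   # same constant as Code Example #1
cfun = coupling.Kuramoto(osc)

knl = network_time_step(osc, cfun, scheme.EulerStep(osc.dt))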
Code Example #3
File: test_fortran.py  Project: spillai/loopy
def test_fuse_kernels(ctx_factory):
    fortran_template = """
        subroutine {name}(nelements, ndofs, result, d, q)
          implicit none
          integer e, i, j, k
          integer nelements, ndofs
          real*8 result(nelements, ndofs, ndofs)
          real*8 q(nelements, ndofs, ndofs)
          real*8 d(ndofs, ndofs)
          real*8 prev

          do e = 1,nelements
            do i = 1,ndofs
              do j = 1,ndofs
                do k = 1,ndofs
                  {inner}
                end do
              end do
            end do
          end do
        end subroutine
        """

    xd_line = """
        prev = result(e,i,j)
        result(e,i,j) = prev + d(i,k)*q(e,i,k)
        """
    yd_line = """
        prev = result(e,i,j)
        result(e,i,j) = prev + d(i,k)*q(e,k,j)
        """

    xderiv, = lp.parse_fortran(
        fortran_template.format(inner=xd_line, name="xderiv"))
    yderiv, = lp.parse_fortran(
        fortran_template.format(inner=yd_line, name="yderiv"))
    xyderiv, = lp.parse_fortran(
        fortran_template.format(inner=(xd_line + "\n" + yd_line),
                                name="xyderiv"))

    knl = lp.fuse_kernels((xderiv, yderiv))
    knl = lp.prioritize_loops(knl, "e,i,j,k")

    assert len(knl.temporary_variables) == 2

    # This is needed for correctness; otherwise, ordering could foul things up.
    knl = lp.assignment_to_subst(knl, "prev")
    knl = lp.assignment_to_subst(knl, "prev_0")

    ctx = ctx_factory()
    lp.auto_test_vs_ref(xyderiv,
                        ctx,
                        knl,
                        parameters=dict(nelements=20, ndofs=4))
Code Example #4
def test_fusion():
    exp_kernel = lp.make_kernel(''' { [i]: 0<=i<n } ''',
                                ''' exp[i] = pow(E, z[i])''',
                                assumptions="n>0")

    sum_kernel = lp.make_kernel('{ [j]: 0<=j<n }',
                                'out2 = sum(j, exp[j])',
                                assumptions='n>0')

    knl = lp.fuse_kernels([exp_kernel, sum_kernel])

    print(knl)
Code Example #5
def test_fusion():
    exp_kernel = lp.make_kernel(""" { [i]: 0<=i<n } """,
                                """ exp[i] = pow(E, z[i])""",
                                assumptions="n>0")

    sum_kernel = lp.make_kernel("{ [j]: 0<=j<n }",
                                "out2 = sum(j, exp[j])",
                                assumptions="n>0")

    knl = lp.fuse_kernels([exp_kernel, sum_kernel])

    print(knl)
Code Example #6
def test_fuse_kernels(ctx_factory):
    fortran_template = """
        subroutine {name}(nelements, ndofs, result, d, q)
          implicit none
          integer e, i, j, k
          integer nelements, ndofs
          real*8 result(nelements, ndofs, ndofs)
          real*8 q(nelements, ndofs, ndofs)
          real*8 d(ndofs, ndofs)
          real*8 prev

          do e = 1,nelements
            do i = 1,ndofs
              do j = 1,ndofs
                do k = 1,ndofs
                  {inner}
                end do
              end do
            end do
          end do
        end subroutine
        """

    xd_line = """
        prev = result(e,i,j)
        result(e,i,j) = prev + d(i,k)*q(e,i,k)
        """
    yd_line = """
        prev = result(e,i,j)
        result(e,i,j) = prev + d(i,k)*q(e,k,j)
        """

    xderiv = lp.parse_fortran(
        fortran_template.format(inner=xd_line, name="xderiv"))
    yderiv = lp.parse_fortran(
        fortran_template.format(inner=yd_line, name="yderiv"))
    xyderiv = lp.parse_fortran(
        fortran_template.format(inner=(xd_line + "\n" + yd_line),
                                name="xyderiv"))

    knl = lp.fuse_kernels((xderiv["xderiv"], yderiv["yderiv"]),
                          data_flow=[("result", 0, 1)])
    knl = knl.with_kernel(
        lp.prioritize_loops(knl["xderiv_and_yderiv"], "e,i,j,k"))

    assert len(knl["xderiv_and_yderiv"].temporary_variables) == 2

    ctx = ctx_factory()
    lp.auto_test_vs_ref(xyderiv,
                        ctx,
                        knl,
                        parameters=dict(nelements=20, ndofs=4))
Code Example #7
File: test_fortran.py  Project: cmsquared/loopy
def test_fuse_kernels(ctx_factory):
    fortran_template = """
        subroutine {name}(nelements, ndofs, result, d, q)
          implicit none
          integer e, i, j, k
          integer nelements, ndofs
          real*8 result(nelements, ndofs, ndofs)
          real*8 q(nelements, ndofs, ndofs)
          real*8 d(ndofs, ndofs)
          real*8 prev

          do e = 1,nelements
            do i = 1,ndofs
              do j = 1,ndofs
                do k = 1,ndofs
                  {inner}
                end do
              end do
            end do
          end do
        end subroutine
        """

    xd_line = """
        prev = result(e,i,j)
        result(e,i,j) = prev + d(i,k)*q(e,i,k)
        """
    yd_line = """
        prev = result(e,i,j)
        result(e,i,j) = prev + d(i,k)*q(e,k,j)
        """

    xderiv, = lp.parse_fortran(
            fortran_template.format(inner=xd_line, name="xderiv"))
    yderiv, = lp.parse_fortran(
            fortran_template.format(inner=yd_line, name="yderiv"))
    xyderiv, = lp.parse_fortran(
            fortran_template.format(
                inner=(xd_line + "\n" + yd_line), name="xyderiv"))

    knl = lp.fuse_kernels((xderiv, yderiv))
    knl = lp.set_loop_priority(knl, "e,i,j,k")

    assert len(knl.temporary_variables) == 2

    # This is needed for correctness; otherwise, ordering could foul things up.
    knl = lp.assignment_to_subst(knl, "prev")
    knl = lp.assignment_to_subst(knl, "prev_0")

    ctx = ctx_factory()
    lp.auto_test_vs_ref(xyderiv, ctx, knl, parameters=dict(nelements=20, ndofs=4))
Code Example #8
File: test_transform.py  Project: cmsquared/loopy
def test_fusion():
    exp_kernel = lp.make_kernel(
         ''' { [i]: 0<=i<n } ''',
         ''' exp[i] = pow(E, z[i])''',
         assumptions="n>0")

    sum_kernel = lp.make_kernel(
        '{ [j]: 0<=j<n }',
        'out2 = sum(j, exp[j])',
        assumptions='n>0')

    knl = lp.fuse_kernels([exp_kernel, sum_kernel])

    print(knl)
Code Example #9
File: test_loopy.py  Project: dokempf/loopy
def test_finite_difference_expr_subst(ctx_factory):
    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)

    grid = np.linspace(0, 2*np.pi, 2048, endpoint=False)
    h = grid[1] - grid[0]
    u = cl.clmath.sin(cl.array.to_device(queue, grid))

    fin_diff_knl = lp.make_kernel(
        "{[i]: 1<=i<=n}",
        "out[i] = -(f[i+1] - f[i-1])/h",
        [lp.GlobalArg("out", shape="n+2"), "..."])

    flux_knl = lp.make_kernel(
        "{[j]: 1<=j<=n}",
        "f[j] = u[j]**2/2",
        [
            lp.GlobalArg("f", shape="n+2"),
            lp.GlobalArg("u", shape="n+2"),
            ])

    fused_knl = lp.fuse_kernels([fin_diff_knl, flux_knl],
            data_flow=[
                ("f", 1, 0)
                ])

    fused_knl = lp.set_options(fused_knl, write_cl=True)
    evt, _ = fused_knl(queue, u=u, h=np.float32(1e-1))

    fused_knl = lp.assignment_to_subst(fused_knl, "f")

    fused_knl = lp.set_options(fused_knl, write_cl=True)

    # This is the real test here: The automatically generated
    # shape expressions are '2+n' and the ones above are 'n+2'.
    # Is loopy smart enough to understand that these are equal?
    evt, _ = fused_knl(queue, u=u, h=np.float32(1e-1))

    fused0_knl = lp.affine_map_inames(fused_knl, "i", "inew", "inew+1=i")

    gpu_knl = lp.split_iname(
            fused0_knl, "inew", 128, outer_tag="g.0", inner_tag="l.0")

    precomp_knl = lp.precompute(
            gpu_knl, "f_subst", "inew_inner", fetch_bounding_box=True)

    precomp_knl = lp.tag_inames(precomp_knl, {"j_0_outer": "unr"})
    precomp_knl = lp.set_options(precomp_knl, return_dict=True)
    evt, _ = precomp_knl(queue, u=u, h=h)
Code Example #10
File: test_loopy.py  Project: dokempf/loopy
def test_finite_difference_expr_subst(ctx_factory):
    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)

    grid = np.linspace(0, 2 * np.pi, 2048, endpoint=False)
    h = grid[1] - grid[0]
    u = cl.clmath.sin(cl.array.to_device(queue, grid))

    fin_diff_knl = lp.make_kernel("{[i]: 1<=i<=n}",
                                  "out[i] = -(f[i+1] - f[i-1])/h",
                                  [lp.GlobalArg("out", shape="n+2"), "..."])

    flux_knl = lp.make_kernel("{[j]: 1<=j<=n}", "f[j] = u[j]**2/2", [
        lp.GlobalArg("f", shape="n+2"),
        lp.GlobalArg("u", shape="n+2"),
    ])

    fused_knl = lp.fuse_kernels([fin_diff_knl, flux_knl],
                                data_flow=[("f", 1, 0)])

    fused_knl = lp.set_options(fused_knl, write_cl=True)
    evt, _ = fused_knl(queue, u=u, h=np.float32(1e-1))

    fused_knl = lp.assignment_to_subst(fused_knl, "f")

    fused_knl = lp.set_options(fused_knl, write_cl=True)

    # This is the real test here: The automatically generated
    # shape expressions are '2+n' and the ones above are 'n+2'.
    # Is loopy smart enough to understand that these are equal?
    evt, _ = fused_knl(queue, u=u, h=np.float32(1e-1))

    fused0_knl = lp.affine_map_inames(fused_knl, "i", "inew", "inew+1=i")

    gpu_knl = lp.split_iname(fused0_knl,
                             "inew",
                             128,
                             outer_tag="g.0",
                             inner_tag="l.0")

    precomp_knl = lp.precompute(gpu_knl,
                                "f_subst",
                                "inew_inner",
                                fetch_bounding_box=True)

    precomp_knl = lp.tag_inames(precomp_knl, {"j_0_outer": "unr"})
    precomp_knl = lp.set_options(precomp_knl, return_dict=True)
    evt, _ = precomp_knl(queue, u=u, h=h)
Code Example #11
File: test_fortran.py  Project: inducer/loopy
def test_fuse_kernels(ctx_factory):
    fortran_template = """
        subroutine {name}(nelements, ndofs, result, d, q)
          implicit none
          integer e, i, j, k
          integer nelements, ndofs
          real*8 result(nelements, ndofs, ndofs)
          real*8 q(nelements, ndofs, ndofs)
          real*8 d(ndofs, ndofs)
          real*8 prev

          do e = 1,nelements
            do i = 1,ndofs
              do j = 1,ndofs
                do k = 1,ndofs
                  {inner}
                end do
              end do
            end do
          end do
        end subroutine
        """

    xd_line = """
        prev = result(e,i,j)
        result(e,i,j) = prev + d(i,k)*q(e,i,k)
        """
    yd_line = """
        prev = result(e,i,j)
        result(e,i,j) = prev + d(i,k)*q(e,k,j)
        """

    xderiv, = lp.parse_fortran(
            fortran_template.format(inner=xd_line, name="xderiv"))
    yderiv, = lp.parse_fortran(
            fortran_template.format(inner=yd_line, name="yderiv"))
    xyderiv, = lp.parse_fortran(
            fortran_template.format(
                inner=(xd_line + "\n" + yd_line), name="xyderiv"))

    knl = lp.fuse_kernels((xderiv, yderiv), data_flow=[("result", 0, 1)])
    knl = lp.prioritize_loops(knl, "e,i,j,k")

    assert len(knl.temporary_variables) == 2

    ctx = ctx_factory()
    lp.auto_test_vs_ref(xyderiv, ctx, knl, parameters=dict(nelements=20, ndofs=4))
Code Example #12
def test_two_kernel_fusion(ctx_factory):
    """
    A simple fusion test of two sets of instructions.
    """

    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)

    knla = lp.make_kernel("{[i]: 0<=i<10}", """
            out[i] = i
        """)
    knlb = lp.make_kernel("{[j]: 0<=j<10}", """
            out[j] = j+100
        """)
    knl = lp.fuse_kernels([knla, knlb], data_flow=[("out", 0, 1)])
    evt, (out, ) = knl(queue)
    np.testing.assert_allclose(out.get(), np.arange(100, 110))
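
A note on the data_flow argument that most of these examples pass to lp.fuse_kernels: as used here, each entry is a (variable_name, from_index, to_index) tuple, where the indices refer to positions in the kernel list, and the fuser adds dependencies so that accesses to that variable in kernels[to_index] are ordered after those in kernels[from_index]. Below is a minimal, self-contained sketch; the kernels and the names square_knl, shift_knl, and tmp are made up for illustration and do not come from any of the projects above.

# Minimal illustration of the (variable, from_index, to_index) data_flow tuples.
import loopy as lp

square_knl = lp.make_kernel(
    "{[i]: 0<=i<n}",
    "tmp[i] = a[i]*a[i]",
    assumptions="n>0")

shift_knl = lp.make_kernel(
    "{[j]: 0<=j<n}",
    "out[j] = tmp[j] + 1",
    assumptions="n>0")

# kernels[0] writes tmp and kernels[1] reads it, hence ("tmp", 0, 1).
fused = lp.fuse_kernels([square_knl, shift_knl], data_flow=[("tmp", 0, 1)])
print(fused)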
Code Example #13
def test_gnuma_horiz_kernel(ctx_factory, ilp_multiple, Nq, opt_level):
    ctx = ctx_factory()

    filename = "strongVolumeKernels.f90"
    with open(filename, "r") as sourcef:
        source = sourcef.read()

    source = source.replace("datafloat", "real*4")

    hsv_r, hsv_s = [
        knl
        for knl in lp.parse_fortran(source, filename, auto_dependencies=False)
        if "KernelR" in knl.name or "KernelS" in knl.name
    ]
    hsv_r = lp.tag_instructions(hsv_r, "rknl")
    hsv_s = lp.tag_instructions(hsv_s, "sknl")
    hsv = lp.fuse_kernels([hsv_r, hsv_s], ["_r", "_s"])
    #hsv = hsv_s

    from gnuma_loopy_transforms import (fix_euler_parameters,
                                        set_q_storage_format,
                                        set_D_storage_format)

    hsv = lp.fix_parameters(hsv, Nq=Nq)
    hsv = lp.set_loop_priority(hsv, "e,k,j,i")
    hsv = lp.tag_inames(hsv, dict(e="g.0", j="l.1", i="l.0"))
    hsv = lp.assume(hsv, "elements >= 1")

    hsv = fix_euler_parameters(hsv, p_p0=1, p_Gamma=1.4, p_R=1)
    for name in ["Q", "rhsQ"]:
        hsv = set_q_storage_format(hsv, name)

    hsv = set_D_storage_format(hsv)
    #hsv = lp.add_prefetch(hsv, "volumeGeometricFactors")

    ref_hsv = hsv

    if opt_level == 0:
        tap_hsv = hsv

    hsv = lp.add_prefetch(hsv, "D[:,:]")

    if opt_level == 1:
        tap_hsv = hsv

    # turn the first reads into subst rules
    local_prep_var_names = set()
    for insn in lp.find_instructions(hsv, "tag:local_prep"):
        assignee, = insn.assignee_var_names()
        local_prep_var_names.add(assignee)
        hsv = lp.assignment_to_subst(hsv, assignee)

    # precompute fluxes
    hsv = lp.assignment_to_subst(hsv, "JinvD_r")
    hsv = lp.assignment_to_subst(hsv, "JinvD_s")

    r_fluxes = lp.find_instructions(hsv, "tag:compute_fluxes and tag:rknl")
    s_fluxes = lp.find_instructions(hsv, "tag:compute_fluxes and tag:sknl")

    if ilp_multiple > 1:
        hsv = lp.split_iname(hsv, "k", 2, inner_tag="ilp")
        ilp_inames = ("k_inner", )
        flux_ilp_inames = ("kk", )
    else:
        ilp_inames = ()
        flux_ilp_inames = ()

    rtmps = []
    stmps = []

    flux_store_idx = 0

    for rflux_insn, sflux_insn in zip(r_fluxes, s_fluxes):
        for knl_tag, insn, flux_inames, tmps, flux_precomp_inames in [
            ("rknl", rflux_insn, (
                "j",
                "n",
            ), rtmps, (
                "jj",
                "ii",
            )),
            ("sknl", sflux_insn, (
                "i",
                "n",
            ), stmps, (
                "ii",
                "jj",
            )),
        ]:
            flux_var, = insn.assignee_var_names()
            print(insn)

            reader, = lp.find_instructions(
                hsv,
                "tag:{knl_tag} and reads:{flux_var}".format(knl_tag=knl_tag,
                                                            flux_var=flux_var))

            hsv = lp.assignment_to_subst(hsv, flux_var)

            flux_store_name = "flux_store_%d" % flux_store_idx
            flux_store_idx += 1
            tmps.append(flux_store_name)

            hsv = lp.precompute(hsv,
                                flux_var + "_subst",
                                flux_inames + ilp_inames,
                                temporary_name=flux_store_name,
                                precompute_inames=flux_precomp_inames +
                                flux_ilp_inames,
                                default_tag=None)
            if flux_var.endswith("_s"):
                hsv = lp.tag_array_axes(hsv, flux_store_name, "N0,N1,N2?")
            else:
                hsv = lp.tag_array_axes(hsv, flux_store_name, "N1,N0,N2?")

            n_iname = "n_" + flux_var.replace("_r", "").replace("_s", "")
            if n_iname.endswith("_0"):
                n_iname = n_iname[:-2]
            hsv = lp.rename_iname(hsv,
                                  "n",
                                  n_iname,
                                  within="id:" + reader.id,
                                  existing_ok=True)

    hsv = lp.tag_inames(hsv, dict(ii="l.0", jj="l.1"))
    for iname in flux_ilp_inames:
        hsv = lp.tag_inames(hsv, {iname: "ilp"})

    hsv = lp.alias_temporaries(hsv, rtmps)
    hsv = lp.alias_temporaries(hsv, stmps)

    if opt_level == 2:
        tap_hsv = hsv

    for prep_var_name in local_prep_var_names:
        if prep_var_name.startswith("Jinv") or "_s" in prep_var_name:
            continue
        hsv = lp.precompute(
            hsv, lp.find_one_rule_matching(hsv, prep_var_name + "_*subst*"))

    if opt_level == 3:
        tap_hsv = hsv

    hsv = lp.add_prefetch(hsv, "Q[ii,jj,k,:,:,e]", sweep_inames=ilp_inames)

    if opt_level == 4:
        tap_hsv = hsv
        tap_hsv = lp.tag_inames(
            tap_hsv, dict(Q_dim_field_inner="unr", Q_dim_field_outer="unr"))

    hsv = lp.buffer_array(hsv,
                          "rhsQ",
                          ilp_inames,
                          fetch_bounding_box=True,
                          default_tag="for",
                          init_expression="0",
                          store_expression="base + buffer")

    if opt_level == 5:
        tap_hsv = hsv
        tap_hsv = lp.tag_inames(
            tap_hsv,
            dict(rhsQ_init_field_inner="unr",
                 rhsQ_store_field_inner="unr",
                 rhsQ_init_field_outer="unr",
                 rhsQ_store_field_outer="unr",
                 Q_dim_field_inner="unr",
                 Q_dim_field_outer="unr"))

    # buffer axes need to be vectorized in order for this to work
    hsv = lp.tag_array_axes(hsv, "rhsQ_buf", "c?,vec,c")
    hsv = lp.tag_array_axes(hsv, "Q_fetch", "c?,vec,c")
    hsv = lp.tag_array_axes(hsv, "D_fetch", "f,f")
    hsv = lp.tag_inames(hsv, {
        "Q_dim_k": "unr",
        "rhsQ_init_k": "unr",
        "rhsQ_store_k": "unr"
    },
                        ignore_nonexistent=True)

    if opt_level == 6:
        tap_hsv = hsv
        tap_hsv = lp.tag_inames(
            tap_hsv,
            dict(rhsQ_init_field_inner="unr",
                 rhsQ_store_field_inner="unr",
                 rhsQ_init_field_outer="unr",
                 rhsQ_store_field_outer="unr",
                 Q_dim_field_inner="unr",
                 Q_dim_field_outer="unr"))

    hsv = lp.tag_inames(
        hsv,
        dict(rhsQ_init_field_inner="vec",
             rhsQ_store_field_inner="vec",
             rhsQ_init_field_outer="unr",
             rhsQ_store_field_outer="unr",
             Q_dim_field_inner="vec",
             Q_dim_field_outer="unr"))

    if opt_level == 7:
        tap_hsv = hsv

    hsv = lp.collect_common_factors_on_increment(
        hsv, "rhsQ_buf", vary_by_axes=(0, ) if ilp_multiple > 1 else ())

    if opt_level >= 8:
        tap_hsv = hsv

    hsv = tap_hsv

    if 1:
        print("OPS")
        op_poly = lp.get_op_poly(hsv)
        print(lp.stringify_stats_mapping(op_poly))

        print("MEM")
        gmem_poly = lp.sum_mem_access_to_bytes(lp.get_gmem_access_poly(hsv))
        print(lp.stringify_stats_mapping(gmem_poly))

    hsv = lp.set_options(hsv,
                         cl_build_options=[
                             "-cl-denorms-are-zero",
                             "-cl-fast-relaxed-math",
                             "-cl-finite-math-only",
                             "-cl-mad-enable",
                             "-cl-no-signed-zeros",
                         ])

    hsv = hsv.copy(name="horizontalStrongVolumeKernel")

    results = lp.auto_test_vs_ref(ref_hsv,
                                  ctx,
                                  hsv,
                                  parameters=dict(elements=300),
                                  quiet=True)

    elapsed = results["elapsed_wall"]

    print("elapsed", elapsed)
Code Example #14
def test_write_block_matrix_fusion(ctx_factory):
    """
    A slightly more complicated fusion test, where all
    sub-kernels write into the same global matrix, but
    in well-defined separate blocks. This test makes sure the
    data flow specification is preserved during fusion for
    matrix-assembly-like programs.
    """

    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)

    def init_global_mat_prg():
        return lp.make_kernel(
            ["{[idof]: 0 <= idof < n}", "{[jdof]: 0 <= jdof < m}"],
            """
                result[idof, jdof]  = 0 {id=init}
            """,
            [
                lp.GlobalArg("result", None, shape="n, m", offset=lp.auto),
                lp.ValueArg("n, m", np.int32),
                "...",
            ],
            options=lp.Options(return_dict=True),
            default_offset=lp.auto,
            name="init_a_global_matrix",
        )

    def write_into_mat_prg():
        return lp.make_kernel(
            ["{[idof]: 0 <= idof < ndofs}", "{[jdof]: 0 <= jdof < mdofs}"],
            """
                result[offset_i + idof, offset_j + jdof] = mat[idof, jdof]
            """,
            [
                lp.GlobalArg("result", None, shape="n, m", offset=lp.auto),
                lp.ValueArg("n, m", np.int32),
                lp.GlobalArg("mat", None, shape="ndofs, mdofs",
                             offset=lp.auto),
                lp.ValueArg("offset_i", np.int32),
                lp.ValueArg("offset_j", np.int32),
                "...",
            ],
            options=lp.Options(return_dict=True),
            default_offset=lp.auto,
            name="write_into_global_matrix",
        )

    # Construct a block-diagonal matrix with two
    # random 5x5 blocks on the block diagonal
    # and zeros elsewhere
    n = 10
    block_n = 5
    mat1 = np.random.randn(block_n, block_n)
    mat2 = np.random.randn(block_n, block_n)
    answer = np.block([[mat1, np.zeros((block_n, block_n))],
                       [np.zeros((block_n, block_n)), mat2]])
    kwargs = {"n": n, "m": n}

    # Do some renaming of individual programs before fusion
    kernels = [init_global_mat_prg()]
    for idx, (offset, mat) in enumerate([(0, mat1), (block_n, mat2)]):
        knl = lp.rename_argument(write_into_mat_prg(), "mat", f"mat_{idx}")
        kwargs[f"mat_{idx}"] = mat

        for iname in knl.default_entrypoint.all_inames():
            knl = lp.rename_iname(knl, iname, f"{iname}_{idx}")

        knl = lp.rename_argument(knl, "ndofs", f"ndofs_{idx}")
        knl = lp.rename_argument(knl, "mdofs", f"mdofs_{idx}")
        kwargs[f"ndofs_{idx}"] = block_n
        kwargs[f"mdofs_{idx}"] = block_n

        knl = lp.rename_argument(knl, "offset_i", f"offset_i_{idx}")
        knl = lp.rename_argument(knl, "offset_j", f"offset_j_{idx}")
        kwargs[f"offset_i_{idx}"] = offset
        kwargs[f"offset_j_{idx}"] = offset

        kernels.append(knl)

    fused_knl = lp.fuse_kernels(
        kernels,
        data_flow=[("result", 0, 1), ("result", 1, 2)],
    )
    fused_knl = lp.add_nosync(fused_knl,
                              "global",
                              "writes:result",
                              "writes:result",
                              bidirectional=True,
                              force=True)
    evt, result = fused_knl(queue, **kwargs)
    result = result["result"]
    np.testing.assert_allclose(result, answer)
Code Example #15
File: fusion.py  Project: rjpower/loony
exp_kernel = lp.make_kernel(
     ''' { [i]: 0<=i<n } ''',
     ''' exp[i] = E ** z[i]''',
     assumptions="n>0")

sum_kernel = lp.make_kernel(
    '{ [j]: 0<=j<n }',
    'total = sum(j, exp[j])',
    assumptions='n>0')

softmax_kernel = lp.make_kernel(
    '{ [k]: 0<=k<n }',
    'out3[k] = exp[k] / total',
    [
        lp.GlobalArg("total", None, shape=()),
        "..."
        ],
    assumptions='n>0')

big_honkin_knl = lp.fuse_kernels([exp_kernel, sum_kernel, softmax_kernel])
#big_honkin_knl = lp.fuse_kernels([exp_kernel, sum_kernel])

print(softmax_kernel.arg_dict["total"].shape)

#big_honkin_knl = lp.tag_inames(big_honkin_knl, dict(i="l.0"))

big_honkin_knl = lp.set_options(big_honkin_knl, write_cl=True)



a = np.random.randn(20)
big_honkin_knl = big_honkin_knl(queue, z=a, E=np.float64(5))
Code Example #16
#Raw coupling
couplingknl = lp.make_kernel("{ [i_node, j_node]: 0<=i_node, j_node<n_node}",
                             coupling_raw,
                             target=target)
couplingknl = lp.add_dtypes(
    couplingknl, {
        "lengths": np.float32,
        "state": np.float32,
        "weights": np.float32,
        "theta_i": np.float32,
        "rec_speed_dt": np.float32
    })
couplingknl = lp.split_iname(couplingknl, "j_node", 1, outer_tag='l.0')
#Raw model
modelknl = lp.make_kernel("{ [i_node]: 0<=i_node<n_node}",
                          model_raw,
                          target=target)
modelknl = lp.add_dtypes(modelknl, {
    "state": np.float32,
    "theta_i": np.float32,
    "tavg": np.float32
})
# Fuse
knls = couplingknl, modelknl
data_flow = [('state', 0, 1)]
knl = lp.fuse_kernels(knls, data_flow=data_flow)
print(knl)
print("****")
knl = lp.split_iname(knl, "i_node", 128, outer_tag='g.0')
print(lp.generate_code_v2(knl).all_code())
Code Example #17
File: test_numa_diff.py  Project: cmsquared/loopy
def test_gnuma_horiz_kernel(ctx_factory, ilp_multiple, Nq, opt_level):
    ctx = ctx_factory()

    filename = "strongVolumeKernels.f90"
    with open(filename, "r") as sourcef:
        source = sourcef.read()

    source = source.replace("datafloat", "real*4")

    hsv_r, hsv_s = [
           knl for knl in lp.parse_fortran(source, filename, auto_dependencies=False)
           if "KernelR" in knl.name or "KernelS" in knl.name
           ]
    hsv_r = lp.tag_instructions(hsv_r, "rknl")
    hsv_s = lp.tag_instructions(hsv_s, "sknl")
    hsv = lp.fuse_kernels([hsv_r, hsv_s], ["_r", "_s"])
    #hsv = hsv_s

    from gnuma_loopy_transforms import (
          fix_euler_parameters,
          set_q_storage_format, set_D_storage_format)

    hsv = lp.fix_parameters(hsv, Nq=Nq)
    hsv = lp.set_loop_priority(hsv, "e,k,j,i")
    hsv = lp.tag_inames(hsv, dict(e="g.0", j="l.1", i="l.0"))
    hsv = lp.assume(hsv, "elements >= 1")

    hsv = fix_euler_parameters(hsv, p_p0=1, p_Gamma=1.4, p_R=1)
    for name in ["Q", "rhsQ"]:
        hsv = set_q_storage_format(hsv, name)

    hsv = set_D_storage_format(hsv)
    #hsv = lp.add_prefetch(hsv, "volumeGeometricFactors")

    ref_hsv = hsv

    if opt_level == 0:
        tap_hsv = hsv

    hsv = lp.add_prefetch(hsv, "D[:,:]")

    if opt_level == 1:
        tap_hsv = hsv

    # turn the first reads into subst rules
    local_prep_var_names = set()
    for insn in lp.find_instructions(hsv, "tag:local_prep"):
        assignee, = insn.assignee_var_names()
        local_prep_var_names.add(assignee)
        hsv = lp.assignment_to_subst(hsv, assignee)

    # precompute fluxes
    hsv = lp.assignment_to_subst(hsv, "JinvD_r")
    hsv = lp.assignment_to_subst(hsv, "JinvD_s")

    r_fluxes = lp.find_instructions(hsv, "tag:compute_fluxes and tag:rknl")
    s_fluxes = lp.find_instructions(hsv, "tag:compute_fluxes and tag:sknl")

    if ilp_multiple > 1:
        hsv = lp.split_iname(hsv, "k", 2, inner_tag="ilp")
        ilp_inames = ("k_inner",)
        flux_ilp_inames = ("kk",)
    else:
        ilp_inames = ()
        flux_ilp_inames = ()

    rtmps = []
    stmps = []

    flux_store_idx = 0

    for rflux_insn, sflux_insn in zip(r_fluxes, s_fluxes):
        for knl_tag, insn, flux_inames, tmps, flux_precomp_inames in [
                  ("rknl", rflux_insn, ("j", "n",), rtmps, ("jj", "ii",)),
                  ("sknl", sflux_insn, ("i", "n",), stmps, ("ii", "jj",)),
                  ]:
            flux_var, = insn.assignee_var_names()
            print(insn)

            reader, = lp.find_instructions(hsv,
                  "tag:{knl_tag} and reads:{flux_var}"
                  .format(knl_tag=knl_tag, flux_var=flux_var))

            hsv = lp.assignment_to_subst(hsv, flux_var)

            flux_store_name = "flux_store_%d" % flux_store_idx
            flux_store_idx += 1
            tmps.append(flux_store_name)

            hsv = lp.precompute(hsv, flux_var+"_subst", flux_inames + ilp_inames,
                temporary_name=flux_store_name,
                precompute_inames=flux_precomp_inames + flux_ilp_inames,
                default_tag=None)
            if flux_var.endswith("_s"):
                hsv = lp.tag_data_axes(hsv, flux_store_name, "N0,N1,N2?")
            else:
                hsv = lp.tag_data_axes(hsv, flux_store_name, "N1,N0,N2?")

            n_iname = "n_"+flux_var.replace("_r", "").replace("_s", "")
            if n_iname.endswith("_0"):
                n_iname = n_iname[:-2]
            hsv = lp.rename_iname(hsv, "n", n_iname, within="id:"+reader.id,
                  existing_ok=True)

    hsv = lp.tag_inames(hsv, dict(ii="l.0", jj="l.1"))
    for iname in flux_ilp_inames:
        hsv = lp.tag_inames(hsv, {iname: "ilp"})

    hsv = lp.alias_temporaries(hsv, rtmps)
    hsv = lp.alias_temporaries(hsv, stmps)

    if opt_level == 2:
        tap_hsv = hsv

    for prep_var_name in local_prep_var_names:
        if prep_var_name.startswith("Jinv") or "_s" in prep_var_name:
            continue
        hsv = lp.precompute(hsv,
            lp.find_one_rule_matching(hsv, prep_var_name+"_*subst*"))

    if opt_level == 3:
        tap_hsv = hsv

    hsv = lp.add_prefetch(hsv, "Q[ii,jj,k,:,:,e]", sweep_inames=ilp_inames)

    if opt_level == 4:
        tap_hsv = hsv
        tap_hsv = lp.tag_inames(tap_hsv, dict(
              Q_dim_field_inner="unr",
              Q_dim_field_outer="unr"))

    hsv = lp.buffer_array(hsv, "rhsQ", ilp_inames,
          fetch_bounding_box=True, default_tag="for",
          init_expression="0", store_expression="base + buffer")

    if opt_level == 5:
        tap_hsv = hsv
        tap_hsv = lp.tag_inames(tap_hsv, dict(
              rhsQ_init_field_inner="unr", rhsQ_store_field_inner="unr",
              rhsQ_init_field_outer="unr", rhsQ_store_field_outer="unr",
              Q_dim_field_inner="unr",
              Q_dim_field_outer="unr"))

    # buffer axes need to be vectorized in order for this to work
    hsv = lp.tag_data_axes(hsv, "rhsQ_buf", "c?,vec,c")
    hsv = lp.tag_data_axes(hsv, "Q_fetch", "c?,vec,c")
    hsv = lp.tag_data_axes(hsv, "D_fetch", "f,f")
    hsv = lp.tag_inames(hsv,
            {"Q_dim_k": "unr", "rhsQ_init_k": "unr", "rhsQ_store_k": "unr"},
            ignore_nonexistent=True)

    if opt_level == 6:
        tap_hsv = hsv
        tap_hsv = lp.tag_inames(tap_hsv, dict(
              rhsQ_init_field_inner="unr", rhsQ_store_field_inner="unr",
              rhsQ_init_field_outer="unr", rhsQ_store_field_outer="unr",
              Q_dim_field_inner="unr",
              Q_dim_field_outer="unr"))

    hsv = lp.tag_inames(hsv, dict(
          rhsQ_init_field_inner="vec", rhsQ_store_field_inner="vec",
          rhsQ_init_field_outer="unr", rhsQ_store_field_outer="unr",
          Q_dim_field_inner="vec",
          Q_dim_field_outer="unr"))

    if opt_level == 7:
        tap_hsv = hsv

    hsv = lp.collect_common_factors_on_increment(hsv, "rhsQ_buf",
          vary_by_axes=(0,) if ilp_multiple > 1 else ())

    if opt_level >= 8:
        tap_hsv = hsv

    hsv = tap_hsv

    if 1:
        print("OPS")
        op_poly = lp.get_op_poly(hsv)
        print(lp.stringify_stats_mapping(op_poly))

        print("MEM")
        gmem_poly = lp.sum_mem_access_to_bytes(lp.get_gmem_access_poly(hsv))
        print(lp.stringify_stats_mapping(gmem_poly))

    hsv = lp.set_options(hsv, cl_build_options=[
         "-cl-denorms-are-zero",
         "-cl-fast-relaxed-math",
         "-cl-finite-math-only",
         "-cl-mad-enable",
         "-cl-no-signed-zeros",
         ])

    hsv = hsv.copy(name="horizontalStrongVolumeKernel")

    results = lp.auto_test_vs_ref(ref_hsv, ctx, hsv, parameters=dict(elements=300),
            quiet=True)

    elapsed = results["elapsed_wall"]

    print("elapsed", elapsed)
Code Example #18
File: direct.py  Project: kaushikcfd/meshmode
    def __call__(self, ary):
        from meshmode.dof_array import DOFArray
        if not isinstance(ary, DOFArray):
            raise TypeError("non-array passed to discretization connection")

        actx = ary.array_context

        @memoize_in(actx, (
            DirectDiscretizationConnection,
            "resample_by_mat_knl",
            self.is_surjective,
        ))
        def mat_knl():
            if self.is_surjective:
                domains = [
                    """
                        {[iel, idof, j]:
                        0<=iel<nelements and
                        0<=idof<n_to_nodes and
                        0<=j<n_from_nodes}
                        """,
                ]

                instructions = """
                result[to_element_indices[iel], idof] \
                        = sum(j, resample_mat[idof, j] \
                        * ary[from_element_indices[iel], j])
                        """
            else:
                domains = [
                    """
                        {[iel_init, idof_init]:
                        0<=iel_init<nelements_result and
                        0<=idof_init<n_to_nodes}
                        """,
                    """
                        {[iel, idof, j]:
                        0<=iel<nelements and
                        0<=idof<n_to_nodes and
                        0<=j<n_from_nodes}
                        """,
                ]

                instructions = """
                result[iel_init, idof_init] = 0 {id=init}
                ... gbarrier {id=barrier, dep=init}
                result[to_element_indices[iel], idof] \
                        = sum(j, resample_mat[idof, j] \
                        * ary[from_element_indices[iel], j]) {dep=barrier}
                        """
            knl = make_loopy_program(
                domains,
                instructions, [
                    lp.GlobalArg("result",
                                 None,
                                 shape="nelements_result, n_to_nodes",
                                 offset=lp.auto),
                    lp.GlobalArg("ary",
                                 None,
                                 shape="nelements_vec, n_from_nodes",
                                 offset=lp.auto),
                    lp.ValueArg("nelements_result", np.int32),
                    lp.ValueArg("nelements_vec", np.int32),
                    lp.ValueArg("n_from_nodes", np.int32),
                    "...",
                ],
                name="resample_by_mat")

            return knl

        @memoize_in(actx, (DirectDiscretizationConnection,
                           "resample_by_picking_knl", self.is_surjective))
        def pick_knl():
            if self.is_surjective:
                domains = [
                    """{[iel, idof]:
                        0<=iel<nelements and
                        0<=idof<n_to_nodes}"""
                ]
                instructions = """
                result[to_element_indices[iel], idof] \
                    = ary[from_element_indices[iel], pick_list[idof]]
                """
            else:
                domains = [
                    """
                        {[iel_init, idof_init]:
                        0<=iel_init<nelements_result and
                        0<=idof_init<n_to_nodes}
                        """, """
                        {[iel, idof]:
                        0<=iel<nelements and
                        0<=idof<n_to_nodes}
                        """
                ]
                instructions = """
                result[iel_init, idof_init] = 0 {id=init}
                ... gbarrier {id=barrier, dep=init}
                result[to_element_indices[iel], idof] \
                    = ary[from_element_indices[iel], pick_list[idof]] {dep=barrier}
                """
            knl = make_loopy_program(
                domains,
                instructions, [
                    lp.GlobalArg("result",
                                 None,
                                 shape="nelements_result, n_to_nodes",
                                 offset=lp.auto),
                    lp.GlobalArg("ary",
                                 None,
                                 shape="nelements_vec, n_from_nodes",
                                 offset=lp.auto),
                    lp.ValueArg("nelements_result", np.int32),
                    lp.ValueArg("nelements_vec", np.int32),
                    lp.ValueArg("n_from_nodes", np.int32),
                    "...",
                ],
                name="resample_by_picking")

            return knl

        if ary.shape != (len(self.from_discr.groups), ):
            raise ValueError("invalid shape of incoming resampling data")

        group_idx_to_result = []

        for i_tgrp, (tgrp,
                     cgrp) in enumerate(zip(self.to_discr.groups,
                                            self.groups)):

            kernels = []  # get kernels for each batch; to be fused eventually
            kwargs = {}  # kwargs to the fused kernel
            for i_batch, batch in enumerate(cgrp.batches):
                if batch.from_element_indices.size == 0:
                    continue

                point_pick_indices = self._resample_point_pick_indices(
                    actx, i_tgrp, i_batch)

                if point_pick_indices is None:
                    knl = mat_knl()
                    knl = lp.rename_argument(knl, "resample_mat",
                                             f"resample_mat_{i_batch}")
                    kwargs[f"resample_mat_{i_batch}"] = (self._resample_matrix(
                        actx, i_tgrp, i_batch))
                else:
                    knl = pick_knl()
                    knl = lp.rename_argument(knl, "pick_list",
                                             f"pick_list_{i_batch}")
                    kwargs[f"pick_list_{i_batch}"] = point_pick_indices

                # {{{ enforce different namespaces for the kernels

                for iname in knl.all_inames():
                    knl = lp.rename_iname(knl, iname, f"{iname}_{i_batch}")

                knl = lp.rename_argument(knl, "ary", f"ary_{i_batch}")
                knl = lp.rename_argument(knl, "from_element_indices",
                                         f"from_element_indices_{i_batch}")
                knl = lp.rename_argument(knl, "to_element_indices",
                                         f"to_element_indices_{i_batch}")
                knl = lp.rename_argument(knl, "nelements",
                                         f"nelements_{i_batch}")

                # }}}

                kwargs[f"ary_{i_batch}"] = ary[batch.from_group_index]
                kwargs[f"from_element_indices_{i_batch}"] = (
                    batch.from_element_indices)
                kwargs[f"to_element_indices_{i_batch}"] = (
                    batch.to_element_indices)

                kernels.append(knl)

            fused_knl = lp.fuse_kernels(kernels)
            # order of operations doesn't matter
            fused_knl = lp.add_nosync(fused_knl,
                                      "global",
                                      "writes:result",
                                      "writes:result",
                                      bidirectional=True,
                                      force=True)

            result_dict = actx.call_loopy(fused_knl,
                                          nelements_result=tgrp.nelements,
                                          n_to_nodes=tgrp.nunit_dofs,
                                          **kwargs)

            group_idx_to_result.append(result_dict["result"])

        from meshmode.dof_array import DOFArray
        return DOFArray.from_list(actx, group_idx_to_result)