def test_add_nosync():
    """Exercise lp.add_nosync: no-dependency, dependent, bidirectional,
    and conflict-group cases."""
    base_knl = lp.make_kernel("{[i]: 0<=i<10}",
            """
            <>tmp[i] = 10 {id=insn1}
            <>tmp2[i] = 10 {id=insn2}
            <>tmp3[2*i] = 0 {id=insn3}
            <>tmp4 = 1 + tmp3[2*i] {id=insn4}
            <>tmp5[i] = 0 {id=insn5,groups=g1}
            tmp5[i] = 1 {id=insn6,conflicts=g1}
            """)

    base_knl = lp.set_temporary_scope(base_knl, "tmp3", "local")
    base_knl = lp.set_temporary_scope(base_knl, "tmp5", "local")

    # No dependency between the matched instructions: nothing gets added.
    knl = lp.add_nosync(base_knl, "any", "writes:tmp", "writes:tmp2",
            empty_ok=True)
    assert frozenset() == knl.id_to_insn["insn2"].no_sync_with

    # A real dependency: only the sink gains the nosync entry.
    knl = lp.add_nosync(base_knl, "local", "writes:tmp3", "reads:tmp3")
    assert frozenset() == knl.id_to_insn["insn3"].no_sync_with
    assert (frozenset([("insn3", "local")])
            == knl.id_to_insn["insn4"].no_sync_with)

    # Bidirectional: both source and sink gain entries.
    knl = lp.add_nosync(
            base_knl, "local", "writes:tmp3", "reads:tmp3",
            bidirectional=True)
    assert (frozenset([("insn4", "local")])
            == knl.id_to_insn["insn3"].no_sync_with)
    assert (frozenset([("insn3", "local")])
            == knl.id_to_insn["insn4"].no_sync_with)

    # Conflicting-group instructions matched directly by id.
    knl = lp.add_nosync(base_knl, "local", "insn5", "insn6")
    assert (frozenset([("insn5", "local")])
            == knl.id_to_insn["insn6"].no_sync_with)
def test_add_nosync():
    """Check the behavior of lp.add_nosync across its main modes of use."""
    orig_knl = lp.make_kernel("{[i]: 0<=i<10}",
            """
            <>tmp[i] = 10 {id=insn1}
            <>tmp2[i] = 10 {id=insn2}
            <>tmp3[2*i] = 0 {id=insn3}
            <>tmp4 = 1 + tmp3[2*i] {id=insn4}
            <>tmp5[i] = 0 {id=insn5,groups=g1}
            tmp5[i] = 1 {id=insn6,conflicts=g1}
            """)
    for tmp_name in ("tmp3", "tmp5"):
        orig_knl = lp.set_temporary_scope(orig_knl, tmp_name, "local")

    # Case 1: no dependency present -- add_nosync must not add anything.
    knl = lp.add_nosync(orig_knl, "any", "writes:tmp", "writes:tmp2",
            empty_ok=True)
    assert knl.id_to_insn["insn2"].no_sync_with == frozenset()

    # Case 2: dependency present -- the reading instruction is annotated.
    knl = lp.add_nosync(orig_knl, "local", "writes:tmp3", "reads:tmp3")
    assert knl.id_to_insn["insn3"].no_sync_with == frozenset()
    assert knl.id_to_insn["insn4"].no_sync_with == frozenset(
            [("insn3", "local")])

    # Case 3: bidirectional -- annotations appear on both sides.
    knl = lp.add_nosync(
            orig_knl, "local", "writes:tmp3", "reads:tmp3",
            bidirectional=True)
    assert knl.id_to_insn["insn3"].no_sync_with == frozenset(
            [("insn4", "local")])
    assert knl.id_to_insn["insn4"].no_sync_with == frozenset(
            [("insn3", "local")])

    # Case 4: instructions related through conflicting groups.
    knl = lp.add_nosync(orig_knl, "local", "insn5", "insn6")
    assert knl.id_to_insn["insn6"].no_sync_with == frozenset(
            [("insn5", "local")])
def finish(self):
    """Assemble the updated kernel: apply pending instruction updates and
    insertions, merge iname tags and temporaries, extend the domains, add
    nosync edges between newly added save/reload pairs, and assign axes.
    """
    from itertools import product

    import islpy as isl

    # Replace updated instructions in place, preserving original order.
    pending = {insn.id: insn for insn in self.insns_to_insert}
    new_instructions = [
            self.insns_to_update.get(orig_insn.id, orig_insn)
            for orig_insn in self.kernel.instructions]
    # Append freshly created instructions in a deterministic (id-sorted)
    # order.
    new_instructions.extend(
            sorted(pending.values(), key=lambda insn: insn.id))

    self.updated_iname_to_tag.update(self.kernel.iname_to_tag)
    self.updated_temporary_variables.update(self.kernel.temporary_variables)

    new_domains = list(self.kernel.domains)
    # Only append the new subdomain if it actually has set dimensions.
    if self.new_subdomain.dim(isl.dim_type.set) > 0:
        new_domains.append(self.new_subdomain)

    kernel = self.kernel.copy(
            domains=new_domains,
            instructions=new_instructions,
            iname_to_tag=self.updated_iname_to_tag,
            temporary_variables=self.updated_temporary_variables,
            overridden_get_grid_sizes_for_insn_ids=None)

    # Add nosync directives to any saves or reloads that were added with a
    # potential dependency chain.
    from loopy.kernel.tools import get_subkernels
    for subkernel in get_subkernels(kernel):
        new_ids = self.subkernel_to_newly_added_insn_ids[subkernel]

        for temporary in self.temporary_to_reload_ids:
            reloads = new_ids & self.temporary_to_reload_ids[temporary]
            saves = new_ids & self.temporary_to_save_ids[temporary]
            for source, sink in product(reloads, saves):
                kernel = lp.add_nosync(kernel, "global", source, sink)

    from loopy.kernel.tools import assign_automatic_axes
    return assign_automatic_axes(kernel)
def finish(self):
    """Build and return the finalized kernel.

    Applies queued instruction replacements/insertions, merges collected
    iname tags and temporaries back in, grows the domain list, inserts
    global nosync edges between newly added reload/save instruction pairs,
    and finishes with automatic axis assignment.
    """
    from itertools import product

    import islpy as isl

    to_insert = {insn.id: insn for insn in self.insns_to_insert}

    # Keep original instruction order, swapping in updated versions.
    new_instructions = [
            self.insns_to_update.get(insn.id, insn)
            for insn in self.kernel.instructions]
    # Insert new instructions afterwards, sorted by id for determinism.
    new_instructions.extend(
            sorted(to_insert.values(), key=lambda insn: insn.id))

    self.updated_iname_to_tags.update(self.kernel.iname_to_tags)
    self.updated_temporary_variables.update(self.kernel.temporary_variables)

    new_domains = list(self.kernel.domains)
    # Skip an empty (zero-dimensional) subdomain.
    if self.new_subdomain.dim(isl.dim_type.set) > 0:
        new_domains.append(self.new_subdomain)

    kernel = self.kernel.copy(
            domains=new_domains,
            instructions=new_instructions,
            iname_to_tags=self.updated_iname_to_tags,
            temporary_variables=self.updated_temporary_variables,
            overridden_get_grid_sizes_for_insn_ids=None)

    # Add nosync directives to any saves or reloads that were added with a
    # potential dependency chain.
    from loopy.kernel.tools import get_subkernels
    for subkernel in get_subkernels(kernel):
        added_here = self.subkernel_to_newly_added_insn_ids[subkernel]

        for temporary in self.temporary_to_reload_ids:
            reload_ids = added_here & self.temporary_to_reload_ids[temporary]
            save_ids = added_here & self.temporary_to_save_ids[temporary]
            for source, sink in product(reload_ids, save_ids):
                kernel = lp.add_nosync(kernel, "global", source, sink)

    from loopy.kernel.tools import assign_automatic_axes
    return assign_automatic_axes(kernel)
def test_gnuma_horiz_kernel(ctx_factory, ilp_multiple, Nq, opt_level):  # noqa
    """Fuse the Fortran strong-volume r/s kernels and apply a staged series
    of optimizations (selected by *opt_level*), then auto-test the optimized
    kernel against the unoptimized reference."""
    pytest.importorskip("fparser")
    ctx = ctx_factory()

    filename = os.path.join(os.path.dirname(__file__),
            "strongVolumeKernels.f90")
    with open(filename) as sourcef:
        source = sourcef.read()

    source = source.replace("datafloat", "real*4")

    program = lp.parse_fortran(source, filename, seq_dependencies=False)

    hsv_r = program["strongVolumeKernelR"]
    hsv_s = program["strongVolumeKernelS"]

    hsv_r = lp.tag_instructions(hsv_r, "rknl")
    hsv_s = lp.tag_instructions(hsv_s, "sknl")
    hsv = lp.fuse_kernels([hsv_r, hsv_s], ["_r", "_s"])
    #hsv = hsv_s

    # The fused r/s pieces both increment rhsQ; declare those writes
    # synchronization-free.
    hsv = lp.add_nosync(hsv, "any", "writes:rhsQ", "writes:rhsQ", force=True)

    from gnuma_loopy_transforms import (
            fix_euler_parameters, set_q_storage_format, set_D_storage_format)

    hsv = lp.fix_parameters(hsv, Nq=Nq)
    hsv = lp.prioritize_loops(hsv, "e,k,j,i")
    hsv = lp.tag_inames(hsv, dict(e="g.0", j="l.1", i="l.0"))
    hsv = lp.assume(hsv, "elements >= 1")

    hsv = fix_euler_parameters(hsv, p_p0=1, p_Gamma=1.4, p_R=1)
    from loopy.frontend.fortran.translator import specialize_fortran_division
    hsv = specialize_fortran_division(hsv)

    for name in ["Q", "rhsQ"]:
        hsv = set_q_storage_format(hsv, name)

    hsv = set_D_storage_format(hsv)
    #hsv = lp.add_prefetch(hsv, "volumeGeometricFactors")

    ref_hsv = hsv

    if opt_level == 0:
        tap_hsv = hsv

    hsv = lp.add_prefetch(hsv, "D[:,:]", fetch_outer_inames="e",
            default_tag="l.auto")

    if opt_level == 1:
        tap_hsv = hsv

    # turn the first reads into subst rules
    local_prep_var_names = set()
    for insn in lp.find_instructions(hsv, "tag:local_prep"):
        assignee, = insn.assignee_var_names()
        local_prep_var_names.add(assignee)
        hsv = lp.assignment_to_subst(hsv, assignee)

    # precompute fluxes
    hsv = lp.assignment_to_subst(hsv, "JinvD_r")
    hsv = lp.assignment_to_subst(hsv, "JinvD_s")

    r_fluxes = lp.find_instructions(hsv, "tag:compute_fluxes and tag:rknl")
    s_fluxes = lp.find_instructions(hsv, "tag:compute_fluxes and tag:sknl")

    if ilp_multiple > 1:
        hsv = lp.split_iname(hsv, "k", 2, inner_tag="ilp")
        ilp_inames = ("k_inner",)
        flux_ilp_inames = ("kk",)
    else:
        ilp_inames = ()
        flux_ilp_inames = ()

    rtmps = []
    stmps = []

    flux_store_idx = 0

    for rflux_insn, sflux_insn in zip(r_fluxes, s_fluxes):
        for knl_tag, insn, flux_inames, tmps, flux_precomp_inames in [
                ("rknl", rflux_insn, ("j", "n",), rtmps, ("jj", "ii",)),
                ("sknl", sflux_insn, ("i", "n",), stmps, ("ii", "jj",)),
                ]:
            flux_var, = insn.assignee_var_names()
            print(insn)

            reader, = lp.find_instructions(hsv,
                    "tag:{knl_tag} and reads:{flux_var}".format(
                        knl_tag=knl_tag, flux_var=flux_var))

            hsv = lp.assignment_to_subst(hsv, flux_var)

            flux_store_name = "flux_store_%d" % flux_store_idx
            flux_store_idx += 1
            tmps.append(flux_store_name)

            hsv = lp.precompute(hsv, flux_var + "_subst",
                    flux_inames + ilp_inames,
                    temporary_name=flux_store_name,
                    precompute_inames=flux_precomp_inames + flux_ilp_inames,
                    default_tag=None)
            # Storage layout differs between the r- and s-direction stores.
            if flux_var.endswith("_s"):
                hsv = lp.tag_array_axes(hsv, flux_store_name, "N0,N1,N2?")
            else:
                hsv = lp.tag_array_axes(hsv, flux_store_name, "N1,N0,N2?")

            n_iname = "n_" + flux_var.replace("_r", "").replace("_s", "")
            if n_iname.endswith("_0"):
                n_iname = n_iname[:-2]
            hsv = lp.rename_iname(hsv, "n", n_iname,
                    within="id:" + reader.id, existing_ok=True)

    hsv = lp.tag_inames(hsv, dict(ii="l.0", jj="l.1"))
    for iname in flux_ilp_inames:
        hsv = lp.tag_inames(hsv, {iname: "ilp"})

    hsv = lp.alias_temporaries(hsv, rtmps)
    hsv = lp.alias_temporaries(hsv, stmps)

    if opt_level == 2:
        tap_hsv = hsv

    for prep_var_name in local_prep_var_names:
        if prep_var_name.startswith("Jinv") or "_s" in prep_var_name:
            continue
        hsv = lp.precompute(hsv,
                lp.find_one_rule_matching(hsv, prep_var_name + "_*subst*"),
                default_tag="l.auto")

    if opt_level == 3:
        tap_hsv = hsv

    hsv = lp.add_prefetch(hsv, "Q[ii,jj,k,:,:,e]", sweep_inames=ilp_inames,
            default_tag="l.auto")

    if opt_level == 4:
        tap_hsv = hsv
        tap_hsv = lp.tag_inames(tap_hsv, dict(
                Q_dim_field_inner="unr",
                Q_dim_field_outer="unr"))

    hsv = lp.buffer_array(hsv, "rhsQ", ilp_inames,
            fetch_bounding_box=True, default_tag="for",
            init_expression="0", store_expression="base + buffer")

    if opt_level == 5:
        tap_hsv = hsv
        tap_hsv = lp.tag_inames(tap_hsv, dict(
                rhsQ_init_field_inner="unr", rhsQ_store_field_inner="unr",
                rhsQ_init_field_outer="unr", rhsQ_store_field_outer="unr",
                Q_dim_field_inner="unr", Q_dim_field_outer="unr"))

    # buffer axes need to be vectorized in order for this to work
    hsv = lp.tag_array_axes(hsv, "rhsQ_buf", "c?,vec,c")
    hsv = lp.tag_array_axes(hsv, "Q_fetch", "c?,vec,c")
    hsv = lp.tag_array_axes(hsv, "D_fetch", "f,f")
    hsv = lp.tag_inames(hsv, {
            "Q_dim_k": "unr",
            "rhsQ_init_k": "unr",
            "rhsQ_store_k": "unr",
            },
            ignore_nonexistent=True)

    if opt_level == 6:
        tap_hsv = hsv
        tap_hsv = lp.tag_inames(tap_hsv, dict(
                rhsQ_init_field_inner="unr", rhsQ_store_field_inner="unr",
                rhsQ_init_field_outer="unr", rhsQ_store_field_outer="unr",
                Q_dim_field_inner="unr", Q_dim_field_outer="unr"))

    hsv = lp.tag_inames(hsv, dict(
            rhsQ_init_field_inner="vec", rhsQ_store_field_inner="vec",
            rhsQ_init_field_outer="unr", rhsQ_store_field_outer="unr",
            Q_dim_field_inner="vec", Q_dim_field_outer="unr"))

    if opt_level == 7:
        tap_hsv = hsv

    hsv = lp.collect_common_factors_on_increment(hsv, "rhsQ_buf",
            vary_by_axes=(0,) if ilp_multiple > 1 else ())

    if opt_level >= 8:
        tap_hsv = hsv

    hsv = tap_hsv

    hsv = lp.set_options(hsv, cl_build_options=[
            "-cl-denorms-are-zero", "-cl-fast-relaxed-math",
            "-cl-finite-math-only", "-cl-mad-enable",
            "-cl-no-signed-zeros"])

    if 1:
        print("OPS")
        op_map = lp.get_op_map(hsv, subgroup_size=32)
        print(lp.stringify_stats_mapping(op_map))

        print("MEM")
        gmem_map = lp.get_mem_access_map(hsv, subgroup_size=32).to_bytes()
        print(lp.stringify_stats_mapping(gmem_map))

    # FIXME: renaming's a bit tricky in this program model.
    # add a simple transformation for it
    # hsv = hsv.copy(name="horizontalStrongVolumeKernel")

    results = lp.auto_test_vs_ref(ref_hsv, ctx, hsv,
            parameters=dict(elements=300), quiet=True)

    elapsed = results["elapsed_wall"]
    print("elapsed", elapsed)
def __call__(self, ary):
    """Resample *ary* (a DOFArray on the from-discretization) onto the
    to-discretization, fusing all per-batch kernels of each group into a
    single loopy program before execution."""
    from meshmode.dof_array import DOFArray
    if not isinstance(ary, DOFArray):
        raise TypeError("non-array passed to discretization connection")

    actx = ary.array_context

    @memoize_in(actx, (
            DirectDiscretizationConnection,
            "resample_by_mat_knl",
            self.is_surjective,
            ))
    def mat_knl():
        # Non-surjective connections must first zero the result, followed
        # by a global barrier, since not every output entry gets written.
        if self.is_surjective:
            domains = [
                    """
                    {[iel, idof, j]:
                        0<=iel<nelements and
                        0<=idof<n_to_nodes and
                        0<=j<n_from_nodes}
                    """,
                    ]
            instructions = """
                result[to_element_indices[iel], idof] \
                        = sum(j, resample_mat[idof, j] \
                        * ary[from_element_indices[iel], j])
                """
        else:
            domains = [
                    """
                    {[iel_init, idof_init]:
                        0<=iel_init<nelements_result and
                        0<=idof_init<n_to_nodes}
                    """,
                    """
                    {[iel, idof, j]:
                        0<=iel<nelements and
                        0<=idof<n_to_nodes and
                        0<=j<n_from_nodes}
                    """,
                    ]
            instructions = """
                result[iel_init, idof_init] = 0 {id=init}
                ... gbarrier {id=barrier, dep=init}
                result[to_element_indices[iel], idof] \
                        = sum(j, resample_mat[idof, j] \
                        * ary[from_element_indices[iel], j]) {dep=barrier}
                """

        return make_loopy_program(
                domains,
                instructions,
                [
                    lp.GlobalArg("result", None,
                        shape="nelements_result, n_to_nodes",
                        offset=lp.auto),
                    lp.GlobalArg("ary", None,
                        shape="nelements_vec, n_from_nodes",
                        offset=lp.auto),
                    lp.ValueArg("nelements_result", np.int32),
                    lp.ValueArg("nelements_vec", np.int32),
                    lp.ValueArg("n_from_nodes", np.int32),
                    "...",
                    ],
                name="resample_by_mat")

    @memoize_in(actx, (
            DirectDiscretizationConnection,
            "resample_by_picking_knl",
            self.is_surjective))
    def pick_knl():
        # Same surjectivity split as mat_knl, but gathering via a pick
        # list instead of a matrix product.
        if self.is_surjective:
            domains = [
                    """{[iel, idof]:
                        0<=iel<nelements and
                        0<=idof<n_to_nodes}"""
                    ]
            instructions = """
                result[to_element_indices[iel], idof] \
                        = ary[from_element_indices[iel], pick_list[idof]]
                """
        else:
            domains = [
                    """
                    {[iel_init, idof_init]:
                        0<=iel_init<nelements_result and
                        0<=idof_init<n_to_nodes}
                    """,
                    """
                    {[iel, idof]:
                        0<=iel<nelements and
                        0<=idof<n_to_nodes}
                    """
                    ]
            instructions = """
                result[iel_init, idof_init] = 0 {id=init}
                ... gbarrier {id=barrier, dep=init}
                result[to_element_indices[iel], idof] \
                        = ary[from_element_indices[iel], pick_list[idof]] \
                        {dep=barrier}
                """

        return make_loopy_program(
                domains,
                instructions,
                [
                    lp.GlobalArg("result", None,
                        shape="nelements_result, n_to_nodes",
                        offset=lp.auto),
                    lp.GlobalArg("ary", None,
                        shape="nelements_vec, n_from_nodes",
                        offset=lp.auto),
                    lp.ValueArg("nelements_result", np.int32),
                    lp.ValueArg("nelements_vec", np.int32),
                    lp.ValueArg("n_from_nodes", np.int32),
                    "...",
                    ],
                name="resample_by_picking")

    if ary.shape != (len(self.from_discr.groups),):
        raise ValueError("invalid shape of incoming resampling data")

    group_idx_to_result = []

    for i_tgrp, (tgrp, cgrp) in enumerate(
            zip(self.to_discr.groups, self.groups)):

        kernels = []  # get kernels for each batch; to be fused eventually
        kwargs = {}  # kwargs to the fused kernel

        for i_batch, batch in enumerate(cgrp.batches):
            if batch.from_element_indices.size == 0:
                continue

            point_pick_indices = self._resample_point_pick_indices(
                    actx, i_tgrp, i_batch)

            if point_pick_indices is None:
                knl = mat_knl()
                knl = lp.rename_argument(knl, "resample_mat",
                        f"resample_mat_{i_batch}")
                kwargs[f"resample_mat_{i_batch}"] = (
                        self._resample_matrix(actx, i_tgrp, i_batch))
            else:
                knl = pick_knl()
                knl = lp.rename_argument(knl, "pick_list",
                        f"pick_list_{i_batch}")
                kwargs[f"pick_list_{i_batch}"] = point_pick_indices

            # {{{ enforce different namespaces for the kernels

            for iname in knl.all_inames():
                knl = lp.rename_iname(knl, iname, f"{iname}_{i_batch}")

            knl = lp.rename_argument(knl, "ary", f"ary_{i_batch}")
            knl = lp.rename_argument(knl, "from_element_indices",
                    f"from_element_indices_{i_batch}")
            knl = lp.rename_argument(knl, "to_element_indices",
                    f"to_element_indices_{i_batch}")
            knl = lp.rename_argument(knl, "nelements",
                    f"nelements_{i_batch}")

            # }}}

            kwargs[f"ary_{i_batch}"] = ary[batch.from_group_index]
            kwargs[f"from_element_indices_{i_batch}"] = (
                    batch.from_element_indices)
            kwargs[f"to_element_indices_{i_batch}"] = (
                    batch.to_element_indices)

            kernels.append(knl)

        fused_knl = lp.fuse_kernels(kernels)
        # order of operations doesn't matter
        fused_knl = lp.add_nosync(fused_knl, "global",
                "writes:result", "writes:result",
                bidirectional=True, force=True)

        result_dict = actx.call_loopy(fused_knl,
                nelements_result=tgrp.nelements,
                n_to_nodes=tgrp.nunit_dofs,
                **kwargs)

        group_idx_to_result.append(result_dict["result"])

    return DOFArray.from_list(actx, group_idx_to_result)
def test_write_block_matrix_fusion(ctx_factory):
    """
    A slightly more complicated fusion test, where all sub-kernels write
    into the same global matrix, but in well-defined separate blocks.
    This test makes sure data-flow specification is preserved during
    fusion for matrix-assembly-like programs.
    """
    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)

    def init_global_mat_prg():
        # Kernel that zeroes the full n x m result matrix.
        return lp.make_kernel(
                ["{[idof]: 0 <= idof < n}", "{[jdof]: 0 <= jdof < m}"],
                """
                result[idof, jdof] = 0 {id=init}
                """,
                [
                    lp.GlobalArg("result", None, shape="n, m",
                        offset=lp.auto),
                    lp.ValueArg("n, m", np.int32),
                    "...",
                    ],
                options=lp.Options(return_dict=True),
                default_offset=lp.auto,
                name="init_a_global_matrix",
                )

    def write_into_mat_prg():
        # Kernel that copies an ndofs x mdofs block into the result
        # matrix at the given offsets.
        return lp.make_kernel(
                ["{[idof]: 0 <= idof < ndofs}",
                 "{[jdof]: 0 <= jdof < mdofs}"],
                """
                result[offset_i + idof, offset_j + jdof] = mat[idof, jdof]
                """,
                [
                    lp.GlobalArg("result", None, shape="n, m",
                        offset=lp.auto),
                    lp.ValueArg("n, m", np.int32),
                    lp.GlobalArg("mat", None, shape="ndofs, mdofs",
                        offset=lp.auto),
                    lp.ValueArg("offset_i", np.int32),
                    lp.ValueArg("offset_j", np.int32),
                    "...",
                    ],
                options=lp.Options(return_dict=True),
                default_offset=lp.auto,
                name="write_into_global_matrix",
                )

    # Construct a 2x2 diagonal matrix with
    # random 5x5 blocks on the block-diagonal,
    # and zeros elsewhere
    n = 10
    block_n = 5
    mat1 = np.random.randn(block_n, block_n)
    mat2 = np.random.randn(block_n, block_n)
    answer = np.block([
            [mat1, np.zeros((block_n, block_n))],
            [np.zeros((block_n, block_n)), mat2],
            ])

    kwargs = {"n": n, "m": n}

    # Do some renaming of individual programs before fusion
    kernels = [init_global_mat_prg()]
    for idx, (offset, mat) in enumerate([(0, mat1), (block_n, mat2)]):
        knl = lp.rename_argument(write_into_mat_prg(), "mat", f"mat_{idx}")
        kwargs[f"mat_{idx}"] = mat

        # Keep each sub-kernel's inames and size/offset arguments in a
        # private namespace so fusion cannot accidentally unify them.
        for iname in knl.default_entrypoint.all_inames():
            knl = lp.rename_iname(knl, iname, f"{iname}_{idx}")

        knl = lp.rename_argument(knl, "ndofs", f"ndofs_{idx}")
        knl = lp.rename_argument(knl, "mdofs", f"mdofs_{idx}")
        kwargs[f"ndofs_{idx}"] = block_n
        kwargs[f"mdofs_{idx}"] = block_n

        knl = lp.rename_argument(knl, "offset_i", f"offset_i_{idx}")
        knl = lp.rename_argument(knl, "offset_j", f"offset_j_{idx}")
        kwargs[f"offset_i_{idx}"] = offset
        kwargs[f"offset_j_{idx}"] = offset

        kernels.append(knl)

    fused_knl = lp.fuse_kernels(
            kernels,
            data_flow=[("result", 0, 1), ("result", 1, 2)],
            )
    fused_knl = lp.add_nosync(fused_knl, "global",
            "writes:result", "writes:result",
            bidirectional=True, force=True)

    evt, result = fused_knl(queue, **kwargs)
    result = result["result"]
    np.testing.assert_allclose(result, answer)