Example no. 1
def __call__(self, definition_ir) -> Dict[str, Dict[str, str]]:
    # Lower the definition IR through GTIR and OIR to CUIR, then generate the
    # CUDA computation source and its bindings.
    gtir = GtirPipeline(DefIRToGTIR.apply(definition_ir)).full()
    base_oir = gtir_to_oir.GTIRToOIR().visit(gtir)
    oir_pipeline = self.backend.builder.options.backend_opts.get(
        "oir_pipeline", DefaultPipeline(skip=[NoFieldAccessPruning])
    )
    oir = oir_pipeline.run(base_oir)
    oir = FillFlushToLocalKCaches().visit(oir)
    cuir = oir_to_cuir.OIRToCUIR().visit(oir)
    cuir = kernel_fusion.FuseKernels().visit(cuir)
    cuir = extent_analysis.CacheExtents().visit(cuir)
    format_source = self.backend.builder.options.format_source
    implementation = cuir_codegen.CUIRCodegen.apply(cuir, format_source=format_source)
    bindings = GTCCudaBindingsCodegen.apply(
        cuir,
        module_name=self.module_name,
        backend=self.backend,
        format_source=format_source,
    )
    return {
        "computation": {"computation.hpp": implementation},
        "bindings": {"bindings.cu": bindings},
    }
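The mapping returned above associates output directories with generated source files. As a hedged illustration only (the write_sources helper and its build_dir argument below are assumptions, not part of the code above), the result could be written to disk like this:

import pathlib
from typing import Dict


def write_sources(sources: Dict[str, Dict[str, str]], build_dir: str) -> None:
    # Each top-level key ("computation", "bindings") becomes a subdirectory,
    # each inner key a file name, and each value the generated source text.
    for subdir, files in sources.items():
        target = pathlib.Path(build_dir) / subdir
        target.mkdir(parents=True, exist_ok=True)
        for filename, source in files.items():
            (target / filename).write_text(source)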
Example no. 2
    def __call__(
            self,
            definition_ir: StencilDefinition) -> Dict[str, Dict[str, str]]:
        gtir = GtirPipeline(DefIRToGTIR.apply(definition_ir)).full()
        base_oir = gtir_to_oir.GTIRToOIR().visit(gtir)
        oir_pipeline = self.backend.builder.options.backend_opts.get(
            "oir_pipeline",
            DefaultPipeline(skip=[MaskStmtMerging, MaskInlining]),
        )
        oir = oir_pipeline.run(base_oir)
        sdfg = OirSDFGBuilder().visit(oir)
        sdfg.expand_library_nodes(recursive=True)
        sdfg.apply_strict_transformations(validate=True)

        implementation = DaCeComputationCodegen.apply(gtir, sdfg)
        bindings = DaCeBindingsCodegen.apply(gtir,
                                             sdfg,
                                             module_name=self.module_name,
                                             backend=self.backend)

        bindings_ext = ".cu" if self.backend.GT_BACKEND_T == "gpu" else ".cpp"
        return {
            "computation": {
                "computation.hpp": implementation
            },
            "bindings": {
                "bindings" + bindings_ext: bindings
            },
        }
Example no. 3
def __call__(self, definition_ir) -> Dict[str, Dict[str, str]]:
    gtir = GtirPipeline(DefIRToGTIR.apply(definition_ir)).full()
    base_oir = gtir_to_oir.GTIRToOIR().visit(gtir)
    oir_pipeline = self.backend.builder.options.backend_opts.get(
        "oir_pipeline", DefaultPipeline(skip=[FillFlushToLocalKCaches])
    )
    oir = oir_pipeline.run(base_oir)
    gtcpp = oir_to_gtcpp.OIRToGTCpp().visit(oir)
    format_source = self.backend.builder.options.format_source
    implementation = gtcpp_codegen.GTCppCodegen.apply(
        gtcpp, gt_backend_t=self.backend.GT_BACKEND_T, format_source=format_source
    )
    bindings = GTCppBindingsCodegen.apply(
        gtcpp,
        module_name=self.module_name,
        backend=self.backend,
        format_source=format_source,
    )
    bindings_ext = ".cu" if self.backend.GT_BACKEND_T == "gpu" else ".cpp"
    return {
        "computation": {"computation.hpp": implementation},
        "bindings": {"bindings" + bindings_ext: bindings},
    }
Example no. 4
    def __call__(
            self,
            definition_ir: StencilDefinition) -> Dict[str, Dict[str, str]]:
        gtir = DefIRToGTIR.apply(definition_ir)
        gtir_without_unused_params = prune_unused_parameters(gtir)
        dtype_deduced = resolve_dtype(gtir_without_unused_params)
        upcasted = upcast(dtype_deduced)
        oir = gtir_to_oir.GTIRToOIR().visit(upcasted)
        oir = self._optimize_oir(oir)
        sdfg = OirSDFGBuilder().visit(oir)
        sdfg.expand_library_nodes(recursive=True)
        # TODO uncomment once the branch dace/linus-fixes-8 is merged into dace/master
        # sdfg.apply_strict_transformations(validate=True) # noqa: E800 Found commented out code

        implementation = DaCeComputationCodegen.apply(gtir, sdfg)
        bindings = DaCeBindingsCodegen.apply(gtir,
                                             sdfg,
                                             module_name=self.module_name)

        bindings_ext = ".cu" if self.backend.GT_BACKEND_T == "gpu" else ".cpp"
        return {
            "computation": {
                "computation.hpp": implementation
            },
            "bindings": {
                "bindings" + bindings_ext: bindings
            },
        }
Example no. 5
def make_args_data_from_gtir(pipeline: GtirPipeline) -> ModuleData:
    """
    Compute module data containing information about stencil arguments from gtir.

    This is no longer compatible with the legacy backends.
    """
    data = ModuleData()

    # NOTE: pipeline.gtir has not had prune_unused_parameters applied.
    all_params = pipeline.gtir.params

    node = pipeline.full()
    oir = gtir_to_oir.GTIRToOIR().visit(node)
    field_extents = compute_fields_extents(oir)
    accesses = compute_access_kinds(oir)

    for decl in (param for param in all_params
                 if isinstance(param, gtir.FieldDecl)):
        access = accesses[decl.name]
        dtype = numpy.dtype(decl.dtype.name.lower())

        if access != AccessKind.NONE:
            k_boundary = compute_k_boundary(node)[decl.name]
            boundary = Boundary(*field_extents[decl.name].to_boundary()[0:2],
                                k_boundary)
        else:
            boundary = Boundary.zeros(ndims=3)

        data.field_info[decl.name] = FieldInfo(
            access=access,
            boundary=boundary,
            axes=tuple(dimension_flags_to_names(decl.dimensions).upper()),
            data_dims=tuple(decl.data_dims),
            dtype=dtype,
        )

    for decl in (param for param in all_params
                 if isinstance(param, gtir.ScalarDecl)):
        access = accesses[decl.name]
        dtype = numpy.dtype(decl.dtype.name.lower())
        data.parameter_info[decl.name] = ParameterInfo(access=access,
                                                       dtype=dtype)

    data.unreferenced = [
        *sorted(name for name in accesses if accesses[name] == AccessKind.NONE)
    ]
    return data
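The dtype lookup above assumes that lowercasing a GTIR dtype enum member name (for example FLOAT64 or INT32; these exact names are an assumption here) yields a string NumPy accepts. A minimal standalone check of that convention:

import numpy

# Lowercasing an enum-style name produces a string NumPy resolves to the expected dtype.
assert numpy.dtype("FLOAT64".lower()) == numpy.dtype("float64")
assert numpy.dtype("INT32".lower()) == numpy.dtype("int32")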
Example no. 6
def __call__(self, definition_ir) -> Dict[str, Dict[str, str]]:
    gtir = DefIRToGTIR.apply(definition_ir)
    gtir_without_unused_params = prune_unused_parameters(gtir)
    dtype_deduced = resolve_dtype(gtir_without_unused_params)
    upcasted = upcast(dtype_deduced)
    oir = gtir_to_oir.GTIRToOIR().visit(upcasted)
    oir = self._optimize_oir(oir)
    cuir = oir_to_cuir.OIRToCUIR().visit(oir)
    cuir = kernel_fusion.FuseKernels().visit(cuir)
    cuir = extent_analysis.ComputeExtents().visit(cuir)
    cuir = extent_analysis.CacheExtents().visit(cuir)
    implementation = cuir_codegen.CUIRCodegen.apply(cuir)
    bindings = GTCCudaBindingsCodegen.apply(cuir, module_name=self.module_name)
    return {
        "computation": {"computation.hpp": implementation},
        "bindings": {"bindings.cu": bindings},
    }
Example no. 7
def __call__(self, definition_ir) -> Dict[str, Dict[str, str]]:
    gtir = GtirPipeline(DefIRToGTIR.apply(definition_ir)).full()
    oir = gtir_to_oir.GTIRToOIR().visit(gtir)
    oir = self._optimize_oir(oir)
    gtcpp = oir_to_gtcpp.OIRToGTCpp().visit(oir)
    implementation = gtcpp_codegen.GTCppCodegen.apply(gtcpp, gt_backend_t=self.gt_backend_t)
    bindings = GTCppBindingsCodegen.apply(
        gtcpp, module_name=self.module_name, gt_backend_t=self.gt_backend_t
    )
    bindings_ext = ".cu" if self.gt_backend_t == "gpu" else ".cpp"
    return {
        "computation": {"computation.hpp": implementation},
        "bindings": {"bindings" + bindings_ext: bindings},
    }
Example no. 8
def __call__(self, definition_ir) -> Dict[str, Dict[str, str]]:
    gtir = GtirPipeline(DefIRToGTIR.apply(definition_ir)).full()
    oir = OirPipeline(gtir_to_oir.GTIRToOIR().visit(gtir)).full(
        skip=[FillFlushToLocalKCaches]
    )
    gtcpp = oir_to_gtcpp.OIRToGTCpp().visit(oir)
    implementation = gtcpp_codegen.GTCppCodegen.apply(
        gtcpp, gt_backend_t=self.backend.GT_BACKEND_T
    )
    bindings = GTCppBindingsCodegen.apply(
        gtcpp, module_name=self.module_name, backend=self.backend
    )
    bindings_ext = ".cu" if self.backend.GT_BACKEND_T == "gpu" else ".cpp"
    return {
        "computation": {"computation.hpp": implementation},
        "bindings": {"bindings" + bindings_ext: bindings},
    }
Example no. 9
def __call__(self, definition_ir) -> Dict[str, Dict[str, str]]:
    gtir = GtirPipeline(DefIRToGTIR.apply(definition_ir)).full()
    oir = OirPipeline(gtir_to_oir.GTIRToOIR().visit(gtir)).full(
        skip=[NoFieldAccessPruning]
    )
    cuir = oir_to_cuir.OIRToCUIR().visit(oir)
    cuir = kernel_fusion.FuseKernels().visit(cuir)
    cuir = extent_analysis.ComputeExtents().visit(cuir)
    cuir = extent_analysis.CacheExtents().visit(cuir)
    implementation = cuir_codegen.CUIRCodegen.apply(cuir)
    bindings = GTCCudaBindingsCodegen.apply(
        cuir, module_name=self.module_name, backend=self.backend
    )
    return {
        "computation": {"computation.hpp": implementation},
        "bindings": {"bindings.cu": bindings},
    }
Example no. 10
def __call__(self, definition_ir) -> Dict[str, Dict[str, str]]:
    gtir = DefIRToGTIR.apply(definition_ir)
    gtir_without_unused_params = prune_unused_parameters(gtir)
    dtype_deduced = resolve_dtype(gtir_without_unused_params)
    upcasted = upcast(dtype_deduced)
    oir = gtir_to_oir.GTIRToOIR().visit(upcasted)
    oir = self._optimize_oir(oir)
    gtcpp = oir_to_gtcpp.OIRToGTCpp().visit(oir)
    implementation = gtcpp_codegen.GTCppCodegen.apply(gtcpp, gt_backend_t=self.gt_backend_t)
    bindings = GTCppBindingsCodegen.apply(
        gtcpp, module_name=self.module_name, gt_backend_t=self.gt_backend_t
    )
    bindings_ext = ".cu" if self.gt_backend_t == "gpu" else ".cpp"
    return {
        "computation": {"computation.hpp": implementation},
        "bindings": {"bindings" + bindings_ext: bindings},
    }
Example no. 11
    def generate_dace_args(self, gtir, sdfg):
        oir = gtir_to_oir.GTIRToOIR().visit(gtir)
        field_extents = compute_fields_extents(oir, add_k=True)

        offset_dict: Dict[str, Tuple[int, int, int]] = {
            k: (-v[0][0], -v[1][0], -v[2][0])
            for k, v in field_extents.items()
        }
        k_origins = {
            field_name: boundary[0]
            for field_name, boundary in compute_k_boundary(gtir).items()
        }
        for name, origin in k_origins.items():
            offset_dict[name] = (offset_dict[name][0], offset_dict[name][1],
                                 origin)

        symbols = {f"__{var}": f"__{var}" for var in "IJK"}
        for name, array in sdfg.arrays.items():
            if array.transient:
                symbols[f"__{name}_K_stride"] = "1"
                symbols[f"__{name}_J_stride"] = str(array.shape[2])
                symbols[f"__{name}_I_stride"] = str(array.shape[1] *
                                                    array.shape[2])
            else:
                dims = [
                    dim for dim, select in zip("IJK", array_dimensions(array))
                    if select
                ]
                data_ndim = len(array.shape) - len(dims)

                # api field strides
                fmt = "gt::sid::get_stride<{dim}>(gt::sid::get_strides(__{name}_sid))"

                symbols.update({
                    f"__{name}_{dim}_stride":
                    fmt.format(dim=f"gt::stencil::dim::{dim.lower()}",
                               name=name)
                    for dim in dims
                })
                symbols.update({
                    f"__{name}_d{dim}_stride":
                    fmt.format(dim=f"gt::integral_constant<int, {3 + dim}>",
                               name=name)
                    for dim in range(data_ndim)
                })

                # api field pointers
                fmt = """gt::sid::multi_shifted(
                             gt::sid::get_origin(__{name}_sid)(),
                             gt::sid::get_strides(__{name}_sid),
                             std::array<gt::int_t, {ndim}>{{{origin}}}
                         )"""
                origin = tuple(-offset_dict[name][idx]
                               for idx, var in enumerate("IJK") if any(
                                   dace.symbolic.pystr_to_symbolic(f"__{var}")
                                   in s.free_symbols for s in array.shape
                                   if hasattr(s, "free_symbols")))
                symbols[name] = fmt.format(name=name,
                                           ndim=len(array.shape),
                                           origin=",".join(
                                               str(o) for o in origin))
        # the remaining arguments are variables and can be passed by name
        for sym in sdfg.signature_arglist(with_types=False, for_call=True):
            if sym not in symbols:
                symbols[sym] = sym

        # return strings in order of sdfg signature
        return [
            symbols[s]
            for s in sdfg.signature_arglist(with_types=False, for_call=True)
        ]
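For transient arrays, the stride symbols above encode a row-major layout with the K axis contiguous: the K stride is 1, the J stride equals the K extent (array.shape[2]), and the I stride equals the product of the J and K extents. A minimal sketch with made-up extents, just to spell out the arithmetic:

# Illustrative only; the shape values are invented.
shape = (4, 5, 6)  # (I, J, K) extents of a transient array
strides = {"K": 1, "J": shape[2], "I": shape[1] * shape[2]}
assert strides == {"K": 1, "J": 6, "I": 30}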