def __call__(self, definition_ir: StencilDefinition) -> Dict[str, Dict[str, str]]:
    """Lower a stencil definition IR to DaCe-generated C++ sources.

    Returns a two-level mapping: "computation" holds computation.hpp,
    "bindings" holds bindings.cpp or bindings.cu depending on the backend.
    """
    # Definition IR -> GTIR -> OIR.
    gtir = GtirPipeline(DefIRToGTIR.apply(definition_ir)).full()
    base_oir = gtir_to_oir.GTIRToOIR().visit(gtir)

    # The OIR optimization pipeline can be overridden via backend options.
    backend_opts = self.backend.builder.options.backend_opts
    fallback_pipeline = DefaultPipeline(skip=[MaskStmtMerging, MaskInlining])
    oir = backend_opts.get("oir_pipeline", fallback_pipeline).run(base_oir)

    # Build and simplify the SDFG before code generation.
    sdfg = OirSDFGBuilder().visit(oir)
    sdfg.expand_library_nodes(recursive=True)
    sdfg.apply_strict_transformations(validate=True)

    implementation = DaCeComputationCodegen.apply(gtir, sdfg)
    bindings = DaCeBindingsCodegen.apply(
        gtir, sdfg, module_name=self.module_name, backend=self.backend
    )

    ext = ".cu" if self.backend.GT_BACKEND_T == "gpu" else ".cpp"
    return {
        "computation": {"computation.hpp": implementation},
        "bindings": {f"bindings{ext}": bindings},
    }
def __call__(self, definition_ir) -> Dict[str, Dict[str, str]]:
    """Lower a stencil definition IR to GridTools C++ sources.

    Returns a two-level mapping: "computation" holds computation.hpp,
    "bindings" holds bindings.cpp or bindings.cu depending on the backend.
    """
    # Definition IR -> GTIR -> OIR.
    gtir = GtirPipeline(DefIRToGTIR.apply(definition_ir)).full()
    base_oir = gtir_to_oir.GTIRToOIR().visit(gtir)

    # OIR optimizations; the pipeline is overridable through backend options.
    fallback_pipeline = DefaultPipeline(skip=[FillFlushToLocalKCaches])
    backend_opts = self.backend.builder.options.backend_opts
    oir = backend_opts.get("oir_pipeline", fallback_pipeline).run(base_oir)

    # OIR -> GT C++ and code generation.
    gtcpp = oir_to_gtcpp.OIRToGTCpp().visit(oir)
    format_source = self.backend.builder.options.format_source
    implementation = gtcpp_codegen.GTCppCodegen.apply(
        gtcpp, gt_backend_t=self.backend.GT_BACKEND_T, format_source=format_source
    )
    bindings = GTCppBindingsCodegen.apply(
        gtcpp,
        module_name=self.module_name,
        backend=self.backend,
        format_source=format_source,
    )

    ext = ".cu" if self.backend.GT_BACKEND_T == "gpu" else ".cpp"
    return {
        "computation": {"computation.hpp": implementation},
        "bindings": {f"bindings{ext}": bindings},
    }
def __call__(self, definition_ir) -> Dict[str, Dict[str, str]]:
    """Lower a stencil definition IR to CUDA sources via CUIR.

    Returns a two-level mapping: "computation" holds computation.hpp,
    "bindings" holds bindings.cu.
    """
    # Definition IR -> GTIR -> OIR.
    gtir = GtirPipeline(DefIRToGTIR.apply(definition_ir)).full()
    base_oir = gtir_to_oir.GTIRToOIR().visit(gtir)

    # OIR optimizations; the pipeline is overridable through backend options.
    fallback_pipeline = DefaultPipeline(skip=[NoFieldAccessPruning])
    backend_opts = self.backend.builder.options.backend_opts
    oir = backend_opts.get("oir_pipeline", fallback_pipeline).run(base_oir)
    # Apply k-cache fill/flush lowering after the main pipeline.
    oir = FillFlushToLocalKCaches().visit(oir)

    # OIR -> CUIR with kernel fusion and cache-extent analysis.
    cuir = oir_to_cuir.OIRToCUIR().visit(oir)
    cuir = kernel_fusion.FuseKernels().visit(cuir)
    cuir = extent_analysis.CacheExtents().visit(cuir)

    format_source = self.backend.builder.options.format_source
    implementation = cuir_codegen.CUIRCodegen.apply(cuir, format_source=format_source)
    bindings = GTCCudaBindingsCodegen.apply(
        cuir,
        module_name=self.module_name,
        backend=self.backend,
        format_source=format_source,
    )

    return {
        "computation": {"computation.hpp": implementation},
        "bindings": {"bindings.cu": bindings},
    }
def __call__(self, definition_ir: StencilDefinition) -> Dict[str, Dict[str, str]]:
    """Lower a stencil definition IR to DaCe-generated C++ sources.

    Returns a two-level mapping: "computation" holds computation.hpp,
    "bindings" holds bindings.cpp or bindings.cu depending on the backend.
    """
    # Definition IR -> GTIR, then prune/deduce/upcast before lowering to OIR.
    gtir = DefIRToGTIR.apply(definition_ir)
    pruned = prune_unused_parameters(gtir)
    typed = upcast(resolve_dtype(pruned))
    oir = self._optimize_oir(gtir_to_oir.GTIRToOIR().visit(typed))

    # Build the SDFG and expand library nodes in place.
    sdfg = OirSDFGBuilder().visit(oir)
    sdfg.expand_library_nodes(recursive=True)
    # TODO uncomment once the branch dace/linus-fixes-8 is merged into dace/master
    # sdfg.apply_strict_transformations(validate=True)  # noqa: E800 Found commented out code

    implementation = DaCeComputationCodegen.apply(gtir, sdfg)
    bindings = DaCeBindingsCodegen.apply(gtir, sdfg, module_name=self.module_name)

    ext = ".cu" if self.backend.GT_BACKEND_T == "gpu" else ".cpp"
    return {
        "computation": {"computation.hpp": implementation},
        "bindings": {f"bindings{ext}": bindings},
    }
def stencil_def_to_oir(stencil_def, externals):
    """Compile a GTScript stencil function down to OIR.

    Runs the GTScript frontend on *stencil_def* with the given *externals*,
    then lowers the resulting definition IR through GTIR to OIR.
    """
    options = BuildOptions(
        name=stencil_def.__name__,
        module=__name__,
        rebuild=True,
        backend_opts={},
        build_info=None,
    )
    definition_ir = GTScriptFrontend.generate(
        stencil_def, externals=externals, options=options
    )
    gtir = GtirPipeline(DefIRToGTIR.apply(definition_ir)).full()
    return GTIRToOIR().visit(gtir)
def __call__(self, definition_ir) -> Dict[str, Dict[str, str]]:
    """Lower a stencil definition IR to CUDA sources via CUIR.

    Returns a two-level mapping: "computation" holds computation.hpp,
    "bindings" holds bindings.cu.
    """
    # Definition IR -> GTIR, then prune/deduce/upcast before lowering to OIR.
    gtir = DefIRToGTIR.apply(definition_ir)
    pruned = prune_unused_parameters(gtir)
    typed = upcast(resolve_dtype(pruned))
    oir = self._optimize_oir(gtir_to_oir.GTIRToOIR().visit(typed))

    # OIR -> CUIR with kernel fusion and extent analyses.
    cuir = oir_to_cuir.OIRToCUIR().visit(oir)
    cuir = kernel_fusion.FuseKernels().visit(cuir)
    cuir = extent_analysis.ComputeExtents().visit(cuir)
    cuir = extent_analysis.CacheExtents().visit(cuir)

    implementation = cuir_codegen.CUIRCodegen.apply(cuir)
    bindings = GTCCudaBindingsCodegen.apply(cuir, module_name=self.module_name)
    return {
        "computation": {"computation.hpp": implementation},
        "bindings": {"bindings.cu": bindings},
    }
def __call__(self, definition_ir) -> Dict[str, Dict[str, str]]:
    """Lower a stencil definition IR to GridTools C++ sources.

    Returns a two-level mapping: "computation" holds computation.hpp,
    "bindings" holds bindings.cpp or bindings.cu depending on the backend.
    """
    # Definition IR -> GTIR -> OIR, with backend-specific OIR optimizations.
    gtir = GtirPipeline(DefIRToGTIR.apply(definition_ir)).full()
    oir = self._optimize_oir(gtir_to_oir.GTIRToOIR().visit(gtir))

    # OIR -> GT C++ and code generation.
    gtcpp = oir_to_gtcpp.OIRToGTCpp().visit(oir)
    implementation = gtcpp_codegen.GTCppCodegen.apply(
        gtcpp, gt_backend_t=self.gt_backend_t
    )
    bindings = GTCppBindingsCodegen.apply(
        gtcpp, module_name=self.module_name, gt_backend_t=self.gt_backend_t
    )

    ext = ".cu" if self.gt_backend_t == "gpu" else ".cpp"
    return {
        "computation": {"computation.hpp": implementation},
        "bindings": {f"bindings{ext}": bindings},
    }
def __call__(self, definition_ir) -> Dict[str, Dict[str, str]]:
    """Lower a stencil definition IR to GridTools C++ sources.

    Returns a two-level mapping: "computation" holds computation.hpp,
    "bindings" holds bindings.cpp or bindings.cu depending on the backend.
    """
    # Definition IR -> GTIR -> OIR (full pipeline, minus k-cache fill/flush).
    gtir = GtirPipeline(DefIRToGTIR.apply(definition_ir)).full()
    base_oir = gtir_to_oir.GTIRToOIR().visit(gtir)
    oir = OirPipeline(base_oir).full(skip=[FillFlushToLocalKCaches])

    # OIR -> GT C++ and code generation.
    gtcpp = oir_to_gtcpp.OIRToGTCpp().visit(oir)
    implementation = gtcpp_codegen.GTCppCodegen.apply(
        gtcpp, gt_backend_t=self.backend.GT_BACKEND_T
    )
    bindings = GTCppBindingsCodegen.apply(
        gtcpp, module_name=self.module_name, backend=self.backend
    )

    ext = ".cu" if self.backend.GT_BACKEND_T == "gpu" else ".cpp"
    return {
        "computation": {"computation.hpp": implementation},
        "bindings": {f"bindings{ext}": bindings},
    }
def __call__(self, definition_ir) -> Dict[str, Dict[str, str]]:
    """Lower a stencil definition IR to CUDA sources via CUIR.

    Returns a two-level mapping: "computation" holds computation.hpp,
    "bindings" holds bindings.cu.
    """
    # Definition IR -> GTIR -> OIR (full pipeline, minus access pruning).
    gtir = GtirPipeline(DefIRToGTIR.apply(definition_ir)).full()
    base_oir = gtir_to_oir.GTIRToOIR().visit(gtir)
    oir = OirPipeline(base_oir).full(skip=[NoFieldAccessPruning])

    # OIR -> CUIR with kernel fusion and extent analyses.
    cuir = oir_to_cuir.OIRToCUIR().visit(oir)
    cuir = kernel_fusion.FuseKernels().visit(cuir)
    cuir = extent_analysis.ComputeExtents().visit(cuir)
    cuir = extent_analysis.CacheExtents().visit(cuir)

    implementation = cuir_codegen.CUIRCodegen.apply(cuir)
    bindings = GTCCudaBindingsCodegen.apply(
        cuir, module_name=self.module_name, backend=self.backend
    )
    return {
        "computation": {"computation.hpp": implementation},
        "bindings": {"bindings.cu": bindings},
    }
def __call__(self, definition_ir) -> Dict[str, Dict[str, str]]:
    """Lower a stencil definition IR to GridTools C++ sources.

    Returns a two-level mapping: "computation" holds computation.hpp,
    "bindings" holds bindings.cpp or bindings.cu depending on the backend.
    """
    # Definition IR -> GTIR, then prune/deduce/upcast before lowering to OIR.
    gtir = DefIRToGTIR.apply(definition_ir)
    pruned = prune_unused_parameters(gtir)
    typed = upcast(resolve_dtype(pruned))
    oir = self._optimize_oir(gtir_to_oir.GTIRToOIR().visit(typed))

    # OIR -> GT C++ and code generation.
    gtcpp = oir_to_gtcpp.OIRToGTCpp().visit(oir)
    implementation = gtcpp_codegen.GTCppCodegen.apply(
        gtcpp, gt_backend_t=self.gt_backend_t
    )
    bindings = GTCppBindingsCodegen.apply(
        gtcpp, module_name=self.module_name, gt_backend_t=self.gt_backend_t
    )

    ext = ".cu" if self.gt_backend_t == "gpu" else ".cpp"
    return {
        "computation": {"computation.hpp": implementation},
        "bindings": {f"bindings{ext}": bindings},
    }
def defir_to_gtir():
    """Yield a fresh DefIRToGTIR lowering instance (generator/fixture style)."""
    instance = DefIRToGTIR()
    yield instance
def gtir_pipeline(self) -> GtirPipeline:
    """Return the GtirPipeline for this build, creating and caching it lazily.

    The pipeline is memoized in ``self._build_data`` under "gtir_pipeline";
    ``setdefault`` ensures at most one instance is stored.
    """
    pipeline = self._build_data.get("gtir_pipeline")
    if not pipeline:
        # First access (or a falsy cached value): build and memoize.
        pipeline = self._build_data.setdefault(
            "gtir_pipeline", GtirPipeline(DefIRToGTIR.apply(self.definition_ir))
        )
    return pipeline