def __call__(self, definition_ir) -> Dict[str, Dict[str, str]]:
    # Lower the definition IR through GTIR to OIR.
    gtir = GtirPipeline(DefIRToGTIR.apply(definition_ir)).full()
    base_oir = gtir_to_oir.GTIRToOIR().visit(gtir)
    # Run the (optionally user-supplied) OIR optimization pipeline.
    oir_pipeline = self.backend.builder.options.backend_opts.get(
        "oir_pipeline", DefaultPipeline(skip=[NoFieldAccessPruning])
    )
    oir = oir_pipeline.run(base_oir)
    oir = FillFlushToLocalKCaches().visit(oir)
    # Lower to CUIR and apply CUDA-specific transformations.
    cuir = oir_to_cuir.OIRToCUIR().visit(oir)
    cuir = kernel_fusion.FuseKernels().visit(cuir)
    cuir = extent_analysis.CacheExtents().visit(cuir)
    # Generate the computation and bindings sources.
    format_source = self.backend.builder.options.format_source
    implementation = cuir_codegen.CUIRCodegen.apply(cuir, format_source=format_source)
    bindings = GTCCudaBindingsCodegen.apply(
        cuir,
        module_name=self.module_name,
        backend=self.backend,
        format_source=format_source,
    )
    return {
        "computation": {"computation.hpp": implementation},
        "bindings": {"bindings.cu": bindings},
    }

def __call__(self, definition_ir: StencilDefinition) -> Dict[str, Dict[str, str]]:
    # Lower the definition IR through GTIR to OIR.
    gtir = GtirPipeline(DefIRToGTIR.apply(definition_ir)).full()
    base_oir = gtir_to_oir.GTIRToOIR().visit(gtir)
    oir_pipeline = self.backend.builder.options.backend_opts.get(
        "oir_pipeline", DefaultPipeline(skip=[MaskStmtMerging, MaskInlining])
    )
    oir = oir_pipeline.run(base_oir)
    # Build the SDFG, then let DaCe expand and simplify it.
    sdfg = OirSDFGBuilder().visit(oir)
    sdfg.expand_library_nodes(recursive=True)
    sdfg.apply_strict_transformations(validate=True)
    # Generate the computation and bindings sources.
    implementation = DaCeComputationCodegen.apply(gtir, sdfg)
    bindings = DaCeBindingsCodegen.apply(
        gtir, sdfg, module_name=self.module_name, backend=self.backend
    )
    bindings_ext = ".cu" if self.backend.GT_BACKEND_T == "gpu" else ".cpp"
    return {
        "computation": {"computation.hpp": implementation},
        "bindings": {"bindings" + bindings_ext: bindings},
    }

def __call__(self, definition_ir) -> Dict[str, Dict[str, str]]:
    # Lower the definition IR through GTIR to OIR.
    gtir = GtirPipeline(DefIRToGTIR.apply(definition_ir)).full()
    base_oir = gtir_to_oir.GTIRToOIR().visit(gtir)
    oir_pipeline = self.backend.builder.options.backend_opts.get(
        "oir_pipeline", DefaultPipeline(skip=[FillFlushToLocalKCaches])
    )
    oir = oir_pipeline.run(base_oir)
    # Lower to the GridTools C++ IR and generate sources.
    gtcpp = oir_to_gtcpp.OIRToGTCpp().visit(oir)
    format_source = self.backend.builder.options.format_source
    implementation = gtcpp_codegen.GTCppCodegen.apply(
        gtcpp, gt_backend_t=self.backend.GT_BACKEND_T, format_source=format_source
    )
    bindings = GTCppBindingsCodegen.apply(
        gtcpp,
        module_name=self.module_name,
        backend=self.backend,
        format_source=format_source,
    )
    bindings_ext = ".cu" if self.backend.GT_BACKEND_T == "gpu" else ".cpp"
    return {
        "computation": {"computation.hpp": implementation},
        "bindings": {"bindings" + bindings_ext: bindings},
    }

def _make_npir(self) -> npir.Computation:
    base_oir = GTIRToOIR().visit(self.builder.gtir)
    # Cache-related OIR passes are skipped by default for this backend.
    oir_pipeline = self.builder.options.backend_opts.get(
        "oir_pipeline",
        DefaultPipeline(
            skip=[
                IJCacheDetection,
                KCacheDetection,
                PruneKCacheFills,
                PruneKCacheFlushes,
                FillFlushToLocalKCaches,
            ]
        ),
    )
    oir = oir_pipeline.run(base_oir)
    return OirToNpir().visit(oir)

def _make_npir(self) -> npir.Computation:
    base_oir = GTIRToOIR().visit(self.builder.gtir)
    # Cache-related OIR passes are skipped by default for this backend.
    oir_pipeline = self.builder.options.backend_opts.get(
        "oir_pipeline",
        DefaultPipeline(
            skip=[
                IJCacheDetection,
                KCacheDetection,
                PruneKCacheFills,
                PruneKCacheFlushes,
            ]
        ),
    )
    oir_node = oir_pipeline.run(base_oir)
    base_npir = OirToNpir().visit(oir_node)
    # Rewrite scalars as temporaries before returning the numpy IR.
    npir_node = ScalarsToTemporaries().visit(base_npir)
    return npir_node

def __call__(self, stencil_ir: gtir.Stencil) -> Dict[str, Dict[str, str]]:
    base_oir = GTIRToOIR().visit(stencil_ir)
    oir_pipeline = self.backend.builder.options.backend_opts.get(
        "oir_pipeline", DefaultPipeline(skip=[MaskInlining])
    )
    oir_node = oir_pipeline.run(base_oir)
    # Build the SDFG, move it to the target device, and expand it.
    sdfg = OirSDFGBuilder().visit(oir_node)
    _to_device(sdfg, self.backend.storage_info["device"])
    sdfg = _expand_and_finalize_sdfg(
        stencil_ir, sdfg, self.backend.storage_info["layout_map"]
    )

    # Strip history from the SDFG for faster save/load.
    for tmp_sdfg in sdfg.all_sdfgs_recursive():
        tmp_sdfg.transformation_hist = []
        tmp_sdfg.orig_sdfg = None

    # Generate the computation and bindings sources, plus the serialized SDFG.
    sources: Dict[str, Dict[str, str]]
    implementation = DaCeComputationCodegen.apply(stencil_ir, sdfg)
    bindings = DaCeBindingsCodegen.apply(
        stencil_ir, sdfg, module_name=self.module_name, backend=self.backend
    )
    bindings_ext = "cu" if self.backend.storage_info["device"] == "gpu" else "cpp"
    sources = {
        "computation": {"computation.hpp": implementation},
        "bindings": {f"bindings.{bindings_ext}": bindings},
        "info": {self.backend.builder.module_name + ".sdfg": dumps(sdfg.to_json())},
    }
    return sources

def __call__(self, stencil_ir: gtir.Stencil) -> Dict[str, Dict[str, str]]:
    stencil_ir = GtirPipeline(stencil_ir).full()
    base_oir = GTIRToOIR().visit(stencil_ir)
    # This backend runs the full default OIR pipeline (no skipped steps).
    oir_pipeline = self.backend.builder.options.backend_opts.get(
        "oir_pipeline", DefaultPipeline()
    )
    oir_node = oir_pipeline.run(base_oir)
    # Lower to the GridTools C++ IR and generate sources.
    gtcpp_ir = OIRToGTCpp().visit(oir_node)
    format_source = self.backend.builder.options.format_source
    implementation = gtcpp_codegen.GTCppCodegen.apply(
        gtcpp_ir, gt_backend_t=self.backend.GT_BACKEND_T, format_source=format_source
    )
    bindings = GTCppBindingsCodegen.apply(
        gtcpp_ir,
        module_name=self.module_name,
        backend=self.backend,
        format_source=format_source,
    )
    bindings_ext = ".cu" if self.backend.GT_BACKEND_T == "gpu" else ".cpp"
    return {
        "computation": {"computation.hpp": implementation},
        "bindings": {"bindings" + bindings_ext: bindings},
    }

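# Hedged usage sketch, not part of the backends above: every __call__ and
# _make_npir above first looks up "oir_pipeline" in backend_opts and only
# falls back to a DefaultPipeline, so callers can substitute their own pass
# order. The name `custom_backend_opts` is illustrative; how this dict
# reaches `builder.options.backend_opts` is outside this section.
custom_backend_opts = {"oir_pipeline": DefaultPipeline(skip=[AdjacentLoopMerging])}
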
def test_skip():
    skip = [AdjacentLoopMerging]
    pipeline = DefaultPipeline(skip=skip)
    pipeline.run(StencilFactory())
    assert all(s not in pipeline.steps for s in skip)

def test_no_skipping():
    pipeline = DefaultPipeline()
    pipeline.run(StencilFactory())
    assert pipeline.steps == DefaultPipeline.all_steps()

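# Hypothetical companion test (a sketch, assuming the same StencilFactory
# fixture and that each skipped class appears exactly once in
# DefaultPipeline.all_steps()): skipping several steps at once should remove
# each of them from the executed step list and nothing else.
def test_skip_multiple():
    skip = [AdjacentLoopMerging, MaskInlining]
    pipeline = DefaultPipeline(skip=skip)
    pipeline.run(StencilFactory())
    assert all(s not in pipeline.steps for s in skip)
    assert len(pipeline.steps) == len(DefaultPipeline.all_steps()) - len(skip)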