Each example below is a backend __call__ method that lowers a stencil's definition IR through GTIR and OIR to the GTCpp IR, then returns the generated C++ computation source and language bindings as a nested {category: {filename: source}} mapping.

Example #1: OIR pass pipeline taken from backend options, with optional source formatting
def __call__(self, definition_ir) -> Dict[str, Dict[str, str]]:
    # Lower the definition IR to GTIR and run the full GTIR pass pipeline.
    gtir = GtirPipeline(DefIRToGTIR.apply(definition_ir)).full()
    # Lower GTIR to OIR and run the OIR optimizations. The pass pipeline can be
    # overridden through the "oir_pipeline" backend option; the default skips
    # the fill/flush-to-local-k-caches pass.
    base_oir = gtir_to_oir.GTIRToOIR().visit(gtir)
    oir_pipeline = self.backend.builder.options.backend_opts.get(
        "oir_pipeline", DefaultPipeline(skip=[FillFlushToLocalKCaches])
    )
    oir = oir_pipeline.run(base_oir)
    # Lower OIR to the GTCpp IR and generate the C++ computation source.
    gtcpp = oir_to_gtcpp.OIRToGTCpp().visit(oir)
    format_source = self.backend.builder.options.format_source
    implementation = gtcpp_codegen.GTCppCodegen.apply(
        gtcpp,
        gt_backend_t=self.backend.GT_BACKEND_T,
        format_source=format_source,
    )
    # Generate the language bindings; the GPU backend emits a CUDA source file.
    bindings = GTCppBindingsCodegen.apply(
        gtcpp,
        module_name=self.module_name,
        backend=self.backend,
        format_source=format_source,
    )
    bindings_ext = ".cu" if self.backend.GT_BACKEND_T == "gpu" else ".cpp"
    # Group the generated sources by category: {category: {filename: source}}.
    return {
        "computation": {"computation.hpp": implementation},
        "bindings": {"bindings" + bindings_ext: bindings},
    }
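
The returned mapping groups the generated sources by output category. A minimal sketch of how a caller might write them to disk (write_sources and output_dir are hypothetical, not part of the original backend code):

from pathlib import Path
from typing import Dict


def write_sources(sources: Dict[str, Dict[str, str]], output_dir: str) -> None:
    # Write each generated file under a subdirectory named after its category,
    # e.g. <output_dir>/computation/computation.hpp and <output_dir>/bindings/bindings.cpp.
    for category, files in sources.items():
        target = Path(output_dir) / category
        target.mkdir(parents=True, exist_ok=True)
        for filename, source in files.items():
            (target / filename).write_text(source)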
Example #2: OIR optimization delegated to a _optimize_oir helper
def __call__(self, definition_ir) -> Dict[str, Dict[str, str]]:
    gtir = GtirPipeline(DefIRToGTIR.apply(definition_ir)).full()
    oir = gtir_to_oir.GTIRToOIR().visit(gtir)
    # OIR optimization is delegated to a helper instead of being read from
    # backend options as in Example #1.
    oir = self._optimize_oir(oir)
    gtcpp = oir_to_gtcpp.OIRToGTCpp().visit(oir)
    implementation = gtcpp_codegen.GTCppCodegen.apply(
        gtcpp, gt_backend_t=self.gt_backend_t
    )
    bindings = GTCppBindingsCodegen.apply(
        gtcpp, module_name=self.module_name, gt_backend_t=self.gt_backend_t
    )
    bindings_ext = ".cu" if self.gt_backend_t == "gpu" else ".cpp"
    return {
        "computation": {"computation.hpp": implementation},
        "bindings": {"bindings" + bindings_ext: bindings},
    }
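
The _optimize_oir helper is not shown in this example. A minimal sketch of what it could look like, reusing the DefaultPipeline and FillFlushToLocalKCaches names from Example #1 (an assumption, not the actual implementation behind this snippet):

def _optimize_oir(self, oir):
    # Hypothetical helper: run the default OIR pass pipeline, skipping the
    # fill/flush-to-local-k-caches transformation as in Example #1.
    return DefaultPipeline(skip=[FillFlushToLocalKCaches]).run(oir)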
Example #3: OIR passes run directly via OirPipeline(...).full(skip=...)
def __call__(self, definition_ir) -> Dict[str, Dict[str, str]]:
    gtir = GtirPipeline(DefIRToGTIR.apply(definition_ir)).full()
    # Run the full OIR pipeline directly, skipping the same
    # fill/flush-to-local-k-caches pass that Example #1 skips by default.
    oir = OirPipeline(gtir_to_oir.GTIRToOIR().visit(gtir)).full(
        skip=[FillFlushToLocalKCaches]
    )
    gtcpp = oir_to_gtcpp.OIRToGTCpp().visit(oir)
    implementation = gtcpp_codegen.GTCppCodegen.apply(
        gtcpp, gt_backend_t=self.backend.GT_BACKEND_T
    )
    bindings = GTCppBindingsCodegen.apply(
        gtcpp, module_name=self.module_name, backend=self.backend
    )
    bindings_ext = ".cu" if self.backend.GT_BACKEND_T == "gpu" else ".cpp"
    return {
        "computation": {"computation.hpp": implementation},
        "bindings": {"bindings" + bindings_ext: bindings},
    }
Example #4: GTIR passes applied individually instead of GtirPipeline(...).full()
def __call__(self, definition_ir) -> Dict[str, Dict[str, str]]:
    # Apply the GTIR-level passes one by one instead of using
    # GtirPipeline(...).full(): drop unused parameters, deduce dtypes,
    # then upcast.
    gtir = DefIRToGTIR.apply(definition_ir)
    gtir_without_unused_params = prune_unused_parameters(gtir)
    dtype_deduced = resolve_dtype(gtir_without_unused_params)
    upcasted = upcast(dtype_deduced)
    oir = gtir_to_oir.GTIRToOIR().visit(upcasted)
    oir = self._optimize_oir(oir)
    gtcpp = oir_to_gtcpp.OIRToGTCpp().visit(oir)
    implementation = gtcpp_codegen.GTCppCodegen.apply(
        gtcpp, gt_backend_t=self.gt_backend_t
    )
    bindings = GTCppBindingsCodegen.apply(
        gtcpp, module_name=self.module_name, gt_backend_t=self.gt_backend_t
    )
    bindings_ext = ".cu" if self.gt_backend_t == "gpu" else ".cpp"
    return {
        "computation": {"computation.hpp": implementation},
        "bindings": {"bindings" + bindings_ext: bindings},
    }
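
The four GTIR-level calls at the top of this example could be folded into one small helper. A minimal sketch (the wrapper name is hypothetical; the pass functions are the ones used above):

def lower_definition_ir_to_gtir(definition_ir):
    # Hypothetical wrapper around the GTIR-level passes used in Example #4:
    # drop unused parameters, deduce dtypes, then upcast.
    gtir = DefIRToGTIR.apply(definition_ir)
    gtir = prune_unused_parameters(gtir)
    gtir = resolve_dtype(gtir)
    return upcast(gtir)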