def stage_repeat_inline_closure(pipeline):
    """Re-run closure-call inlining on *pipeline*'s IR, then post-process.

    Requires ``pipeline.func_ir`` to be populated already.
    """
    assert pipeline.func_ir
    closure_inliner = InlineClosureCallPass(pipeline.func_ir,
                                            pipeline.flags.auto_parallel)
    closure_inliner.run()
    # Recompute IR metadata (CFG, Dels, variable lifetime) after inlining
    # has mutated the blocks.
    postproc.PostProcessor(pipeline.func_ir).run()
def stage_repeat_inline_closure(self):
    """Repeat closure-call inlining over the current IR and post-process."""
    assert self.func_ir
    inliner = InlineClosureCallPass(self.func_ir, self.flags.auto_parallel)
    inliner.run()
    # Refresh derived IR state invalidated by the inlining mutation.
    postproc.PostProcessor(self.func_ir).run()
def with_lifting(func_ir, typingctx, targetctx, flags, locals):
    """With-lifting transformation

    Rewrite the IR to extract all withs.
    Only the top-level withs are extracted.
    Returns the (the_new_ir, the_lifted_with_ir)
    """
    from numba import postproc

    def dispatcher_factory(func_ir, objectmode=False, **kwargs):
        # Build a dispatcher for a single lifted with-region.  Object-mode
        # regions get a dedicated dispatcher class with flags forced to
        # object mode.
        from numba.dispatcher import LiftedWith, ObjModeLiftedWith

        myflags = flags.copy()
        if objectmode:
            # Lifted with-block cannot looplift
            myflags.enable_looplift = False
            # Lifted with-block uses object mode
            myflags.enable_pyobject = True
            myflags.force_pyobject = True
            myflags.no_cpython_wrapper = False
            cls = ObjModeLiftedWith
        else:
            cls = LiftedWith
        return cls(func_ir, typingctx, targetctx, myflags, locals, **kwargs)

    postproc.PostProcessor(func_ir).run()  # ensure we have variable lifetime
    assert func_ir.variable_lifetime
    vlt = func_ir.variable_lifetime
    blocks = func_ir.blocks.copy()
    # find where with-contexts regions are
    withs = find_setupwiths(blocks)
    cfg = vlt.cfg
    # Reject regions the CFG cannot legally lift (e.g. irreducible entry).
    _legalize_withs_cfg(withs, cfg, blocks)
    # For each with-regions, mutate them according to
    # the kind of contextmanager
    sub_irs = []
    for (blk_start, blk_end) in withs:
        body_blocks = []
        for node in _cfg_nodes_in_region(cfg, blk_start, blk_end):
            body_blocks.append(node)
        _legalize_with_head(blocks[blk_start])
        # Find the contextmanager
        cmkind, extra = _get_with_contextmanager(func_ir, blocks, blk_start)
        # Mutate the body and get new IR
        sub = cmkind.mutate_with_body(func_ir, blocks, blk_start, blk_end,
                                      body_blocks, dispatcher_factory,
                                      extra)
        sub_irs.append(sub)
    if not sub_irs:
        # Unchanged
        new_ir = func_ir
    else:
        # Blocks were mutated; derive a fresh FunctionIR from them.
        new_ir = func_ir.derive(blocks)
    return new_ir, sub_irs
def run_pass(self, state):
    """Run array analysis over the IR, post-process, and record an IR
    snapshot so repeated applications can be checked for idempotence.
    """
    annotation = state.type_annotation
    analysis = ArrayAnalysis(state.typingctx, state.func_ir,
                             annotation.typemap, annotation.calltypes)
    state.array_analysis = analysis
    analysis.run(state.func_ir.blocks)
    postproc.PostProcessor(state.func_ir).run()
    # Keep a copy of the IR after this run; once we have at least two
    # copies, the idempotence hook (if set) can compare them.
    state.func_ir_copies.append(state.func_ir.copy())
    if state.test_idempotence and len(state.func_ir_copies) > 1:
        state.test_idempotence(state.func_ir_copies)
    # Report no further mutation requiring re-iteration.
    return False
def ir_processing_stage(func_ir):
    """Post-process *func_ir*, optionally dumping it when IR debugging is
    enabled, and return it.
    """
    postproc.PostProcessor(func_ir).run()
    if config.DEBUG or config.DUMP_IR:
        name = func_ir.func_id.func_qualname
        print(("IR DUMP: %s" % name).center(80, "-"))
        func_ir.dump()
        # Generators carry extra state worth dumping alongside the IR.
        if func_ir.is_generator:
            print(("GENERATOR INFO: %s" % name).center(80, "-"))
            func_ir.dump_generator_info()
    return func_ir
def run_frontend(func):
    """
    Run the compiler frontend over the given Python function, and return
    the function's canonical Numba IR.
    """
    # XXX make this a dedicated Pipeline?
    identity = bytecode.FunctionIdentity.from_function(func)
    bc = bytecode.ByteCode(func_id=identity)
    ir = interpreter.Interpreter(identity).interpret(bc)
    postproc.PostProcessor(ir).run()
    return ir
def stage_inline_pass(self):
    """
    Inline calls to locally defined closures.
    """
    # The frontend must have produced an IR before inlining can run.
    assert self.func_ir
    InlineClosureCallPass(self.func_ir, self.flags).run()
    # Strip stale Dels and rebuild IR metadata after the mutation.
    postproc.PostProcessor(self.func_ir).run()
    if config.DEBUG or config.DUMP_IR:
        name = self.func_ir.func_id.func_qualname
        print(("IR DUMP: %s" % name).center(80, "-"))
        self.func_ir.dump()
def run_frontend(func, inline_closures=False):
    """
    Run the compiler frontend over the given Python function, and return
    the function's canonical Numba IR.

    If inline_closures is Truthy then closure inlining will be run
    """
    # XXX make this a dedicated Pipeline?
    identity = bytecode.FunctionIdentity.from_function(func)
    bc = bytecode.ByteCode(func_id=identity)
    ir = interpreter.Interpreter(identity).interpret(bc)
    if inline_closures:
        # Run closure inlining with parallel transforms disabled and no
        # diagnostics sink, in untyped mode.
        InlineClosureCallPass(ir, cpu.ParallelOptions(False), {},
                              False).run()
    postproc.PostProcessor(ir).run()
    return ir
def apply(self, kind, state):
    '''Given a pipeline and a dictionary of basic blocks, exhaustively
    attempt to apply all registered rewrites to all basic blocks.
    '''
    assert kind in self._kinds
    blocks = state.func_ir.blocks
    old_blocks = blocks.copy()
    for rewrite_cls in self.rewrites[kind]:
        # Exhaustively apply each rewrite: a rewritten block goes back on
        # the worklist so the rewrite re-runs until it stops matching.
        rewrite = rewrite_cls(state)
        pending = list(blocks.items())
        while pending:
            label, block = pending.pop()
            if not rewrite.match(state.func_ir, block, state.typemap,
                                 state.calltypes):
                continue
            if config.DEBUG or config.DUMP_IR:
                print("_" * 70)
                print("REWRITING (%s):" % rewrite_cls.__name__)
                block.dump()
                print("_" * 60)
            replacement = rewrite.apply()
            blocks[label] = replacement
            pending.append((label, replacement))
            if config.DEBUG or config.DUMP_IR:
                replacement.dump()
                print("_" * 70)
    # Sanity-check only the blocks that actually changed.
    for label, block in blocks.items():
        if block != old_blocks[label]:
            block.verify()
    # Some passes, e.g. _inline_const_arraycall are known to occasionally
    # do invalid things WRT ir.Del, others, e.g. RewriteArrayExprs do valid
    # things with ir.Del, but the placement is not optimal. The lines below
    # fix-up the IR so that ref counts are valid and optimally placed,
    # see #4093 for context. This has to be run here opposed to in
    # apply() as the CFG needs computing so full IR is needed.
    from numba import postproc
    postproc.PostProcessor(state.func_ir).run()
def with_lifting(func_ir, typingctx, targetctx, flags, locals):
    """With-lifting transformation

    Rewrite the IR to extract all withs.
    Only the top-level withs are extracted.
    Returns the (the_new_ir, the_lifted_with_ir)
    """
    from numba import postproc

    def dispatcher_factory(func_ir):
        # Wrap a lifted with-region's IR in its own dispatcher.
        from numba.dispatcher import LiftedWith
        return LiftedWith(func_ir, typingctx, targetctx, flags, locals)

    # Post-process first so variable lifetime info (and its CFG) exists.
    postproc.PostProcessor(func_ir).run()
    assert func_ir.variable_lifetime
    cfg = func_ir.variable_lifetime.cfg
    blocks = func_ir.blocks.copy()
    withs = find_setupwiths(blocks)
    _legalize_withs_cfg(withs, cfg, blocks)
    # Remove the with blocks that are in the with-body
    sub_irs = []
    for blk_start, blk_end in withs:
        body_blocks = list(_cfg_nodes_in_region(cfg, blk_start, blk_end))
        _legalize_with_head(blocks[blk_start])
        cmkind = _get_with_contextmanager(func_ir, blocks, blk_start)
        lifted = cmkind.mutate_with_body(func_ir, blocks, blk_start,
                                         blk_end, body_blocks,
                                         dispatcher_factory)
        sub_irs.append(lifted)
    # Derive a new IR only when something was actually lifted.
    new_ir = func_ir.derive(blocks) if sub_irs else func_ir
    return new_ir, sub_irs
def stage_inline_pass(self):
    """
    Inline calls to locally defined closures.
    """
    # The frontend must have produced an IR before inlining can run.
    assert self.func_ir
    # if the return type is a pyobject, there's no type info available and
    # no ability to resolve certain typed function calls in the array
    # inlining code, use this variable to indicate
    typed_pass = not isinstance(self.return_type, types.misc.PyObject)
    InlineClosureCallPass(self.func_ir,
                          self.flags.auto_parallel,
                          self.parfor_diagnostics.replaced_fns,
                          typed_pass).run()
    # Remove all Dels, and re-run postproc
    postproc.PostProcessor(self.func_ir).run()
    if config.DEBUG or config.DUMP_IR:
        name = self.func_ir.func_id.func_qualname
        print(("IR DUMP: %s" % name).center(80, "-"))
        self.func_ir.dump()
def run_pass(self, state):
    """Run IR post-processing on the state's IR.

    Always reports the IR as mutated (returns True).
    """
    postproc.PostProcessor(state.func_ir).run()
    return True