def simulate_fresh_target(self):
    """Temporarily install a brand-new target descriptor on the CPU
    dispatcher class, yielding control to the caller, then restore the
    original descriptor on exit.

    Intended for use as a context manager (generator form): the fresh
    ``targetdescr`` lets the enclosed code observe a clean target state
    without permanently disturbing the rest of the process.
    """
    target_name = 'cpu'
    dispatcher_cls = resolve_dispatcher_from_str(target_name)
    saved_descr = dispatcher_cls.targetdescr
    # Install a freshly constructed descriptor of the same concrete type,
    # simulating a never-before-used target.
    dispatcher_cls.targetdescr = type(saved_descr)(target_name)
    try:
        yield
    finally:
        # Always put the original descriptor back, otherwise other
        # objects holding references to it would be out of sync.
        dispatcher_cls.targetdescr = saved_descr
def run_pass(self, state):
    """Rewrite pass: retarget eligible call expressions to the DPU target.

    Walks every call expression in the function IR; for each call whose
    callee can be typed and compiled under the "dpu" target, mutates the
    call node's ``target`` attribute to ``"dpu"``. Calls that fail to
    type/compile for the DPU are left untouched (best-effort, errors are
    swallowed; optionally printed when ``_DEBUG`` is set).

    Parameters
    ----------
    state : compiler state carrying ``func_ir``, ``typemap``,
        ``calltypes``, ``typingctx`` and ``targetctx``
        (project-defined; not constructible here).

    Returns
    -------
    bool
        True if any IR node was mutated, False otherwise.
    """
    func_ir = state.func_ir
    mutated = False
    for blk in func_ir.blocks.values():
        # find the assignment nodes in the block and walk them, if
        # there's a DPU version then swap out for a call to that
        for call in blk.find_exprs("call"):
            # Resolve the callee's type from the typemap so we can ask it
            # for a call signature under the DPU target.
            function = state.typemap[call.func.name]
            tname = "dpu"
            # Note: `target_override` context driven compilation can
            # be done here, the DPU target is in use.
            with target_override(tname):
                try:
                    sig = function.get_call_type(
                        state.typingctx,
                        state.calltypes[call].args,
                        {},
                    )
                    disp = resolve_dispatcher_from_str(tname)
                    # force compile check — proves a DPU overload both
                    # exists and compiles before we commit to retargeting.
                    hw_ctx = disp.targetdescr.target_context
                    hw_ctx.get_function(function, sig)
                except Exception as e:
                    # Best-effort: no DPU overload (or it failed to
                    # compile) — leave this call on its current target.
                    if _DEBUG:
                        msg = (
                            f"Failed to find and compile an "
                            f"overload for {function} for {tname} "
                            f"due to {e}")
                        print(msg)
                    continue
                # This is a necessary hack at present so as to
                # generate code into the same library. I.e. the DPU
                # target is going to do code gen into the CPUs lib.
                hw_ctx._codelib_stack = (
                    state.targetctx._codelib_stack)
                # All is good, so switch IR node for one targeting
                # this target. Should generate this, but for now
                # just mutate as:
                # ir.Expr.call(call.func, call.args, call.kws,
                #              call.loc, target='dpu')
                call.target = tname
                mutated = True
    # return True if the IR was mutated, False if not.
    return mutated
def _jit(sigs, locals, target, cache, targetoptions, **dispatcher_args):
    """Build the decorator that turns a Python function into a Dispatcher.

    Parameters
    ----------
    sigs : sequence of signatures or None
        If given, each signature is eagerly compiled and further
        compilation is then disabled.
    locals : dict
        Local variable type mapping forwarded to the dispatcher.
    target : str
        Name of the compilation target (e.g. ``'cuda'``, ``'npyufunc'``).
    cache : bool
        When true, enable on-disk caching on the dispatcher.
    targetoptions : dict
        Target-specific options forwarded to the dispatcher.
    **dispatcher_args
        Extra keyword arguments forwarded to the dispatcher constructor.

    Returns
    -------
    callable
        A decorator producing a dispatcher (or, in simulator/disabled
        modes, the cuda-jitted or original function).
    """
    from numba.core.target_extension import resolve_dispatcher_from_str
    dispatcher_cls = resolve_dispatcher_from_str(target)

    def wrapper(func):
        # Guard: double application of a jit decorator is an error.
        if extending.is_jitted(func):
            raise TypeError(
                "A jit decorator was called on an already jitted function "
                f"{func}. If trying to access the original python "
                f"function, use the {func}.py_func attribute."
            )
        # Guard: only plain Python functions can be jitted.
        if not inspect.isfunction(func):
            raise TypeError(
                "The decorated object is not a function (got type "
                f"{type(func)})."
            )
        # CUDA simulator short-circuit: hand the function to cuda.jit.
        if config.ENABLE_CUDASIM and target == 'cuda':
            from numba import cuda
            return cuda.jit(func)
        # Globally disabled JIT returns the function unchanged
        # (npyufunc is exempt from this switch).
        if config.DISABLE_JIT and target != 'npyufunc':
            return func

        disp = dispatcher_cls(py_func=func, locals=locals,
                              targetoptions=targetoptions,
                              **dispatcher_args)
        if cache:
            disp.enable_caching()
        if sigs is not None:
            # Register the Dispatcher to the type inference mechanism,
            # even though the decorator hasn't returned yet.
            from numba.core import typeinfer
            with typeinfer.register_dispatcher(disp):
                for sig in sigs:
                    disp.compile(sig)
                disp.disable_compile()
        return disp

    return wrapper