def wrapper(func):
    """Decorate *func* with the enclosing jit configuration and return a
    Dispatcher for it.

    Closure variables (from the enclosing decorator factory): ``target``,
    ``dispatcher``, ``locals``, ``targetoptions``, ``dispatcher_args``,
    ``cache`` and ``sigs``.

    Raises
    ------
    TypeError
        If *func* is already jitted, or is not a plain Python function.
    """
    # Re-jitting a Dispatcher is almost certainly a user error; point them
    # at the .py_func attribute instead.
    if extending.is_jitted(func):
        raise TypeError(
            "A jit decorator was called on an already jitted function "
            f"{func}. If trying to access the original python "
            f"function, use the {func}.py_func attribute.")
    if not inspect.isfunction(func):
        raise TypeError("The decorated object is not a function (got type "
                        f"{type(func)}).")
    # Under the CUDA simulator, delegate straight to the (simulated)
    # cuda.jit implementation.
    if config.ENABLE_CUDASIM and target == 'cuda':
        from numba import cuda
        return cuda.jit(func)
    # With JIT globally disabled, return the plain Python function --
    # except for the npyufunc target, which must still be compiled.
    if config.DISABLE_JIT and target != 'npyufunc':
        return func
    disp = dispatcher(py_func=func, locals=locals,
                      targetoptions=targetoptions,
                      **dispatcher_args)
    if cache:
        disp.enable_caching()
    if sigs is not None:
        # Eager compilation: register the Dispatcher to the type inference
        # mechanism, even though the decorator hasn't returned yet.
        from numba.core import typeinfer
        with typeinfer.register_dispatcher(disp):
            for sig in sigs:
                disp.compile(sig)
            # Signatures were given explicitly; lock out lazy compilation
            # of any further ones.
            disp.disable_compile()
    return disp
def __init__(self, py_func, identity=None, cache=False, targetoptions=None):
    """Build a ufunc wrapper around *py_func*.

    Parameters
    ----------
    py_func : function or Dispatcher
        The Python function to wrap; an already-jitted Dispatcher is
        unwrapped via its ``py_func`` attribute.
    identity : optional
        Ufunc identity value, parsed by ``parse_identity``.
    cache : bool
        Whether to enable on-disk compilation caching.
    targetoptions : dict, optional
        Extra options forwarded to ``jit``; defaults to an empty dict.
    """
    # Avoid a mutable default argument; an empty dict is created per call.
    if targetoptions is None:
        targetoptions = {}
    if is_jitted(py_func):
        py_func = py_func.py_func
    self.py_func = py_func
    self.identity = parse_identity(identity)
    # Compile with the npyufunc target so loops can be built per-signature.
    self.nb_func = jit(_target='npyufunc', cache=cache,
                       **targetoptions)(py_func)
    # Signatures registered so far.
    self._sigs = []
    # Compile results keyed by signature.
    self._cres = {}
def __init__(self, py_func, identity=None, cache=False, targetoptions=None):
    """Build the underlying npyufunc Dispatcher for *py_func* and delegate
    the rest of construction to ``_initialize``.

    Parameters
    ----------
    py_func : function or Dispatcher
        The Python function to wrap; an already-jitted Dispatcher is
        unwrapped via its ``py_func`` attribute.
    identity : optional
        Ufunc identity value, forwarded to ``_initialize``.
    cache : bool
        Whether to enable on-disk compilation caching.
    targetoptions : dict, optional
        Extra options forwarded to ``jit``; defaults to an empty dict.
    """
    # Avoid a mutable default argument; an empty dict is created per call.
    if targetoptions is None:
        targetoptions = {}
    if is_jitted(py_func):
        py_func = py_func.py_func
    dispatcher = jit(_target='npyufunc', cache=cache,
                     **targetoptions)(py_func)
    self._initialize(dispatcher, identity)