def get_template_info(cls):
    """Return a dict of source metadata describing this overload template.

    NOTE(review): ``pyfunc`` is not a parameter or local here — it is
    captured from an enclosing (closure) scope not visible in this chunk;
    confirm against the factory that builds this classmethod.
    """
    # Package root two levels above numba/__init__.py, used to make
    # 'filename' a stable relative path.
    basepath = os.path.dirname(os.path.dirname(numba.__file__))
    code, firstlineno = inspect.getsourcelines(pyfunc)
    path = inspect.getsourcefile(pyfunc)
    sig = str(utils.pysignature(pyfunc))
    info = {
        'kind': "overload",
        # `cls.key` may be an object without __name__ (e.g. a builtin
        # or an attribute key); fall back to a placeholder.
        'name': getattr(cls.key, '__name__', "unknown"),
        'sig': sig,
        'filename': utils.safe_relpath(path, start=basepath),
        # Inclusive (first, last) source line range of the definition.
        'lines': (firstlineno, firstlineno + len(code) - 1),
        'docstring': pyfunc.__doc__
    }
    return info
def get_template_info(self):
    """Return a dict of source metadata for this intrinsic's definition."""
    impl = self._definition_func
    code, firstlineno, path = self.get_source_code_info(impl)
    # Paths are reported relative to the package root (two levels up
    # from numba/__init__.py).
    basepath = os.path.dirname(os.path.dirname(numba.__file__))
    return {
        'kind': "intrinsic",
        'name': getattr(impl, '__qualname__', impl.__name__),
        'sig': str(utils.pysignature(impl)),
        'filename': utils.safe_relpath(path, start=basepath),
        # Inclusive (first, last) line numbers of the definition.
        'lines': (firstlineno, firstlineno + len(code) - 1),
        'docstring': impl.__doc__,
    }
def _get_implementation(self, args, kws):
    """Call the generating function to obtain a concrete implementation,
    validating that its signature is compatible with the generator's.

    Raises TypeError when the implementation's parameters differ in
    name, kind, or (when present) default value.
    """
    impl = self.py_func(*args, **kws)
    # Compare the generating function's and the implementation's
    # signatures up front, otherwise compilation would fail later.
    pysig = utils.pysignature(self.py_func)
    implsig = utils.pysignature(impl)
    compatible = len(pysig.parameters) == len(implsig.parameters)
    if compatible:
        param_pairs = zip(pysig.parameters.values(),
                          implsig.parameters.values())
        for formal, actual in param_pairs:
            # The implementation may omit default values, but any it
            # does declare must agree with the generator's.
            default_ok = (actual.default is actual.empty
                          or actual.default == formal.default)
            if not (formal.name == actual.name
                    and formal.kind == actual.kind
                    and default_ok):
                compatible = False
    if not compatible:
        raise TypeError("generated implementation %s should be compatible "
                        "with signature '%s', but has signature '%s'"
                        % (impl, pysig, implsig))
    self.impls.add(impl)
    return impl
def __init__(self, py_func, sigs, targetoptions):
    """Initialize a CUDA dispatcher for *py_func*.

    sigs: optional sequence of signatures; at most one is supported,
        and providing one disables further compilation.
    targetoptions: dict of target-specific options; 'link' is popped
        here, and 'extensions' is defensively copied.
    """
    self.py_func = py_func
    self.sigs = []
    self.link = targetoptions.pop('link', (),)
    self._can_compile = True
    # Specializations for given sets of argument types
    self.specializations = {}
    # A mapping of signatures to compile results
    self.overloads = collections.OrderedDict()
    self.targetoptions = targetoptions
    # defensive copy — avoids mutating the caller's extensions list
    self.targetoptions['extensions'] = \
        list(self.targetoptions.get('extensions', []))
    from .descriptor import cuda_target
    self.typingctx = cuda_target.typingctx
    self._tm = default_type_manager
    pysig = utils.pysignature(py_func)
    arg_count = len(pysig.parameters)
    argnames = tuple(pysig.parameters)
    # Wrap Python-level default values so the dispatcher can treat
    # omitted arguments specially.
    default_values = self.py_func.__defaults__ or ()
    defargs = tuple(OmittedArg(val) for val in default_values)
    can_fallback = False  # CUDA cannot fallback to object mode
    # A function with no parameters trivially has no *args.
    try:
        lastarg = list(pysig.parameters.values())[-1]
    except IndexError:
        has_stararg = False
    else:
        has_stararg = lastarg.kind == lastarg.VAR_POSITIONAL
    exact_match_required = False
    # Hand the folded argument metadata to the C-level dispatcher.
    _dispatcher.Dispatcher.__init__(self, self._tm.get_pointer(),
                                    arg_count, self._fold_args, argnames,
                                    defargs, can_fallback, has_stararg,
                                    exact_match_required)
    if sigs:
        if len(sigs) > 1:
            raise TypeError("Only one signature supported at present")
        self.compile(sigs[0])
        # An explicit signature freezes the set of overloads.
        self._can_compile = False
def resolve_argsort(self, ary, args, kws):
    """Type an ``argsort`` call; only 1d arrays produce a signature."""
    assert not args
    remaining = dict(kws)
    # 'kind' defaults to the literal "quicksort" and must be a literal
    # string when supplied.
    kind = remaining.pop('kind', types.StringLiteral('quicksort'))
    if not isinstance(kind, types.StringLiteral):
        raise TypingError('"kind" must be a string literal')
    if remaining:
        msg = "Unsupported keywords: {!r}"
        raise TypingError(msg.format([k for k in remaining.keys()]))
    if ary.ndim == 1:
        # Stub carrying the Python-level signature for argument folding.
        def argsort_stub(kind='quicksort'):
            pass
        stub_sig = utils.pysignature(argsort_stub)
        return signature(types.Array(types.intp, 1, 'C'),
                         kind).replace(pysig=stub_sig)
def get_template_info(self):
    """Return a dict of source metadata for this overload template."""
    impl = getattr(self, "generic")
    code, firstlineno, path = self.get_source_code_info(impl)
    # Report paths relative to the package root (two levels above
    # numba/__init__.py).
    basepath = os.path.dirname(os.path.dirname(numba.__file__))
    fallback_name = getattr(impl, '__qualname__', impl.__name__)
    return {
        'kind': "overload",
        'name': getattr(self.key, '__name__', fallback_name),
        'sig': str(utils.pysignature(impl)),
        'filename': utils.safe_relpath(path, start=basepath),
        # Inclusive (first, last) line numbers of the definition.
        'lines': (firstlineno, firstlineno + len(code) - 1),
        'docstring': impl.__doc__,
    }
def resolve_argsort(self, ary, args, kws):
    """Type an ``argsort`` call; only 1d arrays produce a signature.

    Raises TypingError for a non-literal "kind" or unsupported keywords.
    """
    assert not args
    kwargs = dict(kws)
    kind = kwargs.pop("kind", types.StringLiteral("quicksort"))
    # FIX: validate that 'kind' is a literal string, matching the
    # sibling argsort resolver; previously a non-literal value was
    # silently accepted and used in the signature.
    if not isinstance(kind, types.StringLiteral):
        raise TypingError('"kind" must be a string literal')
    if kwargs:
        msg = "Unsupported keywords: {!r}"
        raise TypingError(msg.format([k for k in kwargs.keys()]))
    if ary.ndim == 1:
        # Stub carrying the Python-level signature for argument folding.
        def argsort_stub(kind="quicksort"):
            pass
        pysig = utils.pysignature(argsort_stub)
        sig = signature(types.Array(types.intp, 1, "C"),
                        kind).replace(pysig=pysig)
        return sig
def apply(self, args, kws):
    """Resolve a call against this template's ``generic`` typer and
    return the selected case, or None when typing fails.
    """
    generic = getattr(self, "generic")
    typer = generic()
    sig = typer(*args, **kws)
    # Unpack optional type if no matching signature
    if sig is None:
        # Retry with Optional[...] arguments stripped to their payload
        # type, then give up if typing still fails.
        if any(isinstance(x, types.Optional) for x in args):
            def unpack_opt(x):
                if isinstance(x, types.Optional):
                    return x.type
                else:
                    return x
            args = list(map(unpack_opt, args))
            sig = typer(*args, **kws)
        if sig is None:
            return
    # Get the pysig — an explicit attribute on the typer wins over
    # introspecting the typer callable itself.
    try:
        pysig = typer.pysig
    except AttributeError:
        pysig = utils.pysignature(typer)
    # Fold any keyword arguments
    bound = pysig.bind(*args, **kws)
    if bound.kwargs:
        raise TypingError("unsupported call signature")
    if not isinstance(sig, Signature):
        # If not a signature, `sig` is assumed to be the return type
        if not isinstance(sig, types.Type):
            raise TypeError(
                "invalid return type for callable template: got %r"
                % (sig, ))
        sig = signature(sig, *bound.args)
    if self.recvr is not None:
        sig = sig.replace(recvr=self.recvr)
    # Hack any omitted parameters out of the typer's pysig,
    # as lowering expects an exact match between formal signature
    # and actual args.
    if len(bound.args) < len(pysig.parameters):
        parameters = list(pysig.parameters.values())[:len(bound.args)]
        pysig = pysig.replace(parameters=parameters)
    sig = sig.replace(pysig=pysig)
    cases = [sig]
    return self._select(cases, bound.args, bound.kwargs)
def __init__(self, py_func, locals=None, targetoptions=None,
             impl_kind='direct', pipeline_class=compiler.Compiler):
    """
    Parameters
    ----------
    py_func: function object to be compiled
    locals: dict, optional
        Mapping of local variable names to Numba types. Used to
        override the types deduced by the type inference engine.
    targetoptions: dict, optional
        Target-specific config options.
    impl_kind: str
        Select the compiler mode for `@jit` and `@generated_jit`
    pipeline_class: type numba.compiler.CompilerBase
        The compiler pipeline type.
    """
    # FIX: the defaults used to be mutable ({} shared across every
    # call); since both dicts are stored on self (and `targetoptions`
    # may be mutated later), all instances created with the default
    # would share and corrupt the same dict. Use a None sentinel.
    if locals is None:
        locals = {}
    if targetoptions is None:
        targetoptions = {}
    self.typingctx = self.targetdescr.typing_context
    self.targetctx = self.targetdescr.target_context

    pysig = utils.pysignature(py_func)
    arg_count = len(pysig.parameters)
    # Object-mode fallback is only allowed outside nopython mode.
    can_fallback = not targetoptions.get('nopython', False)

    _DispatcherBase.__init__(self, arg_count, py_func, pysig,
                             can_fallback, exact_match_required=False)

    functools.update_wrapper(self, py_func)

    self.targetoptions = targetoptions
    self.locals = locals
    self._cache = NullCache()
    compiler_class = self._impl_kinds[impl_kind]
    self._impl_kind = impl_kind
    self._compiler = compiler_class(py_func, self.targetdescr,
                                    targetoptions, locals, pipeline_class)
    self._cache_hits = collections.Counter()
    self._cache_misses = collections.Counter()

    self._type = types.Dispatcher(self)
    # Make this dispatcher resolvable as a global in the typing context.
    self.typingctx.insert_global(self, self._type)
def _set_init(cls):
    """
    Generate a wrapper for calling the constructor from pure Python.
    Note the wrapper will only accept positional arguments.
    """
    init = cls.class_type.instance_type.methods['__init__']
    init_sig = utils.pysignature(init)
    # Positional and keyword argument names; drop the leading `self`.
    args = _getargs(init_sig)[1:]
    cls._ctor_sig = init_sig
    ctor_source = _ctor_template.format(args=', '.join(args))
    # Build the constructor wrapper in a namespace that exposes the
    # jitclass under the name the template expects.
    namespace = {"__numba_cls_": cls}
    exec(ctor_source, namespace)
    cls._ctor = njit(namespace['ctor'])
def _type_me(self, argtys, kwtys):
    """
    Implement AbstractTemplate.generic() for the typing class
    built by StencilFunc._install_type().
    Return the call-site signature.
    """
    if (self.neighborhood is not None
            and len(self.neighborhood) != argtys[0].ndim):
        raise ValueError("%d dimensional neighborhood specified "
                         "for %d dimensional input array" %
                         (len(self.neighborhood), argtys[0].ndim))

    # Extend the positional argument types and the textual signature
    # with the optional 'out' and 'neighborhood' keyword arguments.
    argtys_extra = argtys
    sig_extra = ""
    result = None
    if 'out' in kwtys:
        argtys_extra += (kwtys['out'], )
        sig_extra += ", out=None"
        result = kwtys['out']
    if 'neighborhood' in kwtys:
        argtys_extra += (kwtys['neighborhood'], )
        sig_extra += ", neighborhood=None"

    # look in the type cache first
    if argtys_extra in self._type_cache:
        (_sig, _, _, _) = self._type_cache[argtys_extra]
        return _sig

    (real_ret, typemap, calltypes) = self.get_return_type(argtys)
    sig = signature(real_ret, *argtys_extra)
    dummy_text = ("def __numba_dummy_stencil({}{}):\n    pass\n".format(
        ",".join(self.kernel_ir.arg_names), sig_extra))
    # FIX: build the dummy function in an explicit namespace. The
    # previous code used the Python 2 idiom
    # ``exec(dummy_text) in globals(), locals()`` followed by
    # ``eval("__numba_dummy_stencil")``, which on Python 3 parses as
    # an expression statement and only retrieved the function through
    # a CPython frame-locals implementation accident.
    namespace = {}
    exec(dummy_text, namespace)
    dummy_func = namespace["__numba_dummy_stencil"]
    sig.pysig = utils.pysignature(dummy_func)
    self._targetctx.insert_func_defn(
        [(self._lower_me, self, argtys_extra)])
    self._type_cache[argtys_extra] = (sig, result, typemap, calltypes)
    return sig
def from_function(cls, pyfunc):
    """
    Create the FunctionIdentity of the given function.
    """
    func = get_function_object(pyfunc)
    code = get_code_object(func)
    pysig = utils.pysignature(func)
    if not code:
        raise errors.ByteCodeSupportError(
            "%s does not provide its bytecode" % func)

    # Builtins and some callables lack __qualname__; fall back to
    # the plain name.
    qualname = getattr(func, '__qualname__', func.__name__)

    self = cls()
    self.func = func
    self.func_qualname = qualname
    self.func_name = qualname.rsplit('.', 1)[-1]
    self.code = code
    self.module = inspect.getmodule(func)
    if self.module is None:
        self.modname = utils._dynamic_modname
    else:
        self.modname = self.module.__name__
    self.is_generator = inspect.isgeneratorfunction(func)
    self.pysig = pysig
    self.filename = code.co_filename
    self.firstlineno = code.co_firstlineno
    self.arg_count = len(pysig.parameters)
    self.arg_names = list(pysig.parameters)

    # Even the same function definition can be compiled into
    # several different function objects with distinct closure
    # variables, so we make sure to disambiguate using an unique id.
    uid = next(cls._unique_ids)
    self.unique_name = '{}${}'.format(self.func_qualname, uid)
    self.unique_id = uid
    return self
def _customize_tm_options(self, options):
    """Customize the target machine *options* dict for the host CPU."""
    options["cpu"] = self._get_host_cpu_name()

    # Relocation model depends on the host architecture.
    arch = ll.Target.from_default_triple().name
    if arch.startswith("x86"):
        options["reloc"] = "static"
    elif arch.startswith("ppc"):
        options["reloc"] = "pic"
    else:
        options["reloc"] = "default"

    options["codemodel"] = "jitdefault"

    # Feature attributes (such as ISA extensions) override the default
    # selection implied by the CPU model above.
    options["features"] = self._tm_features

    # Newer llvmlite versions take an optional `jit` argument on
    # ll.Target.create_target_machine; only pass it when supported.
    tm_params = utils.pysignature(ll.Target.create_target_machine).parameters
    if "jit" in tm_params:
        # Mark that this is making a JIT engine
        options["jit"] = True
def get_call_template(self, args, kws):
    # Copied and simplified from _DispatcherBase.get_call_template.
    """
    Get a typing.ConcreteTemplate for this dispatcher and the given
    *args* and *kws* types.  This allows to resolve the return type.

    A (template, pysig, args, kws) tuple is returned.
    """
    # Make sure an overload exists before typing the call.
    self.compile(tuple(args))

    # Build the typing template for the compiled signatures.  The
    # `key` is only used for diagnosis, so using the plain function
    # name avoids keeping a reference to `cfunc`.
    fname = self.py_func.__name__
    template = typing.make_concrete_template(
        "CallTemplate({0})".format(fname), key=fname,
        signatures=self.nopython_signatures)
    return template, utils.pysignature(self.py_func), args, kws
def generic(self, args, kws):
    """
    Type the intrinsic by the arguments.
    """
    from numba.core.imputils import lower_builtin

    cache_key = self.context, args, tuple(kws.items())
    try:
        return self._impl_cache[cache_key]
    except KeyError:
        # Not typed yet; resolve below.  FIX: doing the resolution
        # outside the handler (instead of nested in `except`) avoids
        # chaining the internal cache-miss KeyError onto any error the
        # user's definition function raises, and matches the newer
        # target-aware variant of this method.
        pass
    result = self._definition_func(self.context, *args, **kws)
    if result is None:
        return
    [sig, imp] = result
    pysig = utils.pysignature(self._definition_func)
    # omit context argument from user function
    parameters = list(pysig.parameters.values())[1:]
    sig = sig.replace(pysig=pysig.replace(parameters=parameters))
    self._impl_cache[cache_key] = sig
    self._overload_cache[sig.args] = imp
    # register the lowering
    lower_builtin(imp, *sig.args)(imp)
    return sig
def get_call_template(self, args, kws):
    # Copied and simplified from _DispatcherBase.get_call_template.
    """
    Get a typing.ConcreteTemplate for this dispatcher and the given
    *args* and *kws* types.  This allows resolution of the return type.

    A (template, pysig, args, kws) tuple is returned.
    """
    with self._compiling_counter:
        # Compile an exactly-matching overload when permitted.  Typing
        # still proceeds when compilation is disabled, since a cast may
        # be forced on the caller side.
        if self._can_compile:
            self.compile_device(tuple(args))

        # Build the typing template for the compiled signatures.
        fname = self.py_func.__name__
        template = typing.make_concrete_template(
            "CallTemplate({0})".format(fname), key=fname,
            signatures=self.nopython_signatures)
        return template, utils.pysignature(self.py_func), args, kws
def _validate_sigs(self, typing_func, impl_func):
    """Validate that *impl_func*'s signature is compatible with
    *typing_func*'s, raising InternalError with a diagnostic diff
    otherwise.
    """
    # check that the impl func and the typing func have the same signature!
    typing_sig = utils.pysignature(typing_func)
    impl_sig = utils.pysignature(impl_func)
    # the typing signature is considered golden and must be adhered to by
    # the implementation...
    # Things that are valid:
    # 1. args match exactly
    # 2. kwargs match exactly in name and default value
    # 3. Use of *args in the same location by the same name in both typing
    #    and implementation signature
    # 4. Use of *args in the implementation signature to consume any number
    #    of arguments in the typing signature.
    # Things that are invalid:
    # 5. Use of *args in the typing signature that is not replicated
    #    in the implementing signature
    # 6. Use of **kwargs

    def get_args_kwargs(sig):
        # Partition parameters into positional (no default) and keyword
        # (with default); remember a *args parameter if present and
        # reject **kwargs outright.
        kws = []
        args = []
        pos_arg = None
        for x in sig.parameters.values():
            if x.default == utils.pyParameter.empty:
                args.append(x)
                if x.kind == utils.pyParameter.VAR_POSITIONAL:
                    pos_arg = x
                elif x.kind == utils.pyParameter.VAR_KEYWORD:
                    msg = ("The use of VAR_KEYWORD (e.g. **kwargs) is "
                           "unsupported. (offending argument name is '%s')")
                    raise InternalError(msg % x)
            else:
                kws.append(x)
        return args, kws, pos_arg

    ty_args, ty_kws, ty_pos = get_args_kwargs(typing_sig)
    im_args, im_kws, im_pos = get_args_kwargs(impl_sig)

    sig_fmt = ("Typing signature: %s\n"
               "Implementation signature: %s")
    sig_str = sig_fmt % (typing_sig, impl_sig)

    err_prefix = "Typing and implementation arguments differ in "

    a = ty_args
    b = im_args
    if ty_pos:
        if not im_pos:
            # case 5. described above
            msg = ("VAR_POSITIONAL (e.g. *args) argument kind (offending "
                   "argument name is '%s') found in the typing function "
                   "signature, but is not in the implementing function "
                   "signature.\n%s") % (ty_pos, sig_str)
            raise InternalError(msg)
    else:
        if im_pos:
            # no *args in typing but there's a *args in the implementation
            # this is case 4. described above
            # Truncate both lists to the span the *args can stand in for.
            b = im_args[:im_args.index(im_pos)]
            try:
                a = ty_args[:ty_args.index(b[-1]) + 1]
            except ValueError:
                # there's no b[-1] arg name in the ty_args, something is
                # very wrong, we can't work out a diff (*args consumes
                # unknown quantity of args) so just report first error
                specialized = "argument names.\n%s\nFirst difference: '%s'"
                msg = err_prefix + specialized % (sig_str, b[-1])
                raise InternalError(msg)

    def gen_diff(typing, implementing):
        # Symmetric difference of the two parameter sets, for diagnostics.
        diff = set(typing) ^ set(implementing)
        return "Difference: %s" % diff

    if a != b:
        specialized = "argument names.\n%s\n%s" % (sig_str, gen_diff(a, b))
        raise InternalError(err_prefix + specialized)

    # ensure kwargs are the same
    ty = [x.name for x in ty_kws]
    im = [x.name for x in im_kws]
    if ty != im:
        specialized = "keyword argument names.\n%s\n%s"
        msg = err_prefix + specialized % (sig_str, gen_diff(
            ty_kws, im_kws))
        raise InternalError(msg)
    same = [x.default for x in ty_kws] == [x.default for x in im_kws]
    if not same:
        specialized = "keyword argument default values.\n%s\n%s"
        msg = err_prefix + specialized % (sig_str, gen_diff(
            ty_kws, im_kws))
        raise InternalError(msg)
def __init__(self, py_func, sigs, targetoptions):
    """Initialize a CUDA dispatcher for *py_func*.

    sigs: optional sequence of signatures; at most one is supported,
        and providing one disables further compilation.
    targetoptions: dict of target options; 'link' is popped here and
        'device' selects device-function compilation.
    """
    self.py_func = py_func
    self.sigs = []
    self.link = targetoptions.pop(
        'link',
        (),
    )
    self._can_compile = True
    self._type = self._numba_type_

    # The compiling counter is only used when compiling device functions as
    # it is used to detect recursion - recursion is not possible when
    # compiling a kernel.
    self._compiling_counter = CompilingCounter()

    # Specializations for given sets of argument types
    self.specializations = {}

    # A mapping of signatures to compile results
    self.overloads = collections.OrderedDict()

    self.targetoptions = targetoptions

    # defensive copy — avoids mutating the caller's extensions list
    self.targetoptions['extensions'] = \
        list(self.targetoptions.get('extensions', []))

    self.typingctx = self.targetdescr.typing_context
    self._tm = default_type_manager

    pysig = utils.pysignature(py_func)
    arg_count = len(pysig.parameters)
    argnames = tuple(pysig.parameters)
    # Wrap Python-level default values so the dispatcher can treat
    # omitted arguments specially.
    default_values = self.py_func.__defaults__ or ()
    defargs = tuple(OmittedArg(val) for val in default_values)
    can_fallback = False  # CUDA cannot fallback to object mode

    # A function with no parameters trivially has no *args.
    try:
        lastarg = list(pysig.parameters.values())[-1]
    except IndexError:
        has_stararg = False
    else:
        has_stararg = lastarg.kind == lastarg.VAR_POSITIONAL

    exact_match_required = False

    # Hand the folded argument metadata to the C-level dispatcher.
    _dispatcher.Dispatcher.__init__(self, self._tm.get_pointer(),
                                    arg_count, self._fold_args, argnames,
                                    defargs, can_fallback, has_stararg,
                                    exact_match_required)

    if sigs:
        if len(sigs) > 1:
            raise TypeError("Only one signature supported at present")
        if targetoptions.get('device'):
            # Device functions compile from normalized argument types.
            argtypes, restype = sigutils.normalize_signature(sigs[0])
            self.compile_device(argtypes)
        else:
            self.compile(sigs[0])
        # An explicit signature freezes the set of overloads.
        self._can_compile = False

    if targetoptions.get('device'):
        self._register_device_function()
def _stencil_wrapper(self, result, sigret, return_type, typemap, calltypes,
                     *args):
    # Overall approach:
    # 1) Construct a string containing a function definition for the stencil
    #    function that will execute the stencil kernel. This function
    #    definition includes a unique stencil function name, the parameters
    #    to the stencil kernel, loop nests across the dimensions of the
    #    input array. Those loop nests use the computed stencil kernel size
    #    so as not to try to compute elements where elements outside the
    #    bounds of the input array would be needed.
    # 2) The body of the loop nest in this new function is a special
    #    sentinel assignment.
    # 3) Get the IR of this new function.
    # 4) Split the block containing the sentinel assignment and remove the
    #    sentinel assignment. Insert the stencil kernel IR into the stencil
    #    function IR after label and variable renaming of the stencil kernel
    #    IR to prevent conflicts with the stencil function IR.
    # 5) Compile the combined stencil function IR + stencil kernel IR into
    #    existence.

    # Copy the kernel so that our changes for this callsite
    # won't affect other callsites.
    (kernel_copy, copy_calltypes) = self.copy_ir_with_calltypes(
        self.kernel_ir, calltypes)
    # The stencil kernel body becomes the body of a loop, for which args
    # aren't needed.
    ir_utils.remove_args(kernel_copy.blocks)
    first_arg = kernel_copy.arg_names[0]

    in_cps, out_cps = ir_utils.copy_propagate(kernel_copy.blocks, typemap)
    name_var_table = ir_utils.get_name_var_table(kernel_copy.blocks)
    ir_utils.apply_copy_propagate(
        kernel_copy.blocks,
        in_cps,
        name_var_table,
        typemap,
        copy_calltypes)

    if "out" in name_var_table:
        raise ValueError(
            "Cannot use the reserved word 'out' in stencil kernels.")

    sentinel_name = ir_utils.get_unused_var_name("__sentinel__",
                                                 name_var_table)
    if config.DEBUG_ARRAY_OPT >= 1:
        print("name_var_table", name_var_table, sentinel_name)

    the_array = args[0]

    if config.DEBUG_ARRAY_OPT >= 1:
        print("_stencil_wrapper", return_type, return_type.dtype,
              type(return_type.dtype), args)
        ir_utils.dump_blocks(kernel_copy.blocks)

    # We generate a Numba function to execute this stencil and here
    # create the unique name of this function.
    stencil_func_name = "__numba_stencil_%s_%s" % (
        hex(id(the_array)).replace("-", "_"),
        self.id)

    # We will put a loop nest in the generated function for each
    # dimension in the input array.  Here we create the name for
    # the index variable for each dimension.  index0, index1, ...
    index_vars = []
    for i in range(the_array.ndim):
        index_var_name = ir_utils.get_unused_var_name("index" + str(i),
                                                      name_var_table)
        index_vars += [index_var_name]

    # Create extra signature for out and neighborhood.
    out_name = ir_utils.get_unused_var_name("out", name_var_table)
    neighborhood_name = ir_utils.get_unused_var_name("neighborhood",
                                                     name_var_table)
    sig_extra = ""
    if result is not None:
        sig_extra += ", {}=None".format(out_name)
    if "neighborhood" in dict(self.kws):
        sig_extra += ", {}=None".format(neighborhood_name)

    # Get a list of the standard indexed array names.
    standard_indexed = self.options.get("standard_indexing", [])

    if first_arg in standard_indexed:
        raise ValueError("The first argument to a stencil kernel must "
                         "use relative indexing, not standard indexing.")

    if len(set(standard_indexed) - set(kernel_copy.arg_names)) != 0:
        raise ValueError("Standard indexing requested for an array name "
                         "not present in the stencil kernel definition.")

    # Add index variables to getitems in the IR to transition the accesses
    # in the kernel from relative to regular Python indexing.  Returns the
    # computed size of the stencil kernel and a list of the relatively
    # indexed arrays.
    kernel_size, relatively_indexed = self.add_indices_to_kernel(
        kernel_copy, index_vars, the_array.ndim,
        self.neighborhood, standard_indexed, typemap, copy_calltypes)
    if self.neighborhood is None:
        self.neighborhood = kernel_size

    if config.DEBUG_ARRAY_OPT >= 1:
        print("After add_indices_to_kernel")
        ir_utils.dump_blocks(kernel_copy.blocks)

    # The return in the stencil kernel becomes a setitem for that
    # particular point in the iteration space.
    ret_blocks = self.replace_return_with_setitem(kernel_copy.blocks,
                                                  index_vars, out_name)

    if config.DEBUG_ARRAY_OPT >= 1:
        print("After replace_return_with_setitem", ret_blocks)
        ir_utils.dump_blocks(kernel_copy.blocks)

    # Start to form the new function to execute the stencil kernel.
    # NOTE: the generated source deliberately uses one-space indentation
    # per nesting level (see the `offset` loops below).
    func_text = "def {}({}{}):\n".format(stencil_func_name,
                                         ",".join(kernel_copy.arg_names),
                                         sig_extra)

    # Get loop ranges for each dimension, which could be either int
    # or variable. In the latter case we'll use the extra neighborhood
    # argument to the function.
    ranges = []
    for i in range(the_array.ndim):
        if isinstance(kernel_size[i][0], int):
            lo = kernel_size[i][0]
            hi = kernel_size[i][1]
        else:
            lo = "{}[{}][0]".format(neighborhood_name, i)
            hi = "{}[{}][1]".format(neighborhood_name, i)
        ranges.append((lo, hi))

    # If there are more than one relatively indexed arrays, add a call to
    # a function that will raise an error if any of the relatively indexed
    # arrays are of different size than the first input array.
    if len(relatively_indexed) > 1:
        func_text += " raise_if_incompatible_array_sizes(" + first_arg
        for other_array in relatively_indexed:
            if other_array != first_arg:
                func_text += "," + other_array
        func_text += ")\n"

    # Get the shape of the first input array.
    shape_name = ir_utils.get_unused_var_name("full_shape", name_var_table)
    func_text += " {} = {}.shape\n".format(shape_name, first_arg)

    # If we have to allocate the output array (the out argument was not
    # used) then use numpy.full if the user specified a cval stencil
    # decorator option or np.zeros if they didn't to allocate the array.
    if result is None:
        return_type_name = numpy_support.as_dtype(
            return_type.dtype).type.__name__
        if "cval" in self.options:
            cval = self.options["cval"]
            if return_type.dtype != typing.typeof.typeof(cval):
                raise ValueError(
                    "cval type does not match stencil return type.")
            out_init = "{} = np.full({}, {}, dtype=np.{})\n".format(
                out_name, shape_name, cval, return_type_name)
        else:
            out_init = "{} = np.zeros({}, dtype=np.{})\n".format(
                out_name, shape_name, return_type_name)
        func_text += " " + out_init
    else:  # result is present, if cval is set then use it
        if "cval" in self.options:
            cval = self.options["cval"]
            cval_ty = typing.typeof.typeof(cval)
            if not self._typingctx.can_convert(cval_ty, return_type.dtype):
                msg = "cval type does not match stencil return type."
                raise ValueError(msg)
            out_init = "{}[:] = {}\n".format(out_name, cval)
            func_text += " " + out_init

    offset = 1
    # Add the loop nests to the new function.
    for i in range(the_array.ndim):
        for j in range(offset):
            func_text += " "
        # ranges[i][0] is the minimum index used in the i'th dimension
        # but minimum's greater than 0 don't preclude any entry in the
        # array. So, take the minimum of 0 and the minimum index found in
        # the kernel and this will be a negative number (potentially -0).
        # Then, we do unary - on that to get the positive offset in this
        # dimension whose use is precluded.
        # ranges[i][1] is the maximum of 0 and the observed maximum index
        # in this dimension because negative maximums would not cause us to
        # preclude any entry in the array from being used.
        func_text += ("for {} in range(-min(0,{}),"
                      "{}[{}]-max(0,{})):\n").format(index_vars[i],
                                                     ranges[i][0],
                                                     shape_name,
                                                     i,
                                                     ranges[i][1])
        offset += 1

    for j in range(offset):
        func_text += " "
    # Put a sentinel in the code so we can locate it in the IR.  We will
    # remove this sentinel assignment and replace it with the IR for the
    # stencil kernel body.
    func_text += "{} = 0\n".format(sentinel_name)
    func_text += " return {}\n".format(out_name)

    if config.DEBUG_ARRAY_OPT >= 1:
        print("new stencil func text")
        print(func_text)

    # Force the new stencil function into existence.
    # NOTE(review): this is a Python 2 idiom — on Python 3 it parses as
    # an expression statement and only works through a CPython
    # frame-locals accident; the `eval` below retrieves the function
    # from the same frame locals.  Left untouched here; flagged for a
    # dedicated fix.
    exec(func_text) in globals(), locals()
    stencil_func = eval(stencil_func_name)
    if sigret is not None:
        pysig = utils.pysignature(stencil_func)
        sigret.pysig = pysig

    # Get the IR for the newly created stencil function.
    from numba.core import compiler
    stencil_ir = compiler.run_frontend(stencil_func)
    ir_utils.remove_dels(stencil_ir.blocks)

    # rename all variables in stencil_ir afresh
    var_table = ir_utils.get_name_var_table(stencil_ir.blocks)
    new_var_dict = {}
    reserved_names = ([sentinel_name, out_name, neighborhood_name,
                       shape_name] + kernel_copy.arg_names + index_vars)
    for name, var in var_table.items():
        if not name in reserved_names:
            new_var_dict[name] = ir_utils.mk_unique_var(name)
    ir_utils.replace_var_names(stencil_ir.blocks, new_var_dict)

    stencil_stub_last_label = max(stencil_ir.blocks.keys()) + 1

    # Shift labels in the kernel copy so they are guaranteed unique
    # and don't conflict with any labels in the stencil_ir.
    kernel_copy.blocks = ir_utils.add_offset_to_labels(
        kernel_copy.blocks, stencil_stub_last_label)
    new_label = max(kernel_copy.blocks.keys()) + 1
    # Adjust ret_blocks to account for addition of the offset.
    ret_blocks = [x + stencil_stub_last_label for x in ret_blocks]

    if config.DEBUG_ARRAY_OPT >= 1:
        print("ret_blocks w/ offsets", ret_blocks, stencil_stub_last_label)
        print("before replace sentinel stencil_ir")
        ir_utils.dump_blocks(stencil_ir.blocks)
        print("before replace sentinel kernel_copy")
        ir_utils.dump_blocks(kernel_copy.blocks)

    # Search all the block in the stencil outline for the sentinel.
    for label, block in stencil_ir.blocks.items():
        for i, inst in enumerate(block.body):
            if (isinstance(inst, ir.Assign)
                    and inst.target.name == sentinel_name):
                # We found the sentinel assignment.
                loc = inst.loc
                scope = block.scope
                # split block across __sentinel__
                # A new block is allocated for the statements prior to the
                # sentinel but the new block maintains the current block
                # label.
                prev_block = ir.Block(scope, loc)
                prev_block.body = block.body[:i]
                # The current block is used for statements after sentinel.
                block.body = block.body[i + 1:]
                # But the current block gets a new label.
                body_first_label = min(kernel_copy.blocks.keys())

                # The previous block jumps to the minimum labelled block of
                # the parfor body.
                prev_block.append(ir.Jump(body_first_label, loc))

                # Add all the parfor loop body blocks to the gufunc
                # function's IR.
                for (l, b) in kernel_copy.blocks.items():
                    stencil_ir.blocks[l] = b
                stencil_ir.blocks[new_label] = block
                stencil_ir.blocks[label] = prev_block
                # Add a jump from all the blocks that previously contained
                # a return in the stencil kernel to the block
                # containing statements after the sentinel.
                for ret_block in ret_blocks:
                    stencil_ir.blocks[ret_block].append(
                        ir.Jump(new_label, loc))
                break
        else:
            continue
        break

    stencil_ir.blocks = ir_utils.rename_labels(stencil_ir.blocks)
    ir_utils.remove_dels(stencil_ir.blocks)

    assert (isinstance(the_array, types.Type))
    array_types = args

    new_stencil_param_types = list(array_types)

    if config.DEBUG_ARRAY_OPT >= 1:
        print("new_stencil_param_types", new_stencil_param_types)
        ir_utils.dump_blocks(stencil_ir.blocks)

    # Compile the combined stencil function with the replaced loop
    # body in it.
    new_func = compiler.compile_ir(
        self._typingctx,
        self._targetctx,
        stencil_ir,
        new_stencil_param_types,
        None,
        compiler.DEFAULT_FLAGS,
        {})
    return new_func
def generic(self, args, kws):
    """
    Type the intrinsic by the arguments.
    """
    from numba.core.target_extension import (get_local_target,
                                             resolve_target_str,
                                             dispatcher_registry)
    from numba.core.imputils import builtin_registry

    cache_key = self.context, args, tuple(kws.items())
    hwstr = self.metadata.get('target', 'generic')
    # Get the class for the target declared by the function
    hw_clazz = resolve_target_str(hwstr)

    # get the local target
    target_hw = get_local_target(self.context)

    # make sure the target_hw is in the MRO for hw_clazz else bail
    if not target_hw.inherits_from(hw_clazz):
        msg = (f"Intrinsic being resolved on a target from which it does "
               f"not inherit. Local target is {target_hw}, declared "
               f"target class is {hw_clazz}.")
        raise InternalError(msg)

    disp = dispatcher_registry[target_hw]
    tgtctx = disp.targetdescr.target_context
    # This is all workarounds...
    # The issue is that whilst targets shouldn't care about which registry
    # in which to register lowering implementations, the CUDA target
    # "borrows" implementations from the CPU from specific registries. This
    # means that if some impl is defined via @intrinsic, e.g. numba.*unsafe
    # modules, _AND_ CUDA also makes use of the same impl, then it's
    # required that the registry in use is one that CUDA borrows from. This
    # leads to the following expression where by the CPU builtin_registry
    # is used if it is in the target context as a known registry (i.e. the
    # target installed it) and if it is not then it is assumed that the
    # registries for the target are unbound to any other target and so it's
    # fine to use any of them as a place to put lowering impls.
    #
    # NOTE: This will need subsequently fixing again when targets use
    # solely the extension APIs to describe their implementation. The issue
    # will be that the builtin_registry should contain _just_ the stack
    # allocated implementations and low level target invariant things and
    # should not be modified further. It should be acceptable to remove the
    # `then` branch and just keep the `else`.

    # In case the target has swapped, e.g. cuda borrowing cpu, refresh to
    # populate.
    tgtctx.refresh()
    if builtin_registry in tgtctx._registries:
        reg = builtin_registry
    else:
        # Pick a registry in which to install intrinsics
        registries = iter(tgtctx._registries)
        reg = next(registries)
    lower_builtin = reg.lower

    try:
        return self._impl_cache[cache_key]
    except KeyError:
        # Cache miss: resolve below, outside the handler, to avoid
        # chaining this KeyError onto any user error.
        pass
    result = self._definition_func(self.context, *args, **kws)
    if result is None:
        return
    [sig, imp] = result
    pysig = utils.pysignature(self._definition_func)
    # omit context argument from user function
    parameters = list(pysig.parameters.values())[1:]
    sig = sig.replace(pysig=pysig.replace(parameters=parameters))
    self._impl_cache[cache_key] = sig
    self._overload_cache[sig.args] = imp
    # register the lowering
    lower_builtin(imp, *sig.args)(imp)
    return sig
def _get_signature(cls, typingctx, fnty, args, kws):
    """Resolve the call signature for *fnty* and attach the Python
    signature of the overload's definition function to it."""
    resolved = fnty.get_call_type(typingctx, args, kws)
    return resolved.replace(pysig=utils.pysignature(cls._overload_func))
def _has_loc(fn):
    """Does function *fn* take ``loc`` argument?
    """
    return 'loc' in utils.pysignature(fn).parameters
def sum_expand(self, args, kws):
    """
    sum can be called with or without an axis parameter, and with or without
    a dtype parameter
    """
    # Fold the 'axis'/'dtype' keywords into positional args, and build a
    # matching stub pysig so argument folding works downstream.
    pysig = None
    if 'axis' in kws and 'dtype' not in kws:
        def sum_stub(axis):
            pass
        pysig = utils.pysignature(sum_stub)
        # rewrite args
        args = list(args) + [kws['axis']]
    elif 'dtype' in kws and 'axis' not in kws:
        def sum_stub(dtype):
            pass
        pysig = utils.pysignature(sum_stub)
        # rewrite args
        args = list(args) + [kws['dtype']]
    elif 'dtype' in kws and 'axis' in kws:
        def sum_stub(axis, dtype):
            pass
        pysig = utils.pysignature(sum_stub)
        # rewrite args
        args = list(args) + [kws['axis'], kws['dtype']]

    args_len = len(args)
    assert args_len <= 2
    if args_len == 0:
        # No axis or dtype parameter so the return type of the summation is
        # a scalar of the type of the array.
        out = signature(_expand_integer(self.this.dtype), *args,
                        recvr=self.this)
    elif args_len == 1 and 'dtype' not in kws:
        # There is an axis parameter, either arg or kwarg
        if self.this.ndim == 1:
            # 1d reduces to a scalar
            return_type = self.this.dtype
        else:
            # the return type of this summation is an array of dimension
            # one less than the input array.
            return_type = types.Array(
                dtype=_expand_integer(self.this.dtype),
                ndim=self.this.ndim-1, layout='C')
        out = signature(return_type, *args, recvr=self.this)
    elif args_len == 1 and 'dtype' in kws:
        # No axis parameter so the return type of the summation is a scalar
        # of the dtype parameter.
        from .npydecl import parse_dtype
        dtype, = args
        dtype = parse_dtype(dtype)
        out = signature(dtype, *args, recvr=self.this)
    elif args_len == 2:
        # There is an axis and dtype parameter, either arg or kwarg
        from .npydecl import parse_dtype
        dtype = parse_dtype(args[1])
        return_type = dtype
        if self.this.ndim != 1:
            # 1d reduces to a scalar, 2d and above reduce dim by 1
            # the return type of this summation is an array of dimension
            # one less than the input array.
            return_type = types.Array(dtype=return_type,
                                      ndim=self.this.ndim-1, layout='C')
        out = signature(return_type, *args, recvr=self.this)
    else:
        # Unreachable given the assert above; `out` would be unbound.
        pass
    return out.replace(pysig=pysig)