def test2(self):
    # Verify that copy propagation keeps the assignment to "x" when the
    # IR of ``test_wont_propagate`` makes propagating it unsafe.
    typingctx = typing.Context()
    targetctx = cpu.CPUContext(typingctx)
    test_ir = compiler.run_frontend(test_wont_propagate)
    #print("Num blocks = ", len(test_ir.blocks))
    #print(test_ir.dump())
    with cpu_target.nested_context(typingctx, targetctx):
        # Refresh so lazily registered typing/lowering entries are visible.
        typingctx.refresh()
        targetctx.refresh()
        args = (types.int64, types.int64, types.int64)
        typemap, return_type, calltypes = type_inference_stage(
            typingctx, test_ir, args, None)
        # NOTE(review): type_annotation appears unused afterwards —
        # presumably constructed for its side effects / parity with other
        # tests; confirm before removing.
        type_annotation = type_annotations.TypeAnnotation(
            func_ir=test_ir,
            typemap=typemap,
            calltypes=calltypes,
            lifted=(),
            lifted_from=None,
            args=args,
            return_type=return_type,
            html_output=config.HTML)
        in_cps, out_cps = copy_propagate(test_ir.blocks, typemap)
        apply_copy_propagate(test_ir.blocks, in_cps,
                             get_name_var_table(test_ir.blocks),
                             typemap, calltypes)
        # The assignment must survive the copy-propagation pass.
        self.assertTrue(findAssign(test_ir, "x"))
def test1(self):
    # Verify that copy propagation followed by dead-code elimination
    # removes the left-hand-side assignment to "x" in
    # ``test_will_propagate``.
    typingctx = typing.Context()
    targetctx = cpu.CPUContext(typingctx)
    test_ir = compiler.run_frontend(test_will_propagate)
    with cpu_target.nested_context(typingctx, targetctx):
        # Refresh so lazily registered typing/lowering entries are visible.
        typingctx.refresh()
        targetctx.refresh()
        args = (types.int64, types.int64, types.int64)
        typemap, return_type, calltypes = type_inference_stage(
            typingctx, test_ir, args, None)
        # NOTE(review): type_annotation appears unused afterwards —
        # presumably kept for parity with sibling tests; confirm.
        type_annotation = type_annotations.TypeAnnotation(
            func_ir=test_ir,
            typemap=typemap,
            calltypes=calltypes,
            lifted=(),
            lifted_from=None,
            args=args,
            return_type=return_type,
            html_output=config.HTML)
        # Del nodes must be removed before running the rewriting passes.
        remove_dels(test_ir.blocks)
        in_cps, out_cps = copy_propagate(test_ir.blocks, typemap)
        apply_copy_propagate(test_ir.blocks, in_cps,
                             get_name_var_table(test_ir.blocks),
                             typemap, calltypes)
        remove_dead(test_ir.blocks, test_ir.arg_names, test_ir)
        # After dead-code elimination the assignment must be gone.
        self.assertFalse(findLhsAssign(test_ir, "x"))
def literal_type(self):
    """Return the resolved type of ``self.literal_value``.

    The result is computed once via a fresh typing context and then
    memoized in ``self._literal_type_cache``.
    """
    cached = self._literal_type_cache
    if cached is not None:
        return cached
    # Imported lazily to avoid a circular import at module load time.
    from numba.core import typing
    ctx = typing.Context()
    self._literal_type_cache = ctx.resolve_value_type(self.literal_value)
    return self._literal_type_cache
def test_integer(self):
    """
    Check unification of every ordered pair of integer types against
    the expected-results table ``self.int_unify``.
    """
    # Fix: the original created an unused ``typing.Context()`` here;
    # assert_unify() builds its own context, so that was dead code.
    for aty, bty in itertools.product(types.integer_domain,
                                      types.integer_domain):
        key = (str(aty), str(bty))
        try:
            expected = self.int_unify[key]
        except KeyError:
            # The table stores each unordered pair once; retry with the
            # reversed key.
            expected = self.int_unify[key[::-1]]
        self.assert_unify(aty, bty, getattr(types, expected))
def assert_unify(self, aty, bty, expected):
    """Assert that *aty* and *bty* unify to *expected*.

    Both ``unify_types`` and ``unify_pairs`` are exercised, in both
    argument orders, since unification must be symmetric.
    """
    ctx = typing.Context()
    fmt = "{0}, {1} -> {2} != {3}"
    for unify in (ctx.unify_types, ctx.unify_pairs):
        for first, second in ((aty, bty), (bty, aty)):
            got = unify(first, second)
            self.assertEqual(got, expected,
                             msg=fmt.format(first, second, got, expected))
def test_ambiguous_error(self):
    """With ``allow_ambiguous=False``, a tie between equally good
    overloads must raise a TypeError listing the candidates."""
    ctx = typing.Context()
    overloads = [i16(i16, i16), i32(i32, i32)]
    with self.assertRaises(TypeError) as cm:
        ctx.resolve_overload("foo", overloads, (i8, i8), {},
                             allow_ambiguous=False)
    expected_lines = [
        "Ambiguous overloading for foo (int8, int8):",
        "(int16, int16) -> int16",
        "(int32, int32) -> int32",
    ]
    self.assertEqual(str(cm.exception).splitlines(), expected_lines)
def mk_pipeline(cls, args, return_type=None, flags=None, locals=None,
                library=None, typing_context=None, target_context=None):
    """Build a pipeline instance for compiling with the given *args*.

    Parameters not supplied are defaulted: a fresh ``Flags`` with NRT
    enabled, a fresh typing context, and a CPU target context bound to
    that typing context.

    Fix: the ``locals`` default was a shared mutable ``{}``; it now uses
    a ``None`` sentinel, which is backward compatible.
    """
    if locals is None:
        locals = {}
    if not flags:
        flags = Flags()
    # NRT is required by the pipelines built here.
    flags.nrt = True
    if typing_context is None:
        typing_context = typing.Context()
    if target_context is None:
        target_context = cpu.CPUContext(typing_context)
    return cls(typing_context, target_context, library, args,
               return_type, flags, locals)
def compile_isolated(func, args, return_type=None, flags=DEFAULT_FLAGS,
                     locals=None):
    """
    Compile the function in an isolated environment (typing and target
    context).
    Good for testing.

    Fix: the ``locals`` default was a shared mutable ``{}``; it now uses
    a ``None`` sentinel, which is backward compatible.
    """
    from numba.core.registry import cpu_target
    if locals is None:
        locals = {}
    typingctx = typing.Context()
    targetctx = cpu.CPUContext(typingctx)
    # Register the contexts in case for nested @jit or @overload calls
    with cpu_target.nested_context(typingctx, targetctx):
        return compile_extra(typingctx, targetctx, func, args,
                             return_type, flags, locals)
def unify_number_pair_test(self, n):
    """
    Test all permutations of N-combinations of numeric types and ensure
    that the order of types in the sequence is irrelevant.
    """
    ctx = typing.Context()
    for combo in itertools.combinations(types.number_domain, n):
        unified = [ctx.unify_types(*perm)
                   for perm in itertools.permutations(combo)]
        reference = unified[0]
        # Sanity check: unifying numbers must yield a number type.
        self.assertIsInstance(reference, types.Number)
        # Every permutation must agree with the first one.
        for other in unified[1:]:
            self.assertEqual(reference, other)
def test_none_to_optional(self):
    """
    Test unification of `none` and multiple number types to optional
    type, independent of argument order.
    """
    ctx = typing.Context()
    for pair in itertools.combinations(types.number_domain, 2):
        members = list(pair)
        # Unify the numeric pair alone first to get the control value.
        expected = types.Optional(ctx.unify_types(*members))
        for perm in itertools.permutations(members + [types.none]):
            self.assertEqual(ctx.unify_types(*perm), expected)
def __init__(self, test_ir, args):
    # Build a minimal compiler state around an existing function IR so
    # that individual compiler passes can be driven manually in tests.
    self.state = compiler.StateDict()
    self.state.typingctx = typing.Context()
    self.state.targetctx = cpu.CPUContext(self.state.typingctx)
    self.state.func_ir = test_ir
    self.state.func_id = test_ir.func_id
    self.state.args = args
    # Fields below start empty/unset; passes are expected to fill them in.
    self.state.return_type = None
    self.state.locals = dict()
    self.state.status = None
    self.state.lifted = dict()
    self.state.lifted_from = None
    # Refresh so lazily registered typing/lowering entries are picked up.
    self.state.typingctx.refresh()
    self.state.targetctx.refresh()
def _run_parfor(cls, test_func, args, swap_map=None):
    # Manually run the front half of the parfor pipeline on *test_func*
    # and return the pipeline object plus parfor pre-pass artifacts.
    # TODO: refactor this with get_optimized_numba_ir() where this is
    # copied from
    typingctx = typing.Context()
    targetctx = cpu.CPUContext(typingctx)
    test_ir = compiler.run_frontend(test_func)
    options = cpu.ParallelOptions(True)
    tp = MyPipeline(typingctx, targetctx, args, test_ir)

    with cpu_target.nested_context(typingctx, targetctx):
        typingctx.refresh()
        targetctx.refresh()

        # Inline closures/calls before any rewriting happens.
        inline_pass = inline_closurecall.InlineClosureCallPass(
            tp.state.func_ir, options, typed=True
        )
        inline_pass.run()

        rewrites.rewrite_registry.apply("before-inference", tp.state)

        untyped_passes.ReconstructSSA().run_pass(tp.state)

        (
            tp.state.typemap, tp.state.return_type, tp.state.calltypes, _
        ) = typed_passes.type_inference_stage(
            tp.state.typingctx, tp.state.func_ir, tp.state.args, None
        )

        typed_passes.PreLowerStripPhis().run_pass(tp.state)

        diagnostics = numba.parfors.parfor.ParforDiagnostics()

        # Pre-parfor pass: optionally swaps functions per *swap_map* and
        # records what was replaced in the diagnostics object.
        preparfor_pass = numba.parfors.parfor.PreParforPass(
            tp.state.func_ir,
            tp.state.typemap,
            tp.state.calltypes,
            tp.state.typingctx,
            options,
            swapped=diagnostics.replaced_fns,
            replace_functions_map=swap_map,
        )
        preparfor_pass.run()

        rewrites.rewrite_registry.apply("after-inference", tp.state)

    return tp, options, diagnostics, preparfor_pass
def literal_type(self):
    """Return the resolved type of ``self.literal_value``, memoized.

    Raises AttributeError when the literal value cannot be resolved to
    a type.
    """
    if self._literal_type_cache is not None:
        return self._literal_type_cache
    # Imported lazily to avoid a circular import at module load time.
    from numba.core import typing
    ctx = typing.Context()
    try:
        resolved = ctx.resolve_value_type(self.literal_value)
    except ValueError:
        # Not all literal types have a literal_value that can be
        # resolved to a type, for example, LiteralStrKeyDict has a
        # literal_value that is a python dict for which there's no
        # `typeof` support.
        msg = "{} has no attribute 'literal_type'".format(self)
        raise AttributeError(msg)
    self._literal_type_cache = resolved
    return self._literal_type_cache
def _context_builder_sig_args(self):
    # Generator yielding (context, builder, sig, args) set up for
    # emitting LLVM IR for a test function of signature int32(int32).
    typing_context = typing.Context()
    context = cpu.CPUContext(typing_context)
    lib = context.codegen().create_library('testing')
    with context.push_code_library(lib):
        module = lc.Module("test_module")

        sig = typing.signature(types.int32, types.int32)
        llvm_fnty = context.call_conv.get_function_type(sig.return_type,
                                                        sig.args)
        # Declared but not yet defined — asserted below.
        function = module.get_or_insert_function(llvm_fnty, name='test_fn')
        args = context.call_conv.get_arguments(function)
        assert function.is_declaration
        entry_block = function.append_basic_block('entry')
        builder = lc.Builder(entry_block)

        # Yield inside the ``with`` so the code library stays pushed
        # while the caller uses the builder.
        yield context, builder, sig, args
def test1(self):
    # Verify that copy propagation followed by dead-code elimination
    # removes the left-hand-side assignment to "x" in
    # ``test_will_propagate``.
    typingctx = typing.Context()
    targetctx = cpu.CPUContext(typingctx)
    test_ir = compiler.run_frontend(test_will_propagate)
    with cpu_target.nested_context(typingctx, targetctx):
        # Refresh so lazily registered typing/lowering entries are visible.
        typingctx.refresh()
        targetctx.refresh()
        args = (types.int64, types.int64, types.int64)
        # NOTE: this variant of type_inference_stage also takes the
        # target context and returns a 4-tuple.
        typemap, _, calltypes, _ = type_inference_stage(
            typingctx, targetctx, test_ir, args, None)
        # Del nodes must be removed before running the rewriting passes.
        remove_dels(test_ir.blocks)
        in_cps, out_cps = copy_propagate(test_ir.blocks, typemap)
        apply_copy_propagate(test_ir.blocks, in_cps,
                             get_name_var_table(test_ir.blocks),
                             typemap, calltypes)
        remove_dead(test_ir.blocks, test_ir.arg_names, test_ir)
        # After dead-code elimination the assignment must be gone.
        self.assertFalse(findLhsAssign(test_ir, "x"))
def compile_to_LLVM(functions_and_signatures, target: TargetInfo,
                    pipeline_class=compiler.Compiler,
                    debug=False):
    """Compile functions with given signatures to target specific LLVM IR.

    Parameters
    ----------
    functions_and_signatures : list
      Specify a list of Python function and its signatures pairs.
    target : TargetInfo
      Specify target device information.
    debug : bool

    Returns
    -------
    module : llvmlite.binding.ModuleRef
      LLVM module instance. To get the IR string, use `str(module)`.
    """
    target_desc = registry.cpu_target
    if target is None:
        # No explicit target: compile for the local host using the
        # globally registered CPU contexts.
        target = TargetInfo.host()
        typing_context = target_desc.typing_context
        target_context = target_desc.target_context
    else:
        typing_context = typing.Context()
        target_context = RemoteCPUContext(typing_context, target)
        # Bring over Array overloads (a hack):
        target_context._defns = target_desc.target_context._defns
    typing_context.target_info = target
    target_context.target_info = target

    codegen = target_context.codegen()
    main_library = codegen.create_library('rbc.irtools.compile_to_IR')
    main_module = main_library._final_module

    flags = compiler.Flags()
    flags.set('no_compile')
    flags.set('no_cpython_wrapper')

    function_names = []
    for func, signatures in functions_and_signatures:
        for sig in signatures:
            # Mangle a unique symbol name per (function, signature) pair.
            fname = func.__name__ + sig.mangling
            function_names.append(fname)
            args, return_type = sigutils.normalize_signature(
                sig.tonumba(bool_is_int8=True))
            cres = compiler.compile_extra(typingctx=typing_context,
                                          targetctx=target_context,
                                          func=func,
                                          args=args,
                                          return_type=return_type,
                                          flags=flags,
                                          library=main_library,
                                          locals={},
                                          pipeline_class=pipeline_class)
            make_wrapper(fname, args, return_type, cres)

    # Link each dependent library in exactly once.
    seen = set()
    for _library in main_library._linking_libraries:
        if _library not in seen:
            seen.add(_library)
            main_module.link_in(
                _library._get_module_for_linking(), preserve=True,
            )
    main_library._optimize_final_module()

    # Catch undefined functions:
    used_functions = set(function_names)
    for fname in function_names:
        deps = get_function_dependencies(main_module, fname)
        for fn, descr in deps.items():
            used_functions.add(fn)
            if descr == 'undefined':
                # Runtime-provided symbols are acceptable when the target
                # advertises the corresponding runtime.
                if fn.startswith('numba_') and target.has_numba:
                    continue
                if fn.startswith('Py') and target.has_cpython:
                    continue
                raise RuntimeError('function `%s` is undefined' % (fn))

    # for global_variable in main_module.global_variables:
    #     global_variable.linkage = llvm.Linkage.private

    unused_functions = [f.name for f in main_module.functions
                        if f.name not in used_functions]
    if debug:
        print('compile_to_IR: the following functions are used')
        for fname in used_functions:
            lf = main_module.get_function(fname)
            print(' [ALIVE]', fname, 'with', lf.linkage)
    if unused_functions:
        if debug:
            print('compile_to_IR: the following functions are not used'
                  ' and will be removed:')
        for fname in unused_functions:
            lf = main_module.get_function(fname)
            if lf.is_declaration:
                # if the function is a declaration,
                # we just put the linkage as external
                lf.linkage = llvm.Linkage.external
            else:
                # but if the function is not a declaration,
                # we change the linkage to private
                lf.linkage = llvm.Linkage.private
            if debug:
                print(' [DEAD]', fname, 'with', lf.linkage)
        # Re-optimize so privatized dead functions get dropped.
        main_library._optimize_final_module()
    # TODO: determine unused global_variables and struct_types
    main_module.verify()
    main_library._finalized = True

    main_module.triple = target.triple
    main_module.data_layout = target.datalayout

    return main_module
def compile_to_LLVM(functions_and_signatures, target_info: TargetInfo,
                    pipeline_class=compiler.Compiler,
                    user_defined_llvm_ir=None,
                    debug=False):
    """Compile functions with given signatures to target specific LLVM IR.

    Parameters
    ----------
    functions_and_signatures : list
      Specify a list of Python function and its signatures pairs.
    target : TargetInfo
      Specify target device information.
    user_defined_llvm_ir : {None, str, ModuleRef}
      Specify user-defined LLVM IR module that is linked in to the
      returned module.
    debug : bool

    Returns
    -------
    module : llvmlite.binding.ModuleRef
      LLVM module instance. To get the IR string, use `str(module)`.
    """
    target_desc = registry.cpu_target
    if target_info is None:
        # RemoteJIT
        target_info = TargetInfo.host()
        typing_context = target_desc.typing_context
        target_context = target_desc.target_context
    else:
        # OmnisciDB target
        if target_info.is_cpu:
            typing_context = typing.Context()
            target_context = JITRemoteCPUContext(typing_context)
        elif target_info.is_gpu:
            typing_context = JITRemoteGPUTypingContext()
            target_context = JITRemoteGPUTargetContext(typing_context)
        else:
            raise ValueError(f'Unknown target {target_info.name}')
        # Bring over Array overloads (a hack):
        target_context._defns = target_desc.target_context._defns

    with replace_numba_internals_hack():
        codegen = target_context.codegen()
        main_library = codegen.create_library('rbc.irtools.compile_to_IR')
        main_module = main_library._final_module

        if user_defined_llvm_ir is not None:
            # Accept either LLVM IR text or an already-parsed module.
            if isinstance(user_defined_llvm_ir, str):
                user_defined_llvm_ir = llvm.parse_assembly(
                    user_defined_llvm_ir)
            assert isinstance(user_defined_llvm_ir, llvm.ModuleRef)
            main_module.link_in(user_defined_llvm_ir, preserve=True)

        succesful_fids = []
        function_names = []
        for func, signatures in functions_and_signatures:
            for fid, sig in signatures.items():
                # compile_instance returns None on failure; only record
                # the ids/names of successful compilations.
                fname = compile_instance(func, sig, target_info,
                                         typing_context,
                                         target_context, pipeline_class,
                                         main_library,
                                         debug=debug)
                if fname is not None:
                    succesful_fids.append(fid)
                    function_names.append(fname)

        main_library._optimize_final_module()

        # Remove unused defined functions and declarations
        used_symbols = defaultdict(set)
        for fname in function_names:
            for k, v in get_called_functions(main_library, fname).items():
                used_symbols[k].update(v)

        all_symbols = get_called_functions(main_library)

        unused_symbols = defaultdict(set)
        for k, lst in all_symbols.items():
            if k == 'libraries':
                continue
            for fn in lst:
                if fn not in used_symbols[k]:
                    unused_symbols[k].add(fn)

        changed = False
        for f in main_module.functions:
            fn = f.name
            if fn.startswith('llvm.'):
                if f.name in unused_symbols['intrinsics']:
                    f.linkage = llvm.Linkage.external
                    changed = True
            elif f.is_declaration:
                if f.name in unused_symbols['declarations']:
                    f.linkage = llvm.Linkage.external
                    changed = True
            else:
                if f.name in unused_symbols['defined']:
                    f.linkage = llvm.Linkage.private
                    changed = True

        # TODO: determine unused global_variables and struct_types

        if changed:
            # Re-optimize so the linkage changes take effect.
            main_library._optimize_final_module()

        main_module.verify()
        main_library._finalized = True

        main_module.triple = target_info.triple
        main_module.data_layout = target_info.datalayout

    return main_module, succesful_fids
def __init__(self):
    # Fresh, isolated typing and CPU target contexts for this instance.
    self.typingctx = typing.Context()
    self.targetctx = cpu.CPUContext(self.typingctx)
    # Cache for compile results; starts empty (contents managed by callers).
    self.cr_cache = {}
def setUp(self):
    # Reset the Dummy class
    Dummy.alive = 0
    # initialize the NRT (in case the tests are run in isolation);
    # the context object itself is created only for this side effect.
    cpu.CPUContext(typing.Context())
def setUp(self):
    super(BaseTestWithLifting, self).setUp()
    # Isolated typing/target contexts plus the default flags used when
    # compiling the test functions in this suite.
    self.typingctx = typing.Context()
    self.targetctx = cpu.CPUContext(self.typingctx)
    self.flags = DEFAULT_FLAGS
def _toplevel_typing_context(self):
    # Lazily-initialized top-level typing context, for all threads.
    # A new Context is constructed on each call; caching, if any, is
    # presumably done by the caller — TODO confirm.
    return typing.Context()
def assert_resolve_overload(self, cases, args, expected):
    """Assert that overload resolution over *cases* for *args* picks
    *expected*."""
    ctx = typing.Context()
    resolved = ctx.resolve_overload("foo", cases, args, {})
    self.assertEqual(resolved, expected)
def test_convert_number_types(self):
    # Check that Context.can_convert() is compatible with the default
    # number conversion rules registered in the typeconv module
    # (which is used internally by the C _Dispatcher object).
    self.check_number_compatibility(typing.Context().can_convert)
def assert_cannot_convert(self, aty, bty):
    """Assert that no conversion exists from *aty* to *bty*."""
    self.assertIsNone(typing.Context().can_convert(aty, bty))
def assert_can_convert(self, aty, bty, expected):
    """Assert that converting *aty* to *bty* has cost/kind *expected*."""
    self.assertEqual(typing.Context().can_convert(aty, bty), expected)
def setUp(self):
    """Create a CPU target context backed by a fresh typing context."""
    self.context = cpu.CPUContext(typing.Context())