def test_target_data_non_default_context(self):
    context = ir.Context()
    mytype = context.get_identified_type("MyType")
    mytype.elements = [ir.IntType(32)]
    module = ir.Module(context=context)
    td = llvm.create_target_data("e-m:e-i64:64-f80:128-n8:16:32:64-S128")
    self.assertEqual(mytype.get_abi_size(td, context=context), 4)
def init(self): from . import cudaimpl, libdevice self._internal_codegen = codegen.JITCUDACodegen("numba.cuda.jit") self.insert_func_defn(cudaimpl.registry.functions) self.insert_func_defn(libdevice.registry.functions) self._target_data = ll.create_target_data(nvvm.default_data_layout)
def _module_pass_manager(self):
    pm = ll.create_module_pass_manager()
    dl = ll.create_target_data(self._data_layout)
    dl.add_pass(pm)
    self._tm.add_analysis_passes(pm)
    with self._pass_manager_builder() as pmb:
        pmb.populate(pm)
    return pm
def init(self): from . import cudaimpl, libdevice self._internal_codegen = codegen.JITCUDACodegen("numba.cuda.jit") self.install_registry(cudaimpl.registry) self.install_registry(libdevice.registry) self.install_registry(cmathimpl.registry) self._target_data = ll.create_target_data(nvvm.default_data_layout)
def test_abi_alignment(self):
    td = llvm.create_target_data("e-m:e-i64:64-f80:128-n8:16:32:64-S128")

    def check(tp, expected):
        self.assertIn(tp.get_abi_alignment(td), expected)

    check(int8, (1, 2, 4))
    check(int32, (4,))
    check(int64, (8,))
    check(ir.ArrayType(int8, 5), (1, 2, 4))
    check(ir.ArrayType(int32, 5), (4,))
    check(ir.LiteralStructType((dbl, flt, flt)), (8,))
def test_abi_size(self):
    td = llvm.create_target_data("e-m:e-i64:64-f80:128-n8:16:32:64-S128")

    def check(tp, expected):
        self.assertEqual(tp.get_abi_size(td), expected)

    check(int8, 1)
    check(int32, 4)
    check(int64, 8)
    check(ir.ArrayType(int8, 5), 5)
    check(ir.ArrayType(int32, 5), 20)
    check(ir.LiteralStructType((dbl, flt, flt)), 16)
def build_pass_managers(**kws):
    mod = kws.get('mod')
    if not mod:
        raise NameError("module must be provided")

    pm = llvm.create_module_pass_manager()

    if kws.get('fpm', True):
        assert isinstance(mod, llvm.ModuleRef)
        fpm = llvm.create_function_pass_manager(mod)
    else:
        fpm = None

    with llvm.create_pass_manager_builder() as pmb:
        pmb.opt_level = opt = kws.get('opt', 2)
        pmb.loop_vectorize = kws.get('loop_vectorize', False)
        pmb.slp_vectorize = kws.get('slp_vectorize', False)
        pmb.inlining_threshold = _inlining_threshold(optlevel=opt)

        if mod:
            dl = llvm.create_target_data(mod.data_layout)
            dl.add_pass(pm)
            if fpm is not None:
                dl.add_pass(fpm)

            tli = llvm.create_target_library_info(mod.triple)
            if kws.get('nobuiltins', False):
                # Disable all builtins (-fno-builtins)
                tli.disable_all()
            else:
                # Disable a list of builtins given
                for k in kws.get('disable_builtins', ()):
                    libf = tli.get_libfunc(k)
                    tli.set_unavailable(libf)
            tli.add_pass(pm)
            if fpm is not None:
                tli.add_pass(fpm)

        tm = kws.get('tm')
        if tm:
            tm.add_analysis_passes(pm)
            if fpm is not None:
                tm.add_analysis_passes(fpm)

        pmb.populate(pm)
        if fpm is not None:
            pmb.populate(fpm)

    return namedtuple("pms", ['pm', 'fpm'])(pm=pm, fpm=fpm)
def __init__(self, module_name):
    self.module = ir.Module(module_name)
    self.module.triple = binding.get_default_triple()
    self.module.data_layout = ''
    self.target_data = binding.create_target_data(self.module.data_layout)
    self.builder = None
    self.terminating_return = None
    self.insert_blocks = []
    self.fns = {}
    self.vars = {}
    self.nstrings = 0
    self.zero = self.getint(0)
    self.memset = None
    self.loops = []
def build_pass_managers(**kws):
    mod = kws.get('mod')
    if not mod:
        raise NameError("module must be provided")

    pm = llvm.create_module_pass_manager()

    if kws.get('fpm', True):
        assert isinstance(mod, llvm.ModuleRef)
        fpm = llvm.create_function_pass_manager(mod)
    else:
        fpm = None

    with llvm.create_pass_manager_builder() as pmb:
        pmb.opt_level = opt = kws.get('opt', 2)
        pmb.loop_vectorize = kws.get('loop_vectorize', False)
        pmb.inlining_threshold = _inline_threshold(optlevel=opt)

        if mod:
            dl = llvm.create_target_data(mod.data_layout)
            dl.add_pass(pm)
            if fpm is not None:
                dl.add_pass(fpm)

            tli = llvm.create_target_library_info(mod.triple)
            if kws.get('nobuiltins', False):
                # Disable all builtins (-fno-builtins)
                tli.disable_all()
            else:
                # Disable a list of builtins given
                for k in kws.get('disable_builtins', ()):
                    libf = tli.get_libfunc(k)
                    tli.set_unavailable(libf)
            tli.add_pass(pm)
            if fpm is not None:
                tli.add_pass(fpm)

        tm = kws.get('tm')
        if tm:
            tm.add_analysis_passes(pm)
            if fpm is not None:
                tm.add_analysis_passes(fpm)

        pmb.populate(pm)
        if fpm is not None:
            pmb.populate(fpm)

    return namedtuple("pms", ['pm', 'fpm'])(pm=pm, fpm=fpm)
def init(self):
    self._internal_codegen = codegen.JITSPIRVCodegen("numba_dppy.jit")
    self._target_data = ll.create_target_data(
        codegen.SPIR_DATA_LAYOUT[utils.MACHINE_BITS])
    # Override data model manager to SPIR model
    import numba.cpython.unicode
    self.data_model_manager = _init_data_model_manager()
    self.extra_compile_options = dict()

    import copy
    from numba.np.ufunc_db import _lazy_init_db
    _lazy_init_db()
    from numba.np.ufunc_db import _ufunc_db as ufunc_db
    self.ufunc_db = copy.deepcopy(ufunc_db)

    self.cpu_context = cpu_target.target_context
def _init(self, llvm_module): assert list(llvm_module.global_variables) == [], "Module isn't empty" self._data_layout = DATALAYOUT[utils.MACHINE_BITS] self._target_data = ll.create_target_data(self._data_layout)
def _init(self, llvm_module): assert list(llvm_module.global_variables) == [], "Module isn't empty" self._data_layout = nvvm.default_data_layout self._target_data = ll.create_target_data(self._data_layout)
def _get_target_data(context):
    return ll.create_target_data(hlc.DATALAYOUT[context.address_size])
b = binding.create_context()

i64 = ir.IntType(64)
fp = ir.DoubleType()
boolean = ir.IntType(1)
unit = 'll_unit'

module = ir.Module(name='Default')

fnty = ir.FunctionType(ir.VoidType(), ())
ext_func = ir.Function(module, fnty, name='test')

fnty = ir.FunctionType(i64, (i64,))
runtime_my_malloc = ir.Function(module, fnty, name='My_Malloc')

fnty = ir.FunctionType(i64, (i64,))
runtime_print_int = ir.Function(module, fnty, name='Print_int')

t_data = binding.create_target_data('e-m:o-i64:64-f80:128-n8:16:32:64-S128')


def gen_unit(t):
    return t(None)


defaults = {
    NamedType('Int'): i64,
    NamedType('Float'): fp,
    NamedType('Bool'): boolean
}


def llvm_fun_type(ftype, nst, ctx):
    assert isinstance(ftype, FunType)
    lltypes = ctx['llvm_custom_types']
    p_type, ret_type = ftype.args, ftype.to
    if isinstance(p_type, tuple):
sys.exit("Error: Too few arguments\n%s" % USAGE) if len(sys.argv) > 2: warnings.warn("Ignoring extra arguments: %s" % (sys.argv[2:],)) if sys.argv[1] == "-h": print(USAGE) else: show_file(sys.argv[1]) debug_prints = True ############## End Stuff to print out pyc file module = ir.Module(name=__file__) module.triple = "x86_64-pc-linux-gnu" td = llvm.create_target_data("e-m:e-i64:64-f80:128-n8:16:32:64-S128") from irtypes import * from magic import * const_map = {} noimp = ir.GlobalVariable(module,pynoimp_type,"global_noimp") const_map[(type(noimp),noimp) ] = noimp tp_pers = ir.FunctionType(int32, (), var_arg=True) pers = ir.Function(module, tp_pers, '__gxx_personality_v0') i=0 def get_int_array(a):
def _get_target_data(context):
    return ll.create_target_data(nvvm.data_layout[context.address_size])
def target_data(self):
    return llvm.create_target_data("e-m:e-i64:64-f80:128-n8:16:32:64-S128")
def target_data(self):
    return binding.create_target_data(self.module.data_layout)
def _get_target_data(context):
    return ll.create_target_data(SPIR_DATA_LAYOUT[context.address_size])
def init(self): self._internal_codegen = codegen.JITHSACodegen("numba.hsa.jit") self._target_data = ll.create_target_data(DATALAYOUT[utils.MACHINE_BITS]) # Override data model manager self.data_model_manager = hsa_data_model_manager
def init(self):
    self._internal_codegen = codegen.JITCUDACodegen("numba.cuda.jit")
    self._target_data = ll.create_target_data(nvvm.default_data_layout)
def generate_vartypes(module=None):
    _vartypes = Map({
        # singleton
        'u1': Bool(),
        'i8': SignedInt(8),
        'i16': SignedInt(16),
        'i32': SignedInt(32),
        'i64': SignedInt(64),
        'u8': UnsignedInt(8),
        'u16': UnsignedInt(16),
        'u32': UnsignedInt(32),
        'u64': UnsignedInt(64),
        'f64': Float64(),
        'u_size': None,  # u_size is set on init

        # non-singleton
        'array': Array,
        'func': ir.FunctionType,

        # object types
        'str': Str,
        'ptrobj': Ptr,
        'None': NoneType,
        # 'any': Any
    })

    # TODO: add c_type to native llvmlite float
    # as we did with int
    _vartypes.f64.c_type = ctypes.c_longdouble

    # add these types in manually, since they just shadow existing ones
    _vartypes['bool'] = _vartypes.u1
    _vartypes['byte'] = _vartypes.u8

    _vartypes.func.is_obj = True

    _vartypes._DEFAULT_TYPE = _vartypes.i32
    _vartypes._DEFAULT_RETURN_VALUE = ir.Constant(_vartypes.i32, 0)

    # if no module, assume platform
    if not module:
        module = ir.Module()

    # Initialize target data for the module.
    target_data = binding.create_target_data(module.data_layout)

    # Set up pointer size and u_size vartype for current hardware.
    _vartypes._pointer_size = (ir.PointerType(
        _vartypes.u8).get_abi_size(target_data))
    _vartypes._pointer_bitwidth = _vartypes._pointer_size * 8

    _vartypes._target_data = target_data

    _vartypes['u_size'] = UnsignedInt(_vartypes._pointer_bitwidth)
    _vartypes['u_mem'] = UnsignedInt(_vartypes._pointer_size)

    for _, n in enumerate(_vartypes):
        if not n.startswith('_'):
            _vartypes[n].box_id = _

    return _vartypes
def __init__(self, module_name):
    self._data_layout = nvvm.data_layout
    self._target_data = ll.create_target_data(self._data_layout)
def _generic_array(context, builder, shape, dtype, symbol_name, addrspace,
                   can_dynsized=False):
    elemcount = reduce(operator.mul, shape, 1)

    # Check for valid shape for this type of allocation.
    # Only 1d arrays can be dynamic.
    dynamic_smem = elemcount <= 0 and can_dynsized and len(shape) == 1
    if elemcount <= 0 and not dynamic_smem:
        raise ValueError("array length <= 0")

    # Check that we support the requested dtype
    data_model = context.data_model_manager[dtype]
    other_supported_type = (isinstance(dtype, (types.Record, types.Boolean))
                            or isinstance(data_model, models.StructModel)
                            or dtype == types.float16)
    if dtype not in types.number_domain and not other_supported_type:
        raise TypeError("unsupported type: %s" % dtype)

    lldtype = context.get_data_type(dtype)
    laryty = ir.ArrayType(lldtype, elemcount)

    if addrspace == nvvm.ADDRSPACE_LOCAL:
        # Special case local address space allocation to use alloca
        # NVVM is smart enough to only use local memory if no register is
        # available
        dataptr = cgutils.alloca_once(builder, laryty, name=symbol_name)
    else:
        lmod = builder.module

        # Create global variable in the requested address space
        gvmem = cgutils.add_global_variable(lmod, laryty, symbol_name,
                                            addrspace)
        # Specify alignment to avoid misalignment bug
        align = context.get_abi_sizeof(lldtype)
        # Alignment is required to be a power of 2 for shared memory. If it is
        # not a power of 2 (e.g. for a Record array) then round up accordingly.
        gvmem.align = 1 << (align - 1).bit_length()

        if dynamic_smem:
            gvmem.linkage = 'external'
        else:
            ## Comment out the following line to workaround a NVVM bug
            ## which generates a invalid symbol name when the linkage
            ## is internal and in some situation.
            ## See _get_unique_smem_id()
            # gvmem.linkage = lc.LINKAGE_INTERNAL

            gvmem.initializer = ir.Constant(laryty, ir.Undefined)

        # Convert to generic address-space
        conv = nvvmutils.insert_addrspace_conv(lmod, ir.IntType(8), addrspace)
        addrspaceptr = gvmem.bitcast(ir.PointerType(ir.IntType(8), addrspace))
        dataptr = builder.call(conv, [addrspaceptr])

    targetdata = ll.create_target_data(nvvm.data_layout)
    lldtype = context.get_data_type(dtype)
    itemsize = lldtype.get_abi_size(targetdata)

    # Compute strides
    laststride = itemsize
    rstrides = []
    for i, lastsize in enumerate(reversed(shape)):
        rstrides.append(laststride)
        laststride *= lastsize
    strides = [s for s in reversed(rstrides)]
    kstrides = [context.get_constant(types.intp, s) for s in strides]

    # Compute shape
    if dynamic_smem:
        # Compute the shape based on the dynamic shared memory configuration.
        # Unfortunately NVVM does not provide an intrinsic for the
        # %dynamic_smem_size register, so we must read it using inline
        # assembly.
        get_dynshared_size = ir.InlineAsm(ir.FunctionType(ir.IntType(32), []),
                                          "mov.u32 $0, %dynamic_smem_size;",
                                          '=r', side_effect=True)
        dynsmem_size = builder.zext(builder.call(get_dynshared_size, []),
                                    ir.IntType(64))
        # Only 1-D dynamic shared memory is supported so the following is a
        # sufficient construction of the shape
        kitemsize = context.get_constant(types.intp, itemsize)
        kshape = [builder.udiv(dynsmem_size, kitemsize)]
    else:
        kshape = [context.get_constant(types.intp, s) for s in shape]

    # Create array object
    ndim = len(shape)
    aryty = types.Array(dtype=dtype, ndim=ndim, layout='C')
    ary = context.make_array(aryty)(context, builder)

    context.populate_array(ary,
                           data=builder.bitcast(dataptr, ary.data.type),
                           shape=kshape,
                           strides=kstrides,
                           itemsize=context.get_constant(types.intp, itemsize),
                           meminfo=None)
    return ary._getvalue()
def init(self): self._internal_codegen = codegen.JITHSACodegen("numba.hsa.jit") self._target_data = \ ll.create_target_data(DATALAYOUT[utils.MACHINE_BITS]) # Override data model manager self.data_model_manager = hsa_data_model_manager
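The snippets above all follow the same pattern: build a TargetData from a data-layout string (or a module's data layout) and then query ABI sizes and alignments of llvmlite IR types against it. The following is a minimal standalone sketch of that pattern, not taken from any of the projects above; the variable names and the sample data-layout string are illustrative only.

# Minimal sketch: create a TargetData and query ABI size/alignment of IR types.
import llvmlite.binding as llvm
import llvmlite.ir as ir

# Initialize the LLVM bindings (harmless if already done elsewhere).
llvm.initialize()
llvm.initialize_native_target()
llvm.initialize_native_asmprinter()

# Same x86-64 layout string used by several of the examples above.
td = llvm.create_target_data("e-m:e-i64:64-f80:128-n8:16:32:64-S128")

i32 = ir.IntType(32)
struct = ir.LiteralStructType((ir.DoubleType(), ir.FloatType(), ir.FloatType()))

print(i32.get_abi_size(td))          # 4 bytes on this layout
print(struct.get_abi_size(td))       # 16 bytes (double + two floats, padded)
print(struct.get_abi_alignment(td))  # 8 bytes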