def int_power_func_body(context, builder, x, y):
    pcounter = cgutils.alloca_once(builder, y.type)
    presult = cgutils.alloca_once(builder, x.type)
    result = Constant.int(x.type, 1)
    counter = y
    builder.store(counter, pcounter)
    builder.store(result, presult)

    bbcond = cgutils.append_basic_block(builder, ".cond")
    bbbody = cgutils.append_basic_block(builder, ".body")
    bbexit = cgutils.append_basic_block(builder, ".exit")

    del counter
    del result

    builder.branch(bbcond)

    with cgutils.goto_block(builder, bbcond):
        counter = builder.load(pcounter)
        ONE = Constant.int(counter.type, 1)
        ZERO = Constant.null(counter.type)
        builder.store(builder.sub(counter, ONE), pcounter)
        pred = builder.icmp(lc.ICMP_SGT, counter, ZERO)
        builder.cbranch(pred, bbbody, bbexit)

    with cgutils.goto_block(builder, bbbody):
        result = builder.load(presult)
        builder.store(builder.mul(result, x), presult)
        builder.branch(bbcond)

    builder.position_at_end(bbexit)
    return builder.load(presult)

def call_function(self, builder, callee, resty, argtys, args, env=None):
    """
    Call the Numba-compiled *callee*.
    """
    if env is None:
        # This only works with functions that don't use the environment
        # (nopython functions).
        env = cgutils.get_null_value(PYOBJECT)
    is_generator_function = isinstance(resty, types.Generator)
    retty = self._get_return_argument(callee).type.pointee
    retvaltmp = cgutils.alloca_once(builder, retty)
    # initialize return value to zeros
    builder.store(cgutils.get_null_value(retty), retvaltmp)

    excinfoptr = cgutils.alloca_once(builder, ir.PointerType(excinfo_t),
                                     name="excinfo")

    arginfo = self.context.get_arg_packer(argtys)
    args = list(arginfo.as_arguments(builder, args))
    realargs = [retvaltmp, excinfoptr, env] + args
    code = builder.call(callee, realargs)
    status = self._get_return_status(builder, code,
                                     builder.load(excinfoptr))
    if is_generator_function:
        retval = retvaltmp
    else:
        retval = builder.load(retvaltmp)
    out = self.context.get_returned_value(builder, resty, retval)
    return status, out

def string_as_string_and_size(self, strobj):
    """
    Returns a tuple of ``(ok, buffer, length)``.
    The ``ok`` is an i1 value that is set if ok.
    The ``buffer`` is an i8* of the output buffer.
    The ``length`` is an i32/i64 (py_ssize_t) of the length of the buffer.
    """
    p_length = cgutils.alloca_once(self.builder, self.py_ssize_t)
    if PYVERSION >= (3, 0):
        fnty = Type.function(self.cstring, [self.pyobj,
                                            self.py_ssize_t.as_pointer()])
        fname = "PyUnicode_AsUTF8AndSize"
        fn = self._get_function(fnty, name=fname)

        buffer = self.builder.call(fn, [strobj, p_length])
        ok = self.builder.icmp_unsigned('!=',
                                        ir.Constant(buffer.type, None),
                                        buffer)
    else:
        fnty = Type.function(lc.Type.int(),
                             [self.pyobj, self.cstring.as_pointer(),
                              self.py_ssize_t.as_pointer()])
        fname = "PyString_AsStringAndSize"
        fn = self._get_function(fnty, name=fname)
        # Allocate space for the output parameters
        p_buffer = cgutils.alloca_once(self.builder, self.cstring)

        status = self.builder.call(fn, [strobj, p_buffer, p_length])

        negone = ir.Constant(status.type, -1)
        ok = self.builder.icmp_signed("!=", status, negone)
        buffer = self.builder.load(p_buffer)

    return (ok, buffer, self.builder.load(p_length))

def init_specific(self, context, builder, arrty, arr):
    zero = context.get_constant(types.intp, 0)
    data = arr.data
    ndim = arrty.ndim
    shapes = cgutils.unpack_tuple(builder, arr.shape, ndim)

    indices = cgutils.alloca_once(builder, zero.type,
                                  size=context.get_constant(types.intp,
                                                            arrty.ndim))
    pointers = cgutils.alloca_once(builder, data.type,
                                   size=context.get_constant(types.intp,
                                                             arrty.ndim))
    strides = cgutils.unpack_tuple(builder, arr.strides, ndim)
    exhausted = cgutils.alloca_once_value(builder, cgutils.false_byte)

    # Initialize indices and pointers with their start values.
    for dim in range(ndim):
        idxptr = cgutils.gep(builder, indices, dim)
        ptrptr = cgutils.gep(builder, pointers, dim)
        builder.store(data, ptrptr)
        builder.store(zero, idxptr)
        # 0-sized dimensions really indicate an empty array,
        # but we have to catch that condition early to avoid
        # a bug inside the iteration logic (see issue #846).
        dim_size = shapes[dim]
        dim_is_empty = builder.icmp(lc.ICMP_EQ, dim_size, zero)
        with cgutils.if_unlikely(builder, dim_is_empty):
            builder.store(cgutils.true_byte, exhausted)

    self.indices = indices
    self.pointers = pointers
    self.exhausted = exhausted

def impl_iterator_iternext(context, builder, sig, args, result):
    iter_type = sig.args[0]
    it = context.make_helper(builder, iter_type, args[0])

    p2p_bytes = ll_bytes.as_pointer()

    iternext_fnty = ir.FunctionType(
        ll_status,
        [ll_bytes, p2p_bytes, p2p_bytes]
    )
    iternext = builder.module.get_or_insert_function(
        iternext_fnty,
        name='numba_dict_iter_next',
    )
    key_raw_ptr = cgutils.alloca_once(builder, ll_bytes)
    val_raw_ptr = cgutils.alloca_once(builder, ll_bytes)

    status = builder.call(iternext, (it.state, key_raw_ptr, val_raw_ptr))
    # TODO: no handling of error state i.e. mutated dictionary
    #       all errors are treated as exhausted iterator
    is_valid = builder.icmp_unsigned('==', status, status.type(0))
    result.set_valid(is_valid)

    with builder.if_then(is_valid):
        yield_type = iter_type.yield_type
        key_ty, val_ty = iter_type.parent.keyvalue_type

        dm_key = context.data_model_manager[key_ty]
        dm_val = context.data_model_manager[val_ty]

        key_ptr = builder.bitcast(
            builder.load(key_raw_ptr),
            dm_key.get_data_type().as_pointer(),
        )
        val_ptr = builder.bitcast(
            builder.load(val_raw_ptr),
            dm_val.get_data_type().as_pointer(),
        )

        key = dm_key.load_from_data_pointer(builder, key_ptr)
        val = dm_val.load_from_data_pointer(builder, val_ptr)

        # All dict iterators use this common implementation.
        # Their differences are resolved here.
        if isinstance(iter_type.iterable, DictItemsIterableType):
            # .items()
            tup = context.make_tuple(builder, yield_type, [key, val])
            result.yield_(tup)
        elif isinstance(iter_type.iterable, DictKeysIterableType):
            # .keys()
            result.yield_(key)
        elif isinstance(iter_type.iterable, DictValuesIterableType):
            # .values()
            result.yield_(val)
        else:
            # unreachable
            raise AssertionError('unknown type: {}'.format(iter_type.iterable))

def codegen(context, builder, sig, args):
    fnty = ir.FunctionType(
        ll_status,
        [ll_dict_type, ll_bytes, ll_hash, ll_bytes, ll_bytes],
    )
    [d, key, hashval, val] = args
    [td, tkey, thashval, tval] = sig.args
    fn = builder.module.get_or_insert_function(fnty, name='numba_dict_insert')

    dm_key = context.data_model_manager[tkey]
    dm_val = context.data_model_manager[tval]

    data_key = dm_key.as_data(builder, key)
    data_val = dm_val.as_data(builder, val)

    ptr_key = cgutils.alloca_once_value(builder, data_key)
    ptr_val = cgutils.alloca_once_value(builder, data_val)
    # TODO: the ptr_oldval is not used.  needed for refct
    ptr_oldval = cgutils.alloca_once(builder, data_val.type)

    dp = _dict_get_data(context, builder, td, d)
    status = builder.call(
        fn,
        [
            dp,
            _as_bytes(builder, ptr_key),
            hashval,
            _as_bytes(builder, ptr_val),
            _as_bytes(builder, ptr_oldval),
        ],
    )
    return status

def impl_dict_getiter(context, builder, sig, args):
    """Implement iter(Dict).  Semantically equivalent to dict.keys()
    """
    [td] = sig.args
    [d] = args
    iterablety = types.DictKeysIterableType(td)
    it = context.make_helper(builder, iterablety.iterator_type)

    fnty = ir.FunctionType(
        ir.VoidType(),
        [ll_dictiter_type, ll_dict_type],
    )
    fn = builder.module.get_or_insert_function(fnty, name='numba_dict_iter')

    proto = ctypes.CFUNCTYPE(ctypes.c_size_t)
    dictiter_sizeof = proto(_helperlib.c_helpers['dict_iter_sizeof'])
    state_type = ir.ArrayType(ir.IntType(8), dictiter_sizeof())

    pstate = cgutils.alloca_once(builder, state_type, zfill=True)
    it.state = _as_bytes(builder, pstate)
    it.parent = d

    dp = _dict_get_data(context, builder, iterablety.parent, args[0])
    builder.call(fn, [it.state, dp])
    return impl_ret_borrowed(
        context,
        builder,
        sig.return_type,
        it._getvalue(),
    )

def alloc_timedelta_result(builder, name='ret'):
    """
    Allocate a NaT-initialized datetime64 (or timedelta64) result slot.
    """
    ret = cgutils.alloca_once(builder, TIMEDELTA64, name=name)
    builder.store(NAT, ret)
    return ret

def timedelta_floor_div_timedelta(context, builder, sig, args):
    [va, vb] = args
    [ta, tb] = sig.args
    ll_ret_type = context.get_value_type(sig.return_type)
    not_nan = are_not_nat(builder, [va, vb])
    ret = cgutils.alloca_once(builder, ll_ret_type, name='ret')
    zero = Constant.int(ll_ret_type, 0)
    one = Constant.int(ll_ret_type, 1)
    builder.store(zero, ret)
    with cgutils.if_likely(builder, not_nan):
        va, vb = normalize_timedeltas(context, builder, va, vb, ta, tb)
        # is the denominator zero or NaT?
        denom_ok = builder.not_(builder.icmp_signed('==', vb, zero))
        with cgutils.if_likely(builder, denom_ok):
            # is either arg negative?
            vaneg = builder.icmp_signed('<', va, zero)
            neg = builder.or_(vaneg, builder.icmp_signed('<', vb, zero))
            with builder.if_else(neg) as (then, otherwise):
                with then:  # one or more value negative
                    with builder.if_else(vaneg) as (negthen, negotherwise):
                        with negthen:
                            top = builder.sub(va, one)
                            div = builder.sdiv(top, vb)
                            builder.store(div, ret)
                        with negotherwise:
                            top = builder.add(va, one)
                            div = builder.sdiv(top, vb)
                            builder.store(div, ret)
                with otherwise:
                    div = builder.sdiv(va, vb)
                    builder.store(div, ret)
    res = builder.load(ret)
    return impl_ret_untracked(context, builder, sig.return_type, res)

def iternext_zip(context, builder, sig, args, result):
    [zip_type] = sig.args
    [zipobj] = args

    zipobj = context.make_helper(builder, zip_type, value=zipobj)

    if len(zipobj) == 0:
        # zip() is an empty iterator
        result.set_exhausted()
        return

    p_ret_tup = cgutils.alloca_once(
        builder, context.get_value_type(zip_type.yield_type))
    p_is_valid = cgutils.alloca_once_value(builder, value=cgutils.true_bit)

    for i, (iterobj, srcty) in enumerate(zip(zipobj, zip_type.source_types)):
        is_valid = builder.load(p_is_valid)
        # Avoid calling the remaining iternexts if an iterator has been
        # exhausted
        with builder.if_then(is_valid):
            srcres = call_iternext(context, builder, srcty, iterobj)
            is_valid = builder.and_(is_valid, srcres.is_valid())
            builder.store(is_valid, p_is_valid)
            val = srcres.yielded_value()
            ptr = cgutils.gep_inbounds(builder, p_ret_tup, 0, i)
            builder.store(val, ptr)

    is_valid = builder.load(p_is_valid)
    result.set_valid(is_valid)

    with builder.if_then(is_valid):
        result.yield_(builder.load(p_ret_tup))

def to_native_arg(self, obj, typ):
    if isinstance(typ, types.Record):
        # Generate a dummy integer type that has the size of Py_buffer
        dummy_py_buffer_type = Type.int(_helperlib.py_buffer_size * 8)
        # Allocate the Py_buffer
        py_buffer = cgutils.alloca_once(self.builder, dummy_py_buffer_type)

        # Zero-fill the py_buffer.  When the obj field in Py_buffer is NULL,
        # PyBuffer_Release has no effect.
        zeroed_buffer = lc.Constant.null(dummy_py_buffer_type)
        self.builder.store(zeroed_buffer, py_buffer)

        buf_as_voidptr = self.builder.bitcast(py_buffer, self.voidptr)
        ptr = self.extract_record_data(obj, buf_as_voidptr)

        with cgutils.if_unlikely(self.builder,
                                 cgutils.is_null(self.builder, ptr)):
            self.builder.ret(ptr)

        ltyp = self.context.get_value_type(typ)
        val = cgutils.init_record_by_ptr(self.builder, ltyp, ptr)

        def dtor():
            self.release_record_buffer(buf_as_voidptr)

    else:
        val = self.to_native_value(obj, typ)

        def dtor():
            pass

    return val, dtor

def _gauss_impl(context, builder, sig, args, state):
    # The type for all computations (either float or double)
    ty = sig.return_type
    llty = context.get_data_type(ty)

    state_ptr = get_state_ptr(context, builder, state)
    _random = {"py": random.random,
               "np": np.random.random}[state]

    ret = cgutils.alloca_once(builder, llty, name="result")

    gauss_ptr = get_gauss_ptr(builder, state_ptr)
    has_gauss_ptr = get_has_gauss_ptr(builder, state_ptr)
    has_gauss = cgutils.is_true(builder, builder.load(has_gauss_ptr))
    with builder.if_else(has_gauss) as (then, otherwise):
        with then:
            # if has_gauss: return it
            builder.store(builder.load(gauss_ptr), ret)
            builder.store(const_int(0), has_gauss_ptr)
        with otherwise:
            # if not has_gauss: compute a pair of numbers using the
            # Box-Muller transform; keep one and return the other
            pair = context.compile_internal(builder,
                                            _gauss_pair_impl(_random),
                                            signature(types.UniTuple(ty, 2)),
                                            ())
            first, second = cgutils.unpack_tuple(builder, pair, 2)
            builder.store(first, gauss_ptr)
            builder.store(second, ret)
            builder.store(const_int(1), has_gauss_ptr)

    mu, sigma = args
    return builder.fadd(mu, builder.fmul(sigma, builder.load(ret)))

def dot_2_vv(context, builder, sig, args, conjugate=False):
    """
    np.dot(vector, vector)
    np.vdot(vector, vector)
    """
    aty, bty = sig.args
    dtype = sig.return_type
    a = make_array(aty)(context, builder, args[0])
    b = make_array(bty)(context, builder, args[1])
    n, = cgutils.unpack_tuple(builder, a.shape)

    def check_args(a, b):
        m, = a.shape
        n, = b.shape
        if m != n:
            raise ValueError("incompatible array sizes for np.dot(a, b) "
                             "(vector * vector)")

    context.compile_internal(builder, check_args,
                             signature(types.none, *sig.args), args)
    check_c_int(context, builder, n)

    out = cgutils.alloca_once(builder, context.get_value_type(dtype))
    call_xxdot(context, builder, conjugate, dtype, n, a.data, b.data, out)
    return builder.load(out)

def to_native_optional(self, obj, typ):
    """
    Convert object *obj* to a native optional structure.
    """
    noneval = self.context.make_optional_none(self.builder, typ.type)
    is_not_none = self.builder.icmp(lc.ICMP_NE, obj, self.borrow_none())

    retptr = cgutils.alloca_once(self.builder, noneval.type)
    errptr = cgutils.alloca_once_value(self.builder, cgutils.false_bit)

    with cgutils.ifelse(self.builder, is_not_none) as (then, orelse):
        with then:
            native = self.to_native_value(obj, typ.type)
            just = self.context.make_optional_value(self.builder,
                                                    typ.type, native.value)
            self.builder.store(just, retptr)
            self.builder.store(native.is_error, errptr)

        with orelse:
            self.builder.store(ir.Constant(noneval.type, ir.Undefined),
                               retptr)
            self.builder.store(noneval, retptr)

    if native.cleanup is not None:
        def cleanup():
            with cgutils.ifthen(self.builder, is_not_none):
                native.cleanup()
    else:
        cleanup = None

    ret = self.builder.load(retptr)
    return NativeValue(ret, is_error=self.builder.load(errptr),
                       cleanup=cleanup)

def _long_from_native_int(self, ival, func_name, native_int_type, signed):
    fnty = Type.function(self.pyobj, [native_int_type])
    fn = self._get_function(fnty, name=func_name)
    resptr = cgutils.alloca_once(self.builder, self.pyobj)

    if PYVERSION < (3, 0):
        # Under Python 2, we try to return a PyInt object whenever
        # the given number fits in a C long.
        pyint_fnty = Type.function(self.pyobj, [self.long])
        pyint_fn = self._get_function(pyint_fnty, name="PyInt_FromLong")
        long_max = Constant.int(native_int_type, _helperlib.long_max)
        if signed:
            long_min = Constant.int(native_int_type, _helperlib.long_min)
            use_pyint = self.builder.and_(
                self.builder.icmp(lc.ICMP_SGE, ival, long_min),
                self.builder.icmp(lc.ICMP_SLE, ival, long_max),
            )
        else:
            use_pyint = self.builder.icmp(lc.ICMP_ULE, ival, long_max)

        with self.builder.if_else(use_pyint) as (then, otherwise):
            with then:
                downcast_ival = self.builder.trunc(ival, self.long)
                res = self.builder.call(pyint_fn, [downcast_ival])
                self.builder.store(res, resptr)
            with otherwise:
                res = self.builder.call(fn, [ival])
                self.builder.store(res, resptr)
    else:
        fn = self._get_function(fnty, name=func_name)
        self.builder.store(self.builder.call(fn, [ival]), resptr)

    return self.builder.load(resptr)

def get_constant_generic(self, builder, ty, val):
    """
    Return a LLVM constant representing value *val* of Numba type *ty*.
    """
    if isinstance(ty, types.ExternalFunctionPointer):
        ptrty = self.get_function_pointer_type(ty)
        ptrval = ty.get_pointer(val)
        return builder.inttoptr(self.get_constant(types.intp, ptrval),
                                ptrty)

    elif isinstance(ty, types.Array):
        return self.make_constant_array(builder, ty, val)

    elif isinstance(ty, types.Dummy):
        return self.get_dummy_value()

    elif self.is_struct_type(ty):
        struct = self.get_constant_struct(builder, ty, val)
        if isinstance(ty, types.Record):
            ptrty = self.data_model_manager[ty].get_data_type()
            ptr = cgutils.alloca_once(builder, ptrty)
            builder.store(struct, ptr)
            return ptr
        return struct

    else:
        return self.get_constant(ty, val)

def codegen(context, builder, signature, args):
    tup, idx, val = args
    stack = alloca_once(builder, tup.type)
    builder.store(tup, stack)
    # Unsafe load on unchecked bounds.  Poison value may be returned.
    offptr = builder.gep(stack, [idx.type(0), idx], inbounds=True)
    builder.store(val, offptr)
    return builder.load(stack)

def poisson_impl(context, builder, sig, args):
    state_ptr = get_np_state_ptr(context, builder)

    retptr = cgutils.alloca_once(builder, int64_t, name="ret")
    bbcont = builder.append_basic_block("bbcont")
    bbend = builder.append_basic_block("bbend")

    if len(args) == 1:
        lam, = args
        big_lam = builder.fcmp_ordered('>=', lam, ir.Constant(double, 10.0))
        with builder.if_then(big_lam):
            # For lambda >= 10.0, we switch to a more accurate
            # algorithm (see _random.c).
            fnty = ir.FunctionType(int64_t, (rnd_state_ptr_t, double))
            fn = builder.function.module.get_or_insert_function(
                fnty, "numba_poisson_ptrs")
            ret = builder.call(fn, (state_ptr, lam))
            builder.store(ret, retptr)
            builder.branch(bbend)

    builder.branch(bbcont)
    builder.position_at_end(bbcont)

    _random = np.random.random
    _exp = math.exp

    def poisson_impl(lam):
        """Numpy's algorithm for poisson() on small *lam*.

        This method is invoked only if the parameter lambda of the
        distribution is small ( < 10 ). The algorithm used is described
        in "Knuth, D. 1969. 'Seminumerical Algorithms. The Art of
        Computer Programming' vol 2.
        """
        if lam < 0.0:
            raise ValueError("poisson(): lambda < 0")
        if lam == 0.0:
            return 0
        enlam = _exp(-lam)
        X = 0
        prod = 1.0
        while 1:
            U = _random()
            prod *= U
            if prod <= enlam:
                return X
            X += 1

    if len(args) == 0:
        sig = signature(sig.return_type, types.float64)
        args = (ir.Constant(double, 1.0),)

    ret = context.compile_internal(builder, poisson_impl, sig, args)
    builder.store(ret, retptr)
    builder.branch(bbend)
    builder.position_at_end(bbend)
    res = builder.load(retptr)
    return impl_ret_untracked(context, builder, sig.return_type, res)

def from_range_state(cls, context, builder, state):
    """
    Create a RangeIter initialized from the given RangeState *state*.
    """
    self = cls(context, builder)

    start = state.start
    stop = state.stop
    step = state.step

    startptr = cgutils.alloca_once(builder, start.type)
    builder.store(start, startptr)

    countptr = cgutils.alloca_once(builder, start.type)

    self.iter = startptr
    self.stop = stop
    self.step = step
    self.count = countptr

    diff = builder.sub(stop, start)
    zero = context.get_constant(int_type, 0)
    one = context.get_constant(int_type, 1)
    pos_diff = builder.icmp(lc.ICMP_SGT, diff, zero)
    pos_step = builder.icmp(lc.ICMP_SGT, step, zero)
    sign_differs = builder.xor(pos_diff, pos_step)
    zero_step = builder.icmp(lc.ICMP_EQ, step, zero)

    with cgutils.if_unlikely(builder, zero_step):
        # step shouldn't be zero
        context.call_conv.return_user_exc(builder, ValueError,
                                          ("range() arg 3 must not be zero",))

    with builder.if_else(sign_differs) as (then, orelse):
        with then:
            builder.store(zero, self.count)

        with orelse:
            rem = builder.srem(diff, step)
            rem = builder.select(pos_diff, rem, builder.neg(rem))
            uneven = builder.icmp(lc.ICMP_SGT, rem, zero)
            newcount = builder.add(builder.sdiv(diff, step),
                                   builder.select(uneven, one, zero))
            builder.store(newcount, self.count)

    return self

def call_function(self, builder, callee, resty, argtys, args):
    retty = callee.args[0].type.pointee
    retval = cgutils.alloca_once(builder, retty)
    args = [self.get_value_as_argument(builder, ty, arg)
            for ty, arg in zip(argtys, args)]
    realargs = [retval] + list(args)
    code = builder.call(callee, realargs)
    status = self.get_return_status(builder, code)
    return status, builder.load(retval)

def from_data(self, builder, value):
    ty = self.get_value_type()
    resalloca = cgutils.alloca_once(builder, ty)
    cond = builder.icmp_unsigned('==', value, value.type(0))
    with builder.if_else(cond) as (then, otherwise):
        with then:
            builder.store(ty(0), resalloca)
        with otherwise:
            builder.store(ty(1), resalloca)
    return builder.load(resalloca)

def gil_ensure(self):
    """
    Ensure the GIL is acquired.
    The returned value must be consumed by gil_release().
    """
    gilptrty = Type.pointer(self.gil_state)
    fnty = Type.function(Type.void(), [gilptrty])
    fn = self._get_function(fnty, "numba_gil_ensure")
    gilptr = cgutils.alloca_once(self.builder, self.gil_state)
    self.builder.call(fn, [gilptr])
    return gilptr

def frexp_impl(context, builder, sig, args):
    val, = args
    fltty = context.get_data_type(sig.args[0])
    intty = context.get_data_type(sig.return_type[1])
    expptr = cgutils.alloca_once(builder, intty, name="exp")
    fnty = Type.function(fltty, (fltty, Type.pointer(intty)))
    fname = {"float": "numba_frexpf",
             "double": "numba_frexp"}[str(fltty)]
    fn = builder.module.get_or_insert_function(fnty, name=fname)
    res = builder.call(fn, (val, expptr))
    res = cgutils.make_anonymous_struct(builder, (res, builder.load(expptr)))
    return impl_ret_untracked(context, builder, sig.return_type, res)

def getiter_range64_impl(context, builder, sig, args):
    (value,) = args
    state = RangeState64(context, builder, value)
    iterobj = RangeIter64(context, builder)

    start = state.start
    stop = state.stop
    step = state.step

    startptr = cgutils.alloca_once(builder, start.type)
    builder.store(start, startptr)

    countptr = cgutils.alloca_once(builder, start.type)

    iterobj.iter = startptr
    iterobj.stop = stop
    iterobj.step = step
    iterobj.count = countptr

    return getiter_range_generic(context, builder, iterobj, start, stop, step)

def print_charseq(context, builder, sig, args):
    [x] = args
    py = context.get_python_api(builder)
    xp = cgutils.alloca_once(builder, x.type)
    builder.store(x, xp)
    byteptr = builder.bitcast(xp, Type.pointer(Type.int(8)))
    size = context.get_constant(types.intp, x.type.elements[0].count)
    cstr = py.bytes_from_string_and_size(byteptr, size)
    py.print_object(cstr)
    py.decref(cstr)
    return context.get_dummy_value()

def make_array_flatiter(context, builder, arrty, arr):
    flatitercls = make_array_flat_cls(types.NumpyFlatType(arrty))
    flatiter = flatitercls(context, builder)

    iters = cgutils.alloca_once(builder, context.get_value_type(types.intp),
                                size=context.get_constant(types.intp,
                                                          arrty.ndim))
    arrayptr = cgutils.alloca_once(builder, arr.type)
    builder.store(arr, arrayptr)

    zero = context.get_constant(types.intp, 0)
    for i in range(arrty.ndim):
        p = builder.gep(iters, [context.get_constant(types.intp, i)])
        builder.store(zero, p)

    flatiter.array = arrayptr
    flatiter.iters = iters

    return flatiter._getvalue()

def init_specific(self, context, builder, arrty, arr):
    zero = context.get_constant(types.intp, 0)
    one = context.get_constant(types.intp, 1)
    data = arr.data
    ndim = arrty.ndim
    shapes = cgutils.unpack_tuple(builder, arr.shape, ndim)

    indices = cgutils.alloca_once(builder, zero.type,
                                  size=context.get_constant(types.intp,
                                                            arrty.ndim))
    pointers = cgutils.alloca_once(builder, data.type,
                                   size=context.get_constant(types.intp,
                                                             arrty.ndim))
    strides = cgutils.unpack_tuple(builder, arr.strides, ndim)
    empty = cgutils.alloca_once_value(builder, cgutils.false_byte)

    # Initialize each dimension with the next index and pointer
    # values.  For the last (inner) dimension, this is 0 and the
    # start pointer, for the other dimensions, this is 1 and the
    # pointer to the next subarray after start.
    for dim in range(ndim):
        idxptr = cgutils.gep(builder, indices, dim)
        ptrptr = cgutils.gep(builder, pointers, dim)
        if dim == ndim - 1:
            builder.store(zero, idxptr)
            builder.store(data, ptrptr)
        else:
            p = cgutils.pointer_add(builder, data, strides[dim])
            builder.store(p, ptrptr)
            builder.store(one, idxptr)
        # 0-sized dimensions really indicate an empty array,
        # but we have to catch that condition early to avoid
        # a bug inside the iteration logic (see issue #846).
        dim_size = shapes[dim]
        dim_is_empty = builder.icmp(lc.ICMP_EQ, dim_size, zero)
        with cgutils.if_unlikely(builder, dim_is_empty):
            builder.store(cgutils.true_byte, empty)

    self.indices = indices
    self.pointers = pointers
    self.empty = empty

def to_native_int(self, obj, typ):
    ll_type = self.context.get_argument_type(typ)
    val = cgutils.alloca_once(self.builder, ll_type)
    longobj = self.number_long(obj)
    with self.if_object_ok(longobj):
        if typ.signed:
            llval = self.long_as_longlong(longobj)
        else:
            llval = self.long_as_ulonglong(longobj)
        self.decref(longobj)
        self.builder.store(self.builder.trunc(llval, ll_type), val)
    return self.builder.load(val)

def year_to_days(builder, year_val):
    """
    Given a year *year_val* (offset to 1970), return the number of days
    since the 1970 epoch.
    """
    # The algorithm below is copied from Numpy's get_datetimestruct_days()
    # (src/multiarray/datetime.c)
    ret = cgutils.alloca_once(builder, TIMEDELTA64)
    # First approximation
    days = scale_by_constant(builder, year_val, 365)
    # Adjust for leap years
    with cgutils.ifelse(builder, cgutils.is_neg_int(builder, year_val)) \
            as (if_neg, if_pos):
        with if_pos:
            # At or after 1970:
            # 1968 is the closest leap year before 1970.
            # Exclude the current year, so add 1.
            from_1968 = add_constant(builder, year_val, 1)
            # Add one day for each 4 years
            p_days = builder.add(days,
                                 unscale_by_constant(builder, from_1968, 4))
            # 1900 is the closest previous year divisible by 100
            from_1900 = add_constant(builder, from_1968, 68)
            # Subtract one day for each 100 years
            p_days = builder.sub(p_days,
                                 unscale_by_constant(builder, from_1900, 100))
            # 1600 is the closest previous year divisible by 400
            from_1600 = add_constant(builder, from_1900, 300)
            # Add one day for each 400 years
            p_days = builder.add(p_days,
                                 unscale_by_constant(builder, from_1600, 400))
            builder.store(p_days, ret)
        with if_neg:
            # Before 1970:
            # NOTE `year_val` is negative, and so will be `from_1972`
            # and `from_2000`.
            # 1972 is the closest leap year after 1970.
            # Include the current year, so subtract 2.
            from_1972 = add_constant(builder, year_val, -2)
            # Subtract one day for each 4 years (`from_1972` is negative)
            n_days = builder.add(days,
                                 unscale_by_constant(builder, from_1972, 4))
            # 2000 is the closest later year divisible by 100
            from_2000 = add_constant(builder, from_1972, -28)
            # Add one day for each 100 years
            n_days = builder.sub(n_days,
                                 unscale_by_constant(builder, from_2000, 100))
            # 2000 is also the closest later year divisible by 400
            # Subtract one day for each 400 years
            n_days = builder.add(n_days,
                                 unscale_by_constant(builder, from_2000, 400))
            builder.store(n_days, ret)
    return builder.load(ret)

def timedelta_over_timedelta(context, builder, sig, args):
    [va, vb] = args
    [ta, tb] = sig.args
    not_nan = are_not_nat(builder, [va, vb])
    ll_ret_type = context.get_value_type(sig.return_type)
    ret = cgutils.alloca_once(builder, ll_ret_type, name='ret')
    builder.store(Constant.real(ll_ret_type, float('nan')), ret)
    with cgutils.if_likely(builder, not_nan):
        va, vb = normalize_timedeltas(context, builder, va, vb, ta, tb)
        va = builder.sitofp(va, ll_ret_type)
        vb = builder.sitofp(vb, ll_ret_type)
        builder.store(builder.fdiv(va, vb), ret)
    return builder.load(ret)

def timedelta_over_timedelta(context, builder, sig, args):
    [va, vb] = args
    [ta, tb] = sig.args
    not_nan = are_not_nat(builder, [va, vb])
    ll_ret_type = context.get_value_type(sig.return_type)
    ret = cgutils.alloca_once(builder, ll_ret_type, name='ret')
    builder.store(Constant.real(ll_ret_type, float('nan')), ret)
    with cgutils.if_likely(builder, not_nan):
        va, vb = normalize_timedeltas(context, builder, va, vb, ta, tb)
        va = builder.sitofp(va, ll_ret_type)
        vb = builder.sitofp(vb, ll_ret_type)
        builder.store(builder.fdiv(va, vb), ret)
    res = builder.load(ret)
    return impl_ret_untracked(context, builder, sig.return_type, res)

def getiter_array(context, builder, sig, args):
    [arrayty] = sig.args
    [array] = args

    iterobj = make_arrayiter_cls(sig.return_type)(context, builder)

    zero = context.get_constant(types.intp, 0)
    indexptr = cgutils.alloca_once(builder, zero.type)
    builder.store(zero, indexptr)

    iterobj.index = indexptr
    iterobj.array = array

    return iterobj._getvalue()

def frexp_impl(context, builder, sig, args):
    val, = args
    fltty = context.get_data_type(sig.args[0])
    intty = context.get_data_type(sig.return_type[1])
    expptr = cgutils.alloca_once(builder, intty, name='exp')
    fnty = Type.function(fltty, (fltty, Type.pointer(intty)))
    fname = {
        "float": "numba_frexpf",
        "double": "numba_frexp",
    }[str(fltty)]
    fn = builder.module.get_or_insert_function(fnty, name=fname)
    res = builder.call(fn, (val, expptr))
    res = cgutils.make_anonymous_struct(builder, (res, builder.load(expptr)))
    return impl_ret_untracked(context, builder, sig.return_type, res)

def codegen(context, builder, sig, args):
    fnty = ir.FunctionType(
        ll_status,
        [ll_dict_type, ll_bytes, ll_bytes],
    )
    [d] = args
    [td] = sig.args
    fn = builder.module.get_or_insert_function(fnty,
                                               name='numba_dict_popitem')

    dm_key = context.data_model_manager[td.key_type]
    dm_val = context.data_model_manager[td.value_type]

    ptr_key = cgutils.alloca_once(builder, dm_key.get_data_type())
    ptr_val = cgutils.alloca_once(builder, dm_val.get_data_type())

    dp = _dict_get_data(context, builder, td, d)
    status = builder.call(
        fn,
        [
            dp,
            _as_bytes(builder, ptr_key),
            _as_bytes(builder, ptr_val),
        ],
    )
    out = context.make_optional_none(builder, keyvalty)
    pout = cgutils.alloca_once_value(builder, out)

    cond = builder.icmp_signed('==', status, status.type(int(Status.OK)))
    with builder.if_then(cond):
        key = dm_key.load_from_data_pointer(builder, ptr_key)
        val = dm_val.load_from_data_pointer(builder, ptr_val)
        keyval = context.make_tuple(builder, keyvalty, [key, val])
        optkeyval = context.make_optional_value(builder, keyvalty, keyval)
        builder.store(optkeyval, pout)

    out = builder.load(pout)
    return cgutils.pack_struct(builder, [status, out])

def lower_dist_reduce(context, builder, sig, args):
    val_typ = args[0].type
    op_typ = args[1].type

    target_typ = sig.args[0]
    if isinstance(target_typ, IndexValueType):
        target_typ = target_typ.val_typ
        supported_typs = [types.int32, types.float32, types.float64]
        import sys
        if not sys.platform.startswith('win'):
            # long is 4 byte on Windows
            supported_typs.append(types.int64)
        if target_typ not in supported_typs:  # pragma: no cover
            raise TypeError(
                "argmin/argmax not supported for type {}".format(target_typ))

    in_ptr = cgutils.alloca_once(builder, val_typ)
    out_ptr = cgutils.alloca_once(builder, val_typ)
    builder.store(args[0], in_ptr)
    # cast to char *
    in_ptr = builder.bitcast(in_ptr, lir.IntType(8).as_pointer())
    out_ptr = builder.bitcast(out_ptr, lir.IntType(8).as_pointer())

    typ_enum = _numba_to_c_type_map[target_typ]
    typ_arg = cgutils.alloca_once_value(
        builder, lir.Constant(lir.IntType(32), typ_enum))

    fnty = lir.FunctionType(lir.VoidType(),
                            [lir.IntType(8).as_pointer(),
                             lir.IntType(8).as_pointer(),
                             op_typ, lir.IntType(32)])
    fn = builder.module.get_or_insert_function(fnty, name="hpat_dist_reduce")
    builder.call(fn, [in_ptr, out_ptr, args[1], builder.load(typ_arg)])

    # cast back to value type
    out_ptr = builder.bitcast(out_ptr, val_typ.as_pointer())
    return builder.load(out_ptr)

def h5_write(context, builder, sig, args):
    # extra last arg type for type enum
    arg_typs = [lir.IntType(32), lir.IntType(32), lir.IntType(32),
                lir.IntType(64).as_pointer(), lir.IntType(64).as_pointer(),
                lir.IntType(64), lir.IntType(8).as_pointer(),
                lir.IntType(32)]
    fnty = lir.FunctionType(lir.IntType(32), arg_typs)

    fn = builder.module.get_or_insert_function(fnty, name="hpat_h5_write")
    out = make_array(sig.args[6])(context, builder, args[6])
    # store size vars array struct to pointer
    count_ptr = cgutils.alloca_once(builder, args[3].type)
    builder.store(args[3], count_ptr)
    size_ptr = cgutils.alloca_once(builder, args[4].type)
    builder.store(args[4], size_ptr)
    # store an int to specify data type
    typ_enum = _h5_typ_table[sig.args[6].dtype]
    typ_arg = cgutils.alloca_once_value(
        builder, lir.Constant(lir.IntType(32), typ_enum))

    call_args = [args[0], args[1], args[2],
                 builder.bitcast(count_ptr, lir.IntType(64).as_pointer()),
                 builder.bitcast(size_ptr, lir.IntType(64).as_pointer()),
                 args[5],
                 builder.bitcast(out.data, lir.IntType(8).as_pointer()),
                 builder.load(typ_arg)]

    return builder.call(fn, call_args)

def call_function(self, builder, callee, resty, argtys, args):
    """
    Call the Numba-compiled *callee*.
    """
    # XXX better fix for callees that are not function values
    #     (pointers to function; thus have no `.args` attribute)
    retty = self._get_return_argument(callee.function_type).pointee

    retvaltmp = cgutils.alloca_once(builder, retty)
    # initialize return value to zeros
    builder.store(cgutils.get_null_value(retty), retvaltmp)

    excinfoptr = cgutils.alloca_once(builder, ir.PointerType(excinfo_t),
                                     name="excinfo")

    arginfo = self._get_arg_packer(argtys)
    args = list(arginfo.as_arguments(builder, args))
    realargs = [retvaltmp, excinfoptr] + args
    code = builder.call(callee, realargs)
    status = self._get_return_status(builder, code,
                                     builder.load(excinfoptr))
    retval = builder.load(retvaltmp)
    out = self.context.get_returned_value(builder, resty, retval)
    return status, out

def nt2nd(context, builder, ptr, ary_type):
    '''Generate IR code to convert a pointer-to-daal-numeric-table to an
    ndarray'''
    # we need to prepare the shape array and a pointer
    shape_type = lir.ArrayType(lir.IntType(64), 2)
    shape = cgutils.alloca_once(builder, shape_type)
    data = cgutils.alloca_once(builder, lir.DoubleType().as_pointer())
    assert(ary_type in [dtable_type, ftable_type, itable_type, ])
    # we also need to indicate the type of the array (e.g. what we expect)
    d4ptype = context.get_constant(types.byte, d4ptypes[ary_type])
    # we can now declare and call our conversion function
    fnty = lir.FunctionType(
        lir.VoidType(),
        [lir.IntType(8).as_pointer(),  # actually pointer to numeric table
         lir.DoubleType().as_pointer().as_pointer(),
         shape_type.as_pointer(),
         lir.IntType(8)])
    fn = builder.module.get_or_insert_function(fnty, name='to_c_array')
    builder.call(fn, [ptr, data, shape, d4ptype])
    # convert to ndarray
    shape = cgutils.unpack_tuple(builder, builder.load(shape))
    ary = _empty_nd_impl(context, builder, ary_type, shape)
    cgutils.raw_memcpy(builder, ary.data, builder.load(data), ary.nitems,
                       ary.itemsize, align=1)
    # we are done!
    return impl_ret_new_ref(context, builder, ary_type, ary._getvalue())

def call_function(self, builder, callee, resty, argtys, args, env=None):
    """
    Call the Numba-compiled *callee*, using the same calling
    convention as in get_function_type().
    """
    assert env is None
    retty = callee.args[0].type.pointee
    retval = cgutils.alloca_once(builder, retty)
    # initialize return value
    builder.store(lc.Constant.null(retty), retval)
    args = [self.get_value_as_argument(builder, ty, arg)
            for ty, arg in zip(argtys, args)]
    realargs = [retval] + list(args)
    code = builder.call(callee, realargs)
    status = self.get_return_status(builder, code)
    return status, builder.load(retval)

def _generic_array(context, builder, shape, dtype, symbol_name, addrspace,
                   can_dynsized=False):
    elemcount = reduce(operator.mul, shape)
    lldtype = context.get_data_type(dtype)
    laryty = Type.array(lldtype, elemcount)

    if addrspace == nvvm.ADDRSPACE_LOCAL:
        # Special-case local address space allocation to use alloca.
        # NVVM is smart enough to only use local memory if no register is
        # available.
        dataptr = cgutils.alloca_once(builder, laryty, name=symbol_name)
    else:
        lmod = builder.module

        # Create global variable in the requested address space
        gvmem = lmod.add_global_variable(laryty, symbol_name, addrspace)
        # Specify alignment to avoid misalignment bug
        gvmem.align = context.get_abi_sizeof(lldtype)

        if elemcount <= 0:
            if can_dynsized:    # dynamic shared memory
                gvmem.linkage = lc.LINKAGE_EXTERNAL
            else:
                raise ValueError("array length <= 0")
        else:
            ## Comment out the following line to workaround a NVVM bug
            ## which generates an invalid symbol name when the linkage
            ## is internal and in some situation.
            ## See _get_unique_smem_id()
            # gvmem.linkage = lc.LINKAGE_INTERNAL
            gvmem.initializer = lc.Constant.undef(laryty)

        if dtype not in types.number_domain:
            raise TypeError("unsupported type: %s" % dtype)

        # Convert to generic address space
        conv = nvvmutils.insert_addrspace_conv(lmod, Type.int(8), addrspace)
        addrspaceptr = gvmem.bitcast(Type.pointer(Type.int(8), addrspace))
        dataptr = builder.call(conv, [addrspaceptr])

    return _make_array(context, builder, dataptr, dtype, shape)

def _unbox_array_list_str(obj, c):
    typ = list_string_array_type
    # from unbox_list
    errorptr = cgutils.alloca_once_value(c.builder, cgutils.false_bit)
    listptr = cgutils.alloca_once(c.builder, c.context.get_value_type(typ))

    # get size of array
    arr_size_fnty = LLType.function(c.pyapi.py_ssize_t, [c.pyapi.pyobj])
    arr_size_fn = c.pyapi._get_function(arr_size_fnty, name="array_size")
    size = c.builder.call(arr_size_fn, [obj])
    # cgutils.printf(c.builder, 'size %d\n', size)

    _python_array_obj_to_native_list(typ, obj, c, size, listptr, errorptr)
    return NativeValue(c.builder.load(listptr),
                       is_error=c.builder.load(errorptr))

def impl_iterator_iternext(context, builder, sig, args, result):
    iter_type = sig.args[0]
    it = context.make_helper(builder, iter_type, args[0])

    iternext_fnty = ir.FunctionType(
        ll_status,
        [ll_listiter_type, ll_bytes.as_pointer()]
    )
    iternext = builder.module.get_or_insert_function(
        iternext_fnty,
        name='numba_list_iter_next',
    )
    item_raw_ptr = cgutils.alloca_once(builder, ll_bytes)

    status = builder.call(iternext, (it.state, item_raw_ptr))

    # check for list mutation
    mutated_status = status.type(int(ListStatus.LIST_ERR_MUTATED))
    is_mutated = builder.icmp_signed('==', status, mutated_status)
    with builder.if_then(is_mutated, likely=False):
        context.call_conv.return_user_exc(
            builder, RuntimeError, ("list was mutated during iteration",))

    # if the list wasn't mutated it is either fine or the iterator was
    # exhausted
    ok_status = status.type(int(ListStatus.LIST_OK))
    is_valid = builder.icmp_signed('==', status, ok_status)
    result.set_valid(is_valid)

    with builder.if_then(is_valid, likely=True):
        item_ty = iter_type.parent.item_type

        dm_item = context.data_model_manager[item_ty]

        item_ptr = builder.bitcast(
            builder.load(item_raw_ptr),
            dm_item.get_data_type().as_pointer(),
        )

        item = dm_item.load_from_data_pointer(builder, item_ptr)

        if isinstance(iter_type.iterable, ListTypeIterableType):
            result.yield_(item)
        else:
            # unreachable
            raise AssertionError('unknown type: {}'.format(
                iter_type.iterable))

def call_function(self, builder, callee, resty, argtys, args, env=None):
    """
    Call the Numba-compiled *callee*.
    """
    assert env is None
    retty = callee.args[0].type.pointee
    retvaltmp = cgutils.alloca_once(builder, retty)
    # initialize return value
    builder.store(cgutils.get_null_value(retty), retvaltmp)

    args = [self.context.get_value_as_argument(builder, ty, arg)
            for ty, arg in zip(argtys, args)]
    realargs = [retvaltmp] + list(args)
    code = builder.call(callee, realargs)
    status = self._get_return_status(builder, code)
    retval = builder.load(retvaltmp)
    out = self.context.get_returned_value(builder, resty, retval)
    return status, out

def call_function(self, builder, callee, resty, argtys, args):
    """
    Call the Numba-compiled *callee*.
    """
    retty = callee.args[0].type.pointee
    retvaltmp = cgutils.alloca_once(builder, retty)
    # initialize return value
    builder.store(cgutils.get_null_value(retty), retvaltmp)

    arginfo = self._get_arg_packer(argtys)
    args = arginfo.as_arguments(builder, args)
    realargs = [retvaltmp] + list(args)
    code = builder.call(callee, realargs)
    status = self._get_return_status(builder, code)
    retval = builder.load(retvaltmp)
    out = self.context.get_returned_value(builder, resty, retval)
    return status, out

def int_print_impl(context, builder, sig, args):
    [x] = args
    [srctype] = sig.args
    mod = builder.module
    vprint = nvvmutils.declare_vprint(mod)
    if srctype in types.unsigned_domain:
        rawfmt = "%llu"
        dsttype = types.uint64
    else:
        rawfmt = "%lld"
        dsttype = types.int64
    fmt = context.insert_string_const_addrspace(builder, rawfmt)
    lld = context.cast(builder, x, srctype, dsttype)
    valptr = cgutils.alloca_once(builder, context.get_value_type(dsttype))
    builder.store(lld, valptr)
    builder.call(vprint, [fmt, builder.bitcast(valptr, voidptr)])
    return context.get_dummy_value()

def unpack_value(self, builder, ty, ptr):
    """Unpack data from array storage
    """
    if isinstance(ty, types.Record):
        vt = self.get_value_type(ty)
        tmp = cgutils.alloca_once(builder, vt)
        dataptr = cgutils.inbound_gep(builder, ptr, 0, 0)
        builder.store(dataptr, cgutils.inbound_gep(builder, tmp, 0, 0))
        return builder.load(tmp)

    assert cgutils.is_pointer(ptr.type)
    value = builder.load(ptr)
    if ty == types.boolean:
        return builder.trunc(value, Type.int(1))
    else:
        return value

def reduce_datetime_for_unit(builder, dt_val, src_unit, dest_unit):
    dest_unit_code = npdatetime.DATETIME_UNITS[dest_unit]
    src_unit_code = npdatetime.DATETIME_UNITS[src_unit]
    if dest_unit_code < 2 or src_unit_code >= 2:
        return dt_val, src_unit
    # Need to compute the day ordinal for *dt_val*
    if src_unit_code == 0:
        # Years to days
        year_val = dt_val
        days_val = year_to_days(builder, year_val)

    else:
        # Months to days
        leap_array = cgutils.global_constant(builder, "leap_year_months_acc",
                                             leap_year_months_acc)
        normal_array = cgutils.global_constant(builder,
                                               "normal_year_months_acc",
                                               normal_year_months_acc)

        days = cgutils.alloca_once(builder, TIMEDELTA64)

        # First compute year number and month number
        year, month = cgutils.divmod_by_constant(builder, dt_val, 12)

        # Then deduce the number of days
        with builder.if_else(is_leap_year(builder, year)) as (then,
                                                              otherwise):
            with then:
                addend = builder.load(
                    cgutils.gep(builder, leap_array, 0, month,
                                inbounds=True))
                builder.store(addend, days)
            with otherwise:
                addend = builder.load(
                    cgutils.gep(builder, normal_array, 0, month,
                                inbounds=True))
                builder.store(addend, days)

        days_val = year_to_days(builder, year)
        days_val = builder.add(days_val, builder.load(days))

    if dest_unit_code == 2:
        # Need to scale back to weeks
        weeks, _ = cgutils.divmod_by_constant(builder, days_val, 7)
        return weeks, 'W'
    else:
        return days_val, 'D'

def string_split_impl(context, builder, sig, args):
    nitems = cgutils.alloca_once(builder, lir.IntType(64))
    # input str, sep, size pointer
    fnty = lir.FunctionType(lir.IntType(8).as_pointer().as_pointer(),
                            [lir.IntType(8).as_pointer(),
                             lir.IntType(8).as_pointer(),
                             lir.IntType(64).as_pointer()])
    fn = builder.module.get_or_insert_function(fnty, name="str_split")
    ptr = builder.call(fn, args + [nitems])
    size = builder.load(nitems)
    # TODO: use ptr instead of allocating and copying, use NRT_MemInfo_new
    # TODO: deallocate ptr
    _list = numba.targets.listobj.ListInstance.allocate(
        context, builder, sig.return_type, size)
    _list.size = size
    with cgutils.for_range(builder, size) as loop:
        value = builder.load(cgutils.gep_inbounds(builder, ptr, loop.index))
        _list.setitem(loop.index, value)
    return impl_ret_new_ref(context, builder, sig.return_type, _list.value)

def __init__(self, builder, api, nargs):
    self.builder = builder
    self.api = api
    self.arg_count = 0  # how many function arguments have been processed
    self.cleanups = []

    # set up switch for error processing of function arguments
    self.elseblk = cgutils.append_basic_block(self.builder, "arg.ok")
    with cgutils.goto_block(self.builder, self.elseblk):
        self.builder.ret(self.api.get_null_object())

    self.swtblk = cgutils.append_basic_block(self.builder, ".arg.err")
    with cgutils.goto_block(self.builder, self.swtblk):
        self.swt_val = cgutils.alloca_once(self.builder, Type.int(32))
        self.swt = self.builder.switch(self.builder.load(self.swt_val),
                                       self.elseblk, nargs)

    self.prev = self.elseblk

def get_value_as_argument(self, builder, ty, val):
    """Prepare local value representation as argument type representation
    """
    argty = self.get_argument_type(ty)
    if argty == val.type:
        return val

    elif self.is_struct_type(ty):
        # Arguments are passed by pointer
        assert argty.pointee == val.type
        tmp = cgutils.alloca_once(builder, val.type)
        builder.store(val, tmp)
        return tmp

    elif ty == types.boolean:
        return builder.zext(val, argty)

    raise NotImplementedError("value %s -> arg %s" % (val.type, argty))

def _randrange_impl(context, builder, start, stop, step, state):
    state_ptr = get_state_ptr(context, builder, state)
    ty = stop.type
    zero = ir.Constant(ty, 0)
    one = ir.Constant(ty, 1)
    nptr = cgutils.alloca_once(builder, ty, name="n")
    # n = stop - start
    builder.store(builder.sub(stop, start), nptr)

    with builder.if_then(builder.icmp_signed('<', step, zero)):
        # n = (n + step + 1) // step
        w = builder.add(builder.add(builder.load(nptr), step), one)
        n = builder.sdiv(w, step)
        builder.store(n, nptr)
    with builder.if_then(builder.icmp_signed('>', step, one)):
        # n = (n + step - 1) // step
        w = builder.sub(builder.add(builder.load(nptr), step), one)
        n = builder.sdiv(w, step)
        builder.store(n, nptr)

    n = builder.load(nptr)
    with cgutils.if_unlikely(builder, builder.icmp_signed('<=', n, zero)):
        # n <= 0
        msg = "empty range for randrange()"
        context.call_conv.return_user_exc(builder, ValueError, (msg,))

    fnty = ir.FunctionType(ty, [ty, cgutils.true_bit.type])
    fn = builder.function.module.get_or_insert_function(
        fnty, "llvm.ctlz.%s" % ty)
    nbits = builder.trunc(builder.call(fn, [n, cgutils.true_bit]), int32_t)
    nbits = builder.sub(ir.Constant(int32_t, ty.width), nbits)

    bbwhile = builder.append_basic_block("while")
    bbend = builder.append_basic_block("while.end")
    builder.branch(bbwhile)

    builder.position_at_end(bbwhile)
    r = get_next_int(context, builder, state_ptr, nbits)
    r = builder.trunc(r, ty)
    too_large = builder.icmp_signed('>=', r, n)
    builder.cbranch(too_large, bbwhile, bbend)

    builder.position_at_end(bbend)
    return builder.add(start, builder.mul(r, step))

def codegen(context, builder, sig, args):
    fnty = ir.FunctionType(
        ll_ssize_t,
        [ll_dict_type, ll_bytes, ll_hash, ll_bytes],
    )
    [td, tkey, thashval] = sig.args
    [d, key, hashval] = args
    fn = builder.module.get_or_insert_function(fnty, name='numba_dict_lookup')

    dm_key = context.data_model_manager[tkey]
    dm_val = context.data_model_manager[td.value_type]

    data_key = dm_key.as_data(builder, key)
    ptr_key = cgutils.alloca_once_value(builder, data_key)

    ll_val = context.get_data_type(td.value_type)
    ptr_val = cgutils.alloca_once(builder, ll_val)

    dp = _dict_get_data(context, builder, td, d)
    ix = builder.call(
        fn,
        [
            dp,
            _as_bytes(builder, ptr_key),
            hashval,
            _as_bytes(builder, ptr_val),
        ],
    )
    # Load value if output is available
    found = builder.icmp_signed('>=', ix, ix.type(int(DKIX.EMPTY)))

    out = context.make_optional_none(builder, td.value_type)
    pout = cgutils.alloca_once_value(builder, out)

    with builder.if_then(found):
        val = dm_val.load_from_data_pointer(builder, ptr_val)
        context.nrt.incref(builder, td.value_type, val)
        loaded = context.make_optional_value(builder, td.value_type, val)
        builder.store(loaded, pout)

    out = builder.load(pout)
    return context.make_tuple(builder, resty, [ix, out])

def codegen(context, builder, sig, args):
    fnty = ir.FunctionType(
        ll_status,
        [ll_list_type.as_pointer(), ll_ssize_t, ll_ssize_t],
    )
    fn = builder.module.get_or_insert_function(fnty, name='numba_list_new')
    # Determine sizeof item types
    ll_item = context.get_data_type(itemty.instance_type)
    sz_item = context.get_abi_sizeof(ll_item)
    reflp = cgutils.alloca_once(builder, ll_list_type, zfill=True)
    status = builder.call(
        fn,
        [reflp, ll_ssize_t(sz_item), ll_ssize_t(0)],
    )
    _raise_if_error(
        context, builder, status,
        msg="Failed to allocate list",
    )
    lp = builder.load(reflp)
    return lp

def box_str(typ, val, c):
    """
    Box a native string array *val* of type *typ* as a Python list of str.
    """
    dtype = StringArrayPayloadType()
    inst_struct = c.context.make_helper(c.builder, typ, val)
    data_pointer = c.context.nrt.meminfo_data(c.builder, inst_struct.meminfo)
    # cgutils.printf(builder, "data [%p]\n", data_pointer)
    data_pointer = c.builder.bitcast(
        data_pointer, c.context.get_data_type(dtype).as_pointer())

    string_array = cgutils.create_struct_proxy(dtype)(
        c.context, c.builder, c.builder.load(data_pointer))
    # fnty = lir.FunctionType(lir.VoidType(), [lir.IntType(64)])
    # fn_print_int = c.builder.module.get_or_insert_function(
    #     fnty, name="print_int")
    # c.builder.call(fn_print_int, [string_array.size])

    string_list = c.pyapi.list_new(string_array.size)
    res = cgutils.alloca_once(c.builder, lir.IntType(8).as_pointer())
    c.builder.store(string_list, res)

    fnty = lir.FunctionType(lir.IntType(8).as_pointer(),
                            [lir.IntType(8).as_pointer(),
                             lir.IntType(8).as_pointer(),
                             lir.IntType(64)])
    fn_getitem = c.builder.module.get_or_insert_function(
        fnty, name="getitem_string_array")

    with cgutils.for_range(c.builder, string_array.size) as loop:
        c_str = c.builder.call(
            fn_getitem,
            [string_array.offsets, string_array.data, loop.index])
        pystr = c.pyapi.string_from_string(c_str)
        c.pyapi.list_setitem(string_list, loop.index, pystr)

    c.context.nrt.decref(c.builder, typ, val)
    return c.builder.load(res)

def init_specific(self, context, builder, arrty, arr):
    zero = context.get_constant(types.intp, 0)
    self.index = cgutils.alloca_once_value(builder, zero)
    self.pointer = cgutils.alloca_once_value(builder, arr.data)
    # We can't trust strides[-1] to always contain the right
    # step value, see
    # http://docs.scipy.org/doc/numpy-dev/release.html#npy-relaxed-strides-checking
    self.stride = arr.itemsize

    if kind == 'ndenumerate':
        # Zero-initialize the indices array.
        indices = cgutils.alloca_once(
            builder, zero.type,
            size=context.get_constant(types.intp, arrty.ndim))

        for dim in range(arrty.ndim):
            idxptr = cgutils.gep(builder, indices, dim)
            builder.store(zero, idxptr)

        self.indices = indices

def get_constant_generic(self, builder, ty, val):
    """
    Return a LLVM constant representing value *val* of Numba type *ty*.
    """
    if self.is_struct_type(ty):
        struct = self.get_constant_struct(builder, ty, val)
        if isinstance(ty, types.Record):
            ptrty = self.data_model_manager[ty].get_data_type()
            ptr = cgutils.alloca_once(builder, ptrty)
            builder.store(struct, ptr)
            return ptr
        return struct

    elif isinstance(ty, types.ExternalFunctionPointer):
        ptrty = self.get_function_pointer_type(ty)
        ptrval = ty.get_pointer(val)
        return builder.inttoptr(self.get_constant(types.intp, ptrval),
                                ptrty)

    else:
        return self.get_constant(ty, val)

def codegen(context, builder, sig, args):
    fnty = ir.FunctionType(
        ll_status,
        [ll_list_type, ll_ssize_t, ll_bytes],
    )
    [tl, tindex] = sig.args
    [l, index] = args
    fn = builder.module.get_or_insert_function(
        fnty, name='numba_list_{}'.format(op))

    dm_item = context.data_model_manager[tl.item_type]
    ll_item = context.get_data_type(tl.item_type)
    ptr_item = cgutils.alloca_once(builder, ll_item)

    lp = _container_get_data(context, builder, tl, l)
    status = builder.call(
        fn,
        [
            lp,
            index,
            _as_bytes(builder, ptr_item),
        ],
    )
    # Load item if output is available
    found = builder.icmp_signed('>=', status,
                                status.type(int(ListStatus.LIST_OK)))

    out = context.make_optional_none(
        builder, tl.item_type if IS_NOT_NONE else types.int64)
    pout = cgutils.alloca_once_value(builder, out)

    with builder.if_then(found):
        if IS_NOT_NONE:
            item = dm_item.load_from_data_pointer(builder, ptr_item)
            context.nrt.incref(builder, tl.item_type, item)
            loaded = context.make_optional_value(builder, tl.item_type, item)
            builder.store(loaded, pout)

    out = builder.load(pout)
    return context.make_tuple(builder, resty, [status, out])

def call_function(self, builder, callee, resty, argtys, args, env=None):
    """
    Call the Numba-compiled *callee*, using the same calling
    convention as in get_function_type().
    """
    if env is None:
        # This only works with functions that don't use the environment
        # (nopython functions).
        env = lc.Constant.null(PYOBJECT)
    retty = callee.args[0].type.pointee
    retval = cgutils.alloca_once(builder, retty)
    # initialize return value to zeros
    builder.store(lc.Constant.null(retty), retval)
    args = [self.get_value_as_argument(builder, ty, arg)
            for ty, arg in zip(argtys, args)]
    realargs = [retval, env] + list(args)
    code = builder.call(callee, realargs)
    status = self.get_return_status(builder, code)
    return status, builder.load(retval)

def h5_create_dset(context, builder, sig, args):
    # insert the dset_name string arg
    fnty = lir.FunctionType(lir.IntType(8).as_pointer(),
                            [lir.IntType(8).as_pointer()])
    fn = builder.module.get_or_insert_function(fnty, name="get_c_str")
    val2 = builder.call(fn, [args[1]])

    # extra last arg type for type enum
    arg_typs = [lir.IntType(32), lir.IntType(8).as_pointer(),
                lir.IntType(32), lir.IntType(64).as_pointer(),
                lir.IntType(32)]
    fnty = lir.FunctionType(lir.IntType(32), arg_typs)
    fn = builder.module.get_or_insert_function(fnty,
                                               name="hpat_h5_create_dset")

    ndims = sig.args[2].count
    ndims_arg = lir.Constant(lir.IntType(32), ndims)

    # store size vars array struct to pointer
    count_ptr = cgutils.alloca_once(builder, args[2].type)
    builder.store(args[2], count_ptr)

    t_fnty = lir.FunctionType(lir.IntType(32), [lir.IntType(8).as_pointer()])
    t_fn = builder.module.get_or_insert_function(
        t_fnty, name="hpat_h5_get_type_enum")
    typ_arg = builder.call(t_fn, [args[3]])

    call_args = [args[0], val2, ndims_arg,
                 builder.bitcast(count_ptr, lir.IntType(64).as_pointer()),
                 typ_arg]

    return builder.call(fn, call_args)

def bytes_to_charseq(context, builder, fromty, toty, val):
    barr = cgutils.create_struct_proxy(fromty)(context, builder, value=val)
    src = builder.bitcast(barr.data, ir.IntType(8).as_pointer())
    src_length = barr.nitems

    lty = context.get_value_type(toty)
    dstint_t = ir.IntType(8)
    dst_ptr = cgutils.alloca_once(builder, lty)
    dst = builder.bitcast(dst_ptr, dstint_t.as_pointer())

    dst_length = ir.Constant(src_length.type, toty.count)
    is_shorter_value = builder.icmp_unsigned('<', src_length, dst_length)
    count = builder.select(is_shorter_value, src_length, dst_length)
    with builder.if_then(is_shorter_value):
        cgutils.memset(builder, dst,
                       ir.Constant(src_length.type, toty.count), 0)

    with cgutils.for_range(builder, count) as loop:
        in_ptr = builder.gep(src, [loop.index])
        in_val = builder.zext(builder.load(in_ptr), dstint_t)
        builder.store(in_val, builder.gep(dst, [loop.index]))

    return builder.load(dst_ptr)