def datetime_minus_timedelta(context, builder, sig, args):
    dt_arg, td_arg = args
    dt_type, td_type = sig.args
    res = _datetime_minus_timedelta(context, builder,
                                    dt_arg, dt_type.unit,
                                    td_arg, td_type.unit,
                                    sig.return_type.unit)
    return impl_ret_untracked(context, builder, sig.return_type, res)
def range3_impl(context, builder, sig, args):
    """
    range(start: int, stop: int, step: int) -> range object
    """
    [start, stop, step] = args
    state = RangeState(context, builder)
    state.start = start
    state.stop = stop
    state.step = step
    return impl_ret_untracked(context, builder,
                              range_state_type, state._getvalue())
def range2_impl(context, builder, sig, args):
    """
    range(start: int, stop: int) -> range object
    """
    start, stop = args
    state = RangeState(context, builder)
    state.start = start
    state.stop = stop
    state.step = context.get_constant(int_type, 1)
    return impl_ret_untracked(context, builder,
                              range_state_type, state._getvalue())
def timedelta_max_impl(context, builder, sig, args):
    # just a regular int64 max, avoiding NaTs.
    # note this could be optimized by relying on the actual value of NAT,
    # but as NumPy doesn't rely on this, this seems more resilient
    in1, in2 = args
    in1_not_nat = is_not_nat(builder, in1)
    in2_not_nat = is_not_nat(builder, in2)
    in1_ge_in2 = builder.icmp(lc.ICMP_SGE, in1, in2)
    res = builder.select(in1_ge_in2, in1, in2)
    res = builder.select(in1_not_nat, res, in2)
    res = builder.select(in2_not_nat, res, in1)
    return impl_ret_untracked(context, builder, sig.return_type, res)
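# For reference, a pure-Python sketch of the select chain above. The NAT
# sentinel value here is an assumption for illustration (NumPy stores NaT
# as the smallest int64); the real code only tests it via is_not_nat().
def _timedelta_max_sketch(in1, in2, NAT=-2**63):
    res = in1 if in1 >= in2 else in2   # plain signed max
    res = res if in1 != NAT else in2   # first operand NaT -> take the other
    res = res if in2 != NAT else in1   # second operand NaT -> take the other
    return res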
def atanh_impl(context, builder, sig, args):
    LN_4 = math.log(4)
    THRES_LARGE = math.sqrt(mathimpl.FLT_MAX / 4)
    THRES_SMALL = math.sqrt(mathimpl.FLT_MIN)
    PI_12 = math.pi / 2

    def atanh_impl(z):
        """cmath.atanh(z)"""
        # CPython's algorithm (see c_atanh() in cmathmodule.c)
        if z.real < 0.0:
            # Reduce to case where z.real >= 0., using atanh(z) = -atanh(-z).
            negate = True
            z = -z
        else:
            negate = False

        ay = abs(z.imag)
        if math.isnan(z.real) or z.real > THRES_LARGE or ay > THRES_LARGE:
            if math.isinf(z.imag):
                real = math.copysign(0.0, z.real)
            elif math.isinf(z.real):
                real = 0.0
            else:
                # may be safe from overflow, depending on hypot's implementation...
                h = math.hypot(z.real * 0.5, z.imag * 0.5)
                real = z.real / 4.0 / h / h
            imag = -math.copysign(PI_12, -z.imag)
        elif z.real == 1.0 and ay < THRES_SMALL:
            # C99 standard says: atanh(1+/-0.) should be inf +/- 0j
            if ay == 0.0:
                real = INF
                imag = z.imag
            else:
                real = -math.log(math.sqrt(ay) /
                                 math.sqrt(math.hypot(ay, 2.0)))
                imag = math.copysign(math.atan2(2.0, -ay) / 2, z.imag)
        else:
            sqay = ay * ay
            zr1 = 1 - z.real
            real = math.log1p(4.0 * z.real / (zr1 * zr1 + sqay)) * 0.25
            imag = -math.atan2(-2.0 * z.imag, zr1 * (1 + z.real) - sqay) * 0.5

        if math.isnan(z.imag):
            imag = NAN
        if negate:
            return complex(-real, -imag)
        else:
            return complex(real, imag)

    res = context.compile_internal(builder, atanh_impl, sig, args)
    return impl_ret_untracked(context, builder, sig.return_type, res)
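# A minimal usage sketch (assuming this lowering is registered for
# cmath.atanh in nopython mode; the demo name is hypothetical):
from numba import njit
import cmath

@njit
def _atanh_demo(z):
    return cmath.atanh(z)   # dispatches to the lowered implementation

# _atanh_demo(0.5 + 0.25j) agrees with cmath.atanh(0.5 + 0.25j)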
def negative_binomial_impl(context, builder, sig, args):
    _gamma = np.random.gamma
    _poisson = np.random.poisson

    def negative_binomial_impl(n, p):
        if n <= 0:
            raise ValueError("negative_binomial(): n <= 0")
        if p < 0.0 or p > 1.0:
            raise ValueError("negative_binomial(): p outside of [0, 1]")
        Y = _gamma(n, (1.0 - p) / p)
        return _poisson(Y)

    res = context.compile_internal(builder, negative_binomial_impl, sig, args)
    return impl_ret_untracked(context, builder, sig.return_type, res)
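# Usage sketch (assuming this lowering backs np.random.negative_binomial in
# nopython mode): the draw is a gamma-Poisson mixture, i.e. a Poisson sample
# whose rate Y is itself gamma-distributed.
from numba import njit
import numpy as np

@njit
def _negative_binomial_demo(n, p):
    return np.random.negative_binomial(n, p)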
def frexp_impl(context, builder, sig, args):
    val, = args
    fltty = context.get_data_type(sig.args[0])
    intty = context.get_data_type(sig.return_type[1])
    expptr = cgutils.alloca_once(builder, intty, name='exp')
    fnty = Type.function(fltty, (fltty, Type.pointer(intty)))
    fname = {
        "float": "numba_frexpf",
        "double": "numba_frexp",
    }[str(fltty)]
    fn = cgutils.get_or_insert_function(builder.module, fnty, fname)
    res = builder.call(fn, (val, expptr))
    res = cgutils.make_anonymous_struct(builder, (res, builder.load(expptr)))
    return impl_ret_untracked(context, builder, sig.return_type, res)
def timedelta_over_timedelta(context, builder, sig, args):
    [va, vb] = args
    [ta, tb] = sig.args
    not_nan = are_not_nat(builder, [va, vb])
    ll_ret_type = context.get_value_type(sig.return_type)
    ret = cgutils.alloca_once(builder, ll_ret_type, name='ret')
    builder.store(Constant(ll_ret_type, float('nan')), ret)
    with cgutils.if_likely(builder, not_nan):
        va, vb = normalize_timedeltas(context, builder, va, vb, ta, tb)
        va = builder.sitofp(va, ll_ret_type)
        vb = builder.sitofp(vb, ll_ret_type)
        builder.store(builder.fdiv(va, vb), ret)
    res = builder.load(ret)
    return impl_ret_untracked(context, builder, sig.return_type, res)
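# The NumPy-level semantics this reproduces (illustrative):
import numpy as np

np.timedelta64(3, 'h') / np.timedelta64(90, 'm')   # -> 2.0, after unit normalization
np.timedelta64('NaT') / np.timedelta64(90, 'm')    # -> nan, the pre-stored default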
def complex_sub_impl(context, builder, sig, args):
    [cx, cy] = args
    ty = sig.args[0]
    x = context.make_complex(builder, ty, value=cx)
    y = context.make_complex(builder, ty, value=cy)
    z = context.make_complex(builder, ty)
    a = x.real
    b = x.imag
    c = y.real
    d = y.imag
    z.real = builder.fsub(a, c)
    z.imag = builder.fsub(b, d)
    res = z._getvalue()
    return impl_ret_untracked(context, builder, sig.return_type, res)
def datetime_minus_datetime(context, builder, sig, args):
    va, vb = args
    ta, tb = sig.args
    unit_a = ta.unit
    unit_b = tb.unit
    ret_unit = sig.return_type.unit
    ret = alloc_timedelta_result(builder)
    with cgutils.if_likely(builder, are_not_nat(builder, [va, vb])):
        va = convert_datetime_for_arith(builder, va, unit_a, ret_unit)
        vb = convert_datetime_for_arith(builder, vb, unit_b, ret_unit)
        ret_val = builder.sub(va, vb)
        builder.store(ret_val, ret)
    res = builder.load(ret)
    return impl_ret_untracked(context, builder, sig.return_type, res)
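# NumPy-level behaviour this lowering mirrors (illustrative): the difference
# is computed in the result unit, and NaT propagates untouched.
import numpy as np

np.datetime64('2021-01-02') - np.datetime64('2021-01-01')   # -> np.timedelta64(1, 'D')
np.datetime64('NaT') - np.datetime64('2021-01-01')          # -> np.timedelta64('NaT')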
def laplace_impl(context, builder, sig, args):
    _random = np.random.random
    _log = math.log

    def laplace_impl(loc, scale):
        U = _random()
        if U < 0.5:
            return loc + scale * _log(U + U)
        else:
            return loc - scale * _log(2.0 - U - U)

    sig, args = _fill_defaults(context, builder, sig, args, (0.0, 1.0))
    res = context.compile_internal(builder, laplace_impl, sig, args)
    return impl_ret_untracked(context, builder, sig.return_type, res)
def codegen(cgctx, builder, sig, args):
    (arg_0, arg_1) = args
    fty = ir.FunctionType(ir.IntType(64),
                          [ir.IntType(64), ir.IntType(64)])
    mul = builder.asm(fty, "mov $2, $0; imul $1, $0", "=r,r,r",
                      (arg_0, arg_1), name="asm_mul",
                      side_effect=False)
    return impl_ret_untracked(cgctx, builder, sig.return_type, mul)
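# A sketch of how such a codegen is typically wired up with
# numba.extending.intrinsic (an assumption about the surrounding context;
# the names asm_mul and _use_asm_mul are hypothetical):
from numba import njit, types
from numba.extending import intrinsic

@intrinsic
def asm_mul(typingctx, a, b):
    sig = types.int64(types.int64, types.int64)
    return sig, codegen        # codegen as defined above

@njit
def _use_asm_mul(x, y):
    return asm_mul(x, y)       # compiles down to the inline mov/imul asm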
def getitem_unituple(context, builder, sig, args):
    tupty, _ = sig.args
    tup, idx = args

    errmsg_oob = ("tuple index out of range",)

    if len(tupty) == 0:
        # Empty tuple.
        # Always branch and raise IndexError
        with builder.if_then(cgutils.true_bit):
            context.call_conv.return_user_exc(builder, IndexError,
                                              errmsg_oob)
        # This is unreachable in runtime,
        # but it exists to not terminate the current basicblock.
        res = context.get_constant_null(sig.return_type)
        return impl_ret_untracked(context, builder, sig.return_type, res)
    else:
        # The tuple is not empty
        bbelse = builder.append_basic_block("switch.else")
        bbend = builder.append_basic_block("switch.end")
        switch = builder.switch(idx, bbelse)

        with builder.goto_block(bbelse):
            context.call_conv.return_user_exc(builder, IndexError,
                                              errmsg_oob)

        lrtty = context.get_value_type(tupty.dtype)
        with builder.goto_block(bbend):
            phinode = builder.phi(lrtty)

        for i in range(tupty.count):
            ki = context.get_constant(types.intp, i)
            bbi = builder.append_basic_block("switch.%d" % i)
            switch.add_case(ki, bbi)
            # handle negative indexing, create case (-tuple.count + i) to
            # reference same block as i
            kin = context.get_constant(types.intp, -tupty.count + i)
            switch.add_case(kin, bbi)
            with builder.goto_block(bbi):
                value = builder.extract_value(tup, i)
                builder.branch(bbend)
                phinode.add_incoming(value, bbi)

        builder.position_at_end(bbend)
        res = phinode
        assert sig.return_type == tupty.dtype
        return impl_ret_borrowed(context, builder, sig.return_type, res)
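# Pure-Python analogue of the generated switch (illustrative): each index i
# gets two cases, i and -count + i, that branch to the same block.
def _getitem_sketch(tup, idx):
    n = len(tup)
    for i in range(n):
        if idx == i or idx == -n + i:
            return tup[i]
    raise IndexError("tuple index out of range")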
def timedelta_min_impl(context, builder, sig, args):
    # note this could be optimized by relying on the actual value of NAT,
    # but as NumPy doesn't rely on this, this seems more resilient
    in1, in2 = args
    in1_not_nat = is_not_nat(builder, in1)
    in2_not_nat = is_not_nat(builder, in2)
    in1_le_in2 = builder.icmp_signed('<=', in1, in2)
    res = builder.select(in1_le_in2, in1, in2)
    if NAT_DOMINATES and numpy_support.numpy_version >= (1, 18):
        # NaT now dominates, like NaN
        in1, in2 = in2, in1
    res = builder.select(in1_not_nat, res, in2)
    res = builder.select(in2_not_nat, res, in1)
    return impl_ret_untracked(context, builder, sig.return_type, res)
def wrapper(context, builder, sig, args):
    [typ] = sig.args
    [value] = args
    z = context.make_complex(builder, typ, value=value)
    x = z.real
    y = z.imag
    # Same as above: math.isfinite() is unavailable on 2.x so we precompute
    # its value and pass it to the pure Python implementation.
    x_is_finite = mathimpl.is_finite(builder, x)
    y_is_finite = mathimpl.is_finite(builder, y)
    inner_sig = signature(sig.return_type,
                          *(typ.underlying_float,) * 2 + (types.boolean,) * 2)
    res = context.compile_internal(builder, inner_func, inner_sig,
                                   (x, y, x_is_finite, y_is_finite))
    return impl_ret_untracked(context, builder, sig.return_type, res)
def expovariate_impl(context, builder, sig, args):
    _random = random.random
    _log = math.log

    def expovariate_impl(lambd):
        """Exponential distribution.  Taken from CPython."""
        # lambd: rate lambd = 1/mean
        # ('lambda' is a Python reserved word)

        # we use 1-random() instead of random() to preclude the
        # possibility of taking the log of zero.
        return -_log(1.0 - _random()) / lambd

    res = context.compile_internal(builder, expovariate_impl, sig, args)
    return impl_ret_untracked(context, builder, sig.return_type, res)
def print_item_impl(context, builder, sig, args):
    """
    Print a single constant value.
    """
    ty, = sig.args
    val = ty.literal_value
    pyapi = context.get_python_api(builder)
    strobj = pyapi.unserialize(pyapi.serialize_object(val))
    pyapi.print_object(strobj)
    pyapi.decref(strobj)
    res = context.get_dummy_value()
    return impl_ret_untracked(context, builder, sig.return_type, res)
def string_type_to_const(context, builder, fromty, toty, val):
    # calling str() since the const value can be non-str, like a tuple const (CSV)
    cstr = context.insert_const_string(builder.module, str(toty.literal_value))
    # check that the const value matches the stored string
    # by calling str == cstr
    fnty = lir.FunctionType(lir.IntType(1),
                            [lir.IntType(8).as_pointer(),
                             lir.IntType(8).as_pointer()])
    fn = cgutils.get_or_insert_function(builder.module, fnty,
                                        name="str_equal_cstr")
    match = builder.call(fn, [val, cstr])
    with cgutils.if_unlikely(builder, builder.not_(match)):
        # raise RuntimeError about the assumption violation
        usermsg = "constant string assumption violated"
        errmsg = "{}: expecting {}".format(usermsg, toty.literal_value)
        context.call_conv.return_user_exc(builder, RuntimeError, (errmsg,))
    return impl_ret_untracked(context, builder, toty, cstr)
def slice_constructor_impl(context, builder, sig, args):
    (
        default_start_pos,
        default_start_neg,
        default_stop_pos,
        default_stop_neg,
        default_step,
    ) = [context.get_constant(types.intp, x) for x in get_defaults(context)]

    slice_args = [None] * 3

    # Fetch non-None arguments
    if len(args) == 1 and sig.args[0] is not types.none:
        slice_args[1] = args[0]
    else:
        for i, (ty, val) in enumerate(zip(sig.args, args)):
            if ty is not types.none:
                slice_args[i] = val

    # Fill omitted arguments
    def get_arg_value(i, default):
        val = slice_args[i]
        if val is None:
            return default
        else:
            return val

    step = get_arg_value(2, default_step)
    is_step_negative = builder.icmp_signed('<', step,
                                           context.get_constant(types.intp, 0))
    default_stop = builder.select(is_step_negative,
                                  default_stop_neg, default_stop_pos)
    default_start = builder.select(is_step_negative,
                                   default_start_neg, default_start_pos)
    stop = get_arg_value(1, default_stop)
    start = get_arg_value(0, default_start)

    ty = sig.return_type
    sli = context.make_helper(builder, sig.return_type)
    sli.start = start
    sli.stop = stop
    sli.step = step

    res = sli._getvalue()
    return impl_ret_untracked(context, builder, sig.return_type, res)
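# Illustrative Python analogue of the default-filling logic above; the
# defaults tuple layout mirrors get_defaults(context), whose concrete values
# are an assumption here (positive/negative start and stop sentinels plus a
# unit step):
def _fill_slice_defaults(start, stop, step, defaults):
    d_start_pos, d_start_neg, d_stop_pos, d_stop_neg, d_step = defaults
    step = d_step if step is None else step
    if step < 0:
        start = d_start_neg if start is None else start
        stop = d_stop_neg if stop is None else stop
    else:
        start = d_start_pos if start is None else start
        stop = d_stop_pos if stop is None else stop
    return start, stop, step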
def float_impl(context, builder, sig, args):
    """
    Implement *fn* for a types.Float input.
    """
    [val] = args
    mod = builder.module
    input_type = sig.args[0]
    lty = context.get_value_type(input_type)
    func_name = {
        types.float32: f32extern,
        types.float64: f64extern,
    }[input_type]
    fnty = Type.function(lty, [lty])
    fn = cgutils.insert_pure_function(builder.module, fnty, name=func_name)
    res = builder.call(fn, (val,))
    res = context.cast(builder, res, input_type, sig.return_type)
    return impl_ret_untracked(context, builder, sig.return_type, res)
def real_floordiv_impl(context, builder, sig, args, loc=None):
    x, y = args
    res = cgutils.alloca_once(builder, x.type)
    with builder.if_else(cgutils.is_scalar_zero(builder, y),
                         likely=False) as (if_zero, if_non_zero):
        with if_zero:
            if not context.error_model.fp_zero_division(
                    builder, ("division by zero",), loc):
                # No exception raised => compute the +/-inf or nan result,
                # and set the FP exception word for NumPy warnings.
                quot = builder.fdiv(x, y)
                builder.store(quot, res)
        with if_non_zero:
            quot, _ = real_divmod(context, builder, x, y)
            builder.store(quot, res)
    return impl_ret_untracked(context, builder, sig.return_type,
                              builder.load(res))
def wald_impl(context, builder, sig, args):
    def wald_impl(mean, scale):
        if mean <= 0.0:
            raise ValueError("wald(): mean <= 0")
        if scale <= 0.0:
            raise ValueError("wald(): scale <= 0")
        mu_2l = mean / (2.0 * scale)
        Y = np.random.standard_normal()
        Y = mean * Y * Y
        X = mean + mu_2l * (Y - math.sqrt(4 * scale * Y + Y * Y))
        U = np.random.random()
        if U <= mean / (mean + X):
            return X
        else:
            return mean * mean / X

    res = context.compile_internal(builder, wald_impl, sig, args)
    return impl_ret_untracked(context, builder, sig.return_type, res)
def triangular_impl_2(context, builder, sig, args):
    fltty = sig.return_type
    low, high = args
    state_ptr = get_state_ptr(context, builder, "py")
    randval = get_next_double(context, builder, state_ptr)

    def triangular_impl_2(randval, low, high):
        u = randval
        c = 0.5
        if u > c:
            u = 1.0 - u
            low, high = high, low
        return low + (high - low) * math.sqrt(u * c)

    res = context.compile_internal(builder, triangular_impl_2,
                                   signature(*(fltty,) * 4),
                                   (randval, low, high))
    return impl_ret_untracked(context, builder, sig.return_type, res)
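# Usage sketch (assuming this lowering backs the two-argument form of
# random.triangular in nopython mode, where the mode defaults to the
# midpoint, hence the fixed c = 0.5 above):
from numba import njit
import random

@njit
def _triangular_demo(low, high):
    return random.triangular(low, high)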
def lower_get_type_max_value(context, builder, sig, args):
    typ = sig.args[0].dtype
    bw = typ.bitwidth
    if isinstance(typ, types.Integer):
        lty = ir.IntType(bw)
        val = typ.maxval
        res = ir.Constant(lty, val)
    elif isinstance(typ, types.Float):
        if bw == 32:
            lty = ir.FloatType()
        elif bw == 64:
            lty = ir.DoubleType()
        else:
            raise NotImplementedError("llvmlite only supports 32 and 64 bit floats")
        npty = getattr(np, 'float{}'.format(bw))
        res = ir.Constant(lty, np.finfo(npty).max)
    return impl_ret_untracked(context, builder, lty, res)
def optional_is_none(context, builder, sig, args):
    """
    Check if an Optional value is invalid
    """
    [lty, rty] = sig.args
    [lval, rval] = args

    # Make sure None is on the right
    if lty == types.none:
        lty, rty = rty, lty
        lval, rval = rval, lval

    opt_type = lty
    opt_val = lval

    opt = context.make_helper(builder, opt_type, opt_val)
    res = builder.not_(cgutils.as_bool_bit(builder, opt.valid))
    return impl_ret_untracked(context, builder, sig.return_type, res)
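# Usage sketch: an Optional typically arises when a variable may hold either
# None or a value, and `x is None` is then lowered through this helper
# (the demo name is hypothetical):
from numba import njit

@njit
def _first_positive_is_missing(arr):
    res = None               # res is typed as an Optional here
    for v in arr:
        if v > 0:
            res = v
            break
    return res is None       # checked via the Optional's valid flag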
def number_constructor(context, builder, sig, args):
    """
    Call a number class, e.g. np.int32(...)
    """
    if isinstance(sig.return_type, types.Array):
        # Array constructor
        dt = sig.return_type.dtype

        def foo(*arg_hack):
            return np.array(arg_hack, dtype=dt)

        res = context.compile_internal(builder, foo, sig, args)
        return impl_ret_untracked(context, builder, sig.return_type, res)
    else:
        # Scalar constructor
        [val] = args
        [valty] = sig.args
        return context.cast(builder, val, valty, sig.return_type)
def print_varargs_impl(context, builder, sig, args):
    """
    An entire print() call.
    """
    pyapi = context.get_python_api(builder)
    gil = pyapi.gil_ensure()

    for i, (argtype, argval) in enumerate(zip(sig.args, args)):
        signature = typing.signature(types.none, argtype)
        imp = context.get_function("print_item", signature)
        imp(builder, [argval])
        if i < len(args) - 1:
            pyapi.print_string(' ')
    pyapi.print_string('\n')

    pyapi.gil_release(gil)
    res = context.get_dummy_value()
    return impl_ret_untracked(context, builder, sig.return_type, res)
def impl(context, builder, sig, args):
    [va, vb] = args
    [ta, tb] = sig.args
    ret = alloc_boolean_result(builder)
    with builder.if_else(are_not_nat(builder, [va, vb])) as (then, otherwise):
        with then:
            norm_a, norm_b = normalize_timedeltas(context, builder,
                                                  va, vb, ta, tb)
            builder.store(builder.icmp(ll_op, norm_a, norm_b), ret)
        with otherwise:
            if numpy_support.numpy_version < (1, 16):
                # No scaling when comparing NaT with something else
                # (i.e. NaT is <= everything else, since it's the smallest
                #  int64 value)
                builder.store(builder.icmp(ll_op, va, vb), ret)
            else:
                # NumPy >= 1.16 switched to NaT >=/>/</<= NaT being False
                builder.store(cgutils.false_bit, ret)
    res = builder.load(ret)
    return impl_ret_untracked(context, builder, sig.return_type, res)
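# NumPy-level behaviour matched by the `otherwise` branch for >= 1.16
# (illustrative): any ordering comparison involving NaT is False.
import numpy as np

np.timedelta64(1, 'h') < np.timedelta64(90, 'm')    # -> True, after unit scaling
np.timedelta64('NaT') <= np.timedelta64(1, 's')     # -> False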
def zipf_impl(context, builder, sig, args):
    _random = np.random.random
    intty = sig.return_type

    def zipf_impl(a):
        if a <= 1.0:
            raise ValueError("zipf(): a <= 1")
        am1 = a - 1.0
        b = 2.0 ** am1
        while 1:
            U = 1.0 - _random()
            V = _random()
            X = intty(math.floor(U ** (-1.0 / am1)))
            T = (1.0 + 1.0 / X) ** am1
            if X >= 1 and V * X * (T - 1.0) / (b - 1.0) <= (T / b):
                return X

    res = context.compile_internal(builder, zipf_impl, sig, args)
    return impl_ret_untracked(context, builder, sig.return_type, res)
def int_power_impl(context, builder, sig, args):
    """
    a ** b, where a is an integer or real, and b an integer
    """
    is_integer = isinstance(sig.args[0], types.Integer)
    tp = sig.return_type
    zerodiv_return = _get_power_zerodiv_return(context, tp)

    def int_power(a, b):
        # Ensure computations are done with a large enough width
        r = tp(1)
        a = tp(a)
        if b < 0:
            invert = True
            exp = -b
            if exp < 0:
                raise OverflowError
            if is_integer:
                if a == 0:
                    if zerodiv_return:
                        return zerodiv_return
                    else:
                        raise ZeroDivisionError(
                            "0 cannot be raised to a negative power")
                if a != 1 and a != -1:
                    return 0
        else:
            invert = False
            exp = b
        if exp > 0x10000:
            # Optimization cutoff: fall back on the generic algorithm
            return math.pow(a, float(b))
        while exp != 0:
            if exp & 1:
                r *= a
            exp >>= 1
            a *= a
        return 1.0 / r if invert else r

    res = context.compile_internal(builder, int_power, sig, args)
    return impl_ret_untracked(context, builder, sig.return_type, res)
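# Worked example of the square-and-multiply loop above, stripped of the
# Numba-specific width and sign handling (illustrative):
def _int_power_sketch(a, b):
    r = 1
    while b != 0:
        if b & 1:
            r *= a     # multiply in the power for the current bit
        b >>= 1
        a *= a         # square for the next bit
    return r

assert _int_power_sketch(3, 13) == 3 ** 13   # 13 = 0b1101 -> a**1 * a**4 * a**8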