def iternext_specific(self, context, builder, result):
    # `ndim` is expected to come from the enclosing scope (e.g. a factory
    # closure that builds this iterator class for a fixed dimensionality).
    zero = context.get_constant(types.intp, 0)
    one = context.get_constant(types.intp, 1)

    bbend = cgutils.append_basic_block(builder, 'end')

    exhausted = cgutils.as_bool_bit(builder, builder.load(self.exhausted))
    with cgutils.if_unlikely(builder, exhausted):
        result.set_valid(False)
        builder.branch(bbend)

    indices = [builder.load(cgutils.gep(builder, self.indices, dim))
               for dim in range(ndim)]
    result.yield_(cgutils.pack_array(builder, indices))
    result.set_valid(True)

    shape = cgutils.unpack_tuple(builder, self.shape, ndim)
    _increment_indices(context, builder, ndim, shape,
                       self.indices, self.exhausted)

    builder.branch(bbend)
    builder.position_at_end(bbend)
def return_optional_value(self, builder, retty, valty, value):
    if valty == types.none:
        # Value is none
        self.return_native_none(builder)

    elif retty == valty:
        # Value is an optional, need a runtime switch
        optval = self.context.make_helper(builder, retty, value=value)

        validbit = cgutils.as_bool_bit(builder, optval.valid)
        with builder.if_then(validbit):
            retval = self.context.get_return_value(builder, retty.type,
                                                   optval.data)
            self.return_value(builder, retval)

        self.return_native_none(builder)

    elif not isinstance(valty, types.Optional):
        # Value is not an optional, need a cast
        if valty != retty.type:
            value = self.context.cast(builder, value, fromty=valty,
                                      toty=retty.type)
        retval = self.context.get_return_value(builder, retty.type, value)
        self.return_value(builder, retval)

    else:
        raise NotImplementedError("returning {0} for {1}".format(valty, retty))
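# Hedged usage sketch (not part of the lowering code): the kind of jitted
# function whose return goes through return_optional_value above, because one
# branch returns a concrete value and another returns None, so type inference
# unifies the return type into an Optional.  Assumes Numba and NumPy are
# installed; the function name is illustrative only.
import numpy as np
from numba import njit

@njit
def first_positive(arr):
    for v in arr:
        if v > 0:
            return v      # concrete-value branch
    return None           # None branch => inferred return type is Optional

print(first_positive(np.array([-1.0, 2.0])))   # 2.0
print(first_positive(np.array([-1.0, -3.0])))  # None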
def return_optional_value(self, builder, retty, valty, value):
    if valty == types.none:
        # Value is none
        self.return_native_none(builder)

    elif retty == valty:
        # Value is an optional, need a runtime switch on the valid bit
        optcls = self.context.make_optional(retty)
        optval = optcls(self.context, builder, value=value)

        validbit = cgutils.as_bool_bit(builder, optval.valid)
        with cgutils.ifthen(builder, validbit):
            self.return_value(builder, optval.data)

        self.return_native_none(builder)

    elif not isinstance(valty, types.Optional):
        # Value is not an optional, need a cast
        if valty != retty.type:
            value = self.context.cast(builder, value, fromty=valty,
                                      toty=retty.type)
        self.return_value(builder, value)

    else:
        raise NotImplementedError("returning {0} for {1}".format(valty, retty))
def optional_to_optional(context, builder, fromty, toty, val):
    """
    The optional -> optional cast must be special-cased for correct
    propagation of the None value.  Given types T and U, casting T? to U?
    ("?" denotes optional) should always succeed: if the from-value is None,
    the cast value (of type U?) is None as well; otherwise the from-value is
    cast to U.  This differs from casting T? to U, which requires the
    from-value not to be None.
    """
    optval = context.make_helper(builder, fromty, value=val)
    validbit = cgutils.as_bool_bit(builder, optval.valid)
    # Create uninitialized optional value
    outoptval = context.make_helper(builder, toty)

    with builder.if_else(validbit) as (is_valid, is_not_valid):
        with is_valid:
            # Cast internal value
            outoptval.valid = cgutils.true_bit
            outoptval.data = context.cast(builder, optval.data,
                                          fromty.type, toty.type)

        with is_not_valid:
            # Store None to result
            outoptval.valid = cgutils.false_bit
            outoptval.data = cgutils.get_null_value(outoptval.data.type)

    return outoptval._getvalue()
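# Pure-Python sketch of the T? -> U? semantics implemented above (not part of
# the lowering code): None propagates unchanged, any other value is converted
# with the payload cast.  `cast_payload` is a hypothetical stand-in for
# context.cast on the underlying type.
def cast_optional_to_optional(value, cast_payload):
    if value is None:
        return None
    return cast_payload(value)

# cast_optional_to_optional(None, float)  -> None
# cast_optional_to_optional(3, float)     -> 3.0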
def optional_to_any(context, builder, fromty, toty, val):
    optval = context.make_helper(builder, fromty, value=val)
    validbit = cgutils.as_bool_bit(builder, optval.valid)
    with builder.if_then(builder.not_(validbit), likely=False):
        msg = "expected %s, got None" % (fromty.type,)
        context.call_conv.return_user_exc(builder, TypeError, (msg,))

    return context.cast(builder, optval.data, fromty.type, toty)
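# Pure-Python sketch of the T? -> U unwrap above (not part of the lowering
# code): a None payload is a runtime TypeError, otherwise the payload is cast
# to the target type.  `cast_payload` and `target_name` are illustrative.
def unwrap_optional(value, cast_payload, target_name):
    if value is None:
        raise TypeError("expected %s, got None" % target_name)
    return cast_payload(value)

# unwrap_optional(3, float, "float64") -> 3.0
# unwrap_optional(None, float, "float64") -> TypeError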
def optional_is_none(context, builder, sig, args):
    """
    Check if an Optional value is invalid
    """
    [lty, rty] = sig.args
    [lval, rval] = args

    # Make sure None is on the right
    if lty == types.none:
        lty, rty = rty, lty
        lval, rval = rval, lval

    opt_type = lty
    opt_val = lval

    opt = context.make_helper(builder, opt_type, opt_val)
    res = builder.not_(cgutils.as_bool_bit(builder, opt.valid))
    return impl_ret_untracked(context, builder, sig.return_type, res)
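# Pure-Python sketch of the test above (not part of the lowering code): an
# Optional is modeled as a (valid, data) pair, and `value is None` is simply
# the negation of its valid bit.
def optional_is_none_py(opt):
    valid, _data = opt
    return not valid

# optional_is_none_py((False, 0.0)) -> True   (holds None)
# optional_is_none_py((True, 3.5))  -> False  (holds 3.5)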
def return_optional_value(self, builder, retty, valty, value):
    if valty == types.none:
        # Value is none
        self.return_native_none(builder)

    elif retty == valty:
        # Value is an optional, need a runtime switch on the valid bit
        optcls = self.context.make_optional(retty)
        optval = optcls(self.context, builder, value=value)

        validbit = cgutils.as_bool_bit(builder, optval.valid)
        with builder.if_then(validbit):
            self.return_value(builder, optval.data)

        self.return_native_none(builder)

    elif not isinstance(valty, types.Optional):
        # Value is not an optional, need a cast
        if valty != retty.type:
            value = self.context.cast(builder, value, fromty=valty,
                                      toty=retty.type)
        self.return_value(builder, value)

    else:
        raise NotImplementedError("returning {0} for {1}".format(valty, retty))
def iternext_specific(self, context, builder, arrty, arr, result):
    ndim = arrty.ndim
    data = arr.data
    shapes = cgutils.unpack_tuple(builder, arr.shape, ndim)
    strides = cgutils.unpack_tuple(builder, arr.strides, ndim)
    indices = self.indices
    pointers = self.pointers

    zero = context.get_constant(types.intp, 0)
    one = context.get_constant(types.intp, 1)
    minus_one = context.get_constant(types.intp, -1)
    result.set_valid(True)

    bbcont = cgutils.append_basic_block(builder, 'continued')
    bbend = cgutils.append_basic_block(builder, 'end')

    # Catch already computed iterator exhaustion
    is_empty = cgutils.as_bool_bit(builder, builder.load(self.empty))
    with cgutils.if_unlikely(builder, is_empty):
        result.set_valid(False)
        builder.branch(bbend)

    # Current pointer inside last dimension
    last_ptr = cgutils.alloca_once(builder, data.type)

    # Walk from inner dimension to outer
    for dim in reversed(range(ndim)):
        idxptr = cgutils.gep(builder, indices, dim)
        idx = builder.load(idxptr)
        count = shapes[dim]
        stride = strides[dim]
        in_bounds = builder.icmp(lc.ICMP_SLT, idx, count)
        with cgutils.if_likely(builder, in_bounds):
            # Index is valid => we point to the right slot
            ptrptr = cgutils.gep(builder, pointers, dim)
            ptr = builder.load(ptrptr)
            builder.store(ptr, last_ptr)
            # Compute next index and pointer for this dimension
            next_ptr = cgutils.pointer_add(builder, ptr, stride)
            builder.store(next_ptr, ptrptr)
            next_idx = builder.add(idx, one)
            builder.store(next_idx, idxptr)
            # Reset inner dimensions
            for inner_dim in range(dim + 1, ndim):
                idxptr = cgutils.gep(builder, indices, inner_dim)
                ptrptr = cgutils.gep(builder, pointers, inner_dim)
                # Compute next index and pointer for this dimension
                inner_ptr = cgutils.pointer_add(builder, ptr,
                                                strides[inner_dim])
                builder.store(inner_ptr, ptrptr)
                builder.store(one, idxptr)
            builder.branch(bbcont)

    # End of array => skip to end
    result.set_valid(False)
    builder.branch(bbend)

    builder.position_at_end(bbcont)
    # After processing of indices and pointers: fetch value.
    ptr = builder.load(last_ptr)
    value = context.unpack_value(builder, arrty.dtype, ptr)
    result.yield_(value)
    builder.branch(bbend)

    builder.position_at_end(bbend)
def cast(self, builder, val, fromty, toty):
    if fromty == toty or toty == types.Any or isinstance(toty, types.Kind):
        return val

    elif ((fromty in types.unsigned_domain and
           toty in types.signed_domain) or
          (fromty in types.integer_domain and
           toty in types.unsigned_domain)):
        lfrom = self.get_value_type(fromty)
        lto = self.get_value_type(toty)
        if lfrom.width <= lto.width:
            return builder.zext(val, lto)
        elif lfrom.width > lto.width:
            return builder.trunc(val, lto)

    elif fromty in types.signed_domain and toty in types.signed_domain:
        lfrom = self.get_value_type(fromty)
        lto = self.get_value_type(toty)
        if lfrom.width <= lto.width:
            return builder.sext(val, lto)
        elif lfrom.width > lto.width:
            return builder.trunc(val, lto)

    elif fromty in types.real_domain and toty in types.real_domain:
        lty = self.get_value_type(toty)
        if fromty == types.float32 and toty == types.float64:
            return builder.fpext(val, lty)
        elif fromty == types.float64 and toty == types.float32:
            return builder.fptrunc(val, lty)

    elif fromty in types.real_domain and toty in types.complex_domain:
        if fromty == types.float32:
            if toty == types.complex128:
                real = self.cast(builder, val, fromty, types.float64)
            else:
                real = val
        elif fromty == types.float64:
            if toty == types.complex64:
                real = self.cast(builder, val, fromty, types.float32)
            else:
                real = val

        if toty == types.complex128:
            imag = self.get_constant(types.float64, 0)
        elif toty == types.complex64:
            imag = self.get_constant(types.float32, 0)
        else:
            raise Exception("unreachable")

        cmplx = self.make_complex(toty)(self, builder)
        cmplx.real = real
        cmplx.imag = imag
        return cmplx._getvalue()

    elif fromty in types.integer_domain and toty in types.real_domain:
        lty = self.get_value_type(toty)
        if fromty in types.signed_domain:
            return builder.sitofp(val, lty)
        else:
            return builder.uitofp(val, lty)

    elif toty in types.integer_domain and fromty in types.real_domain:
        lty = self.get_value_type(toty)
        if toty in types.signed_domain:
            return builder.fptosi(val, lty)
        else:
            return builder.fptoui(val, lty)

    elif fromty in types.integer_domain and toty in types.complex_domain:
        cmplxcls, flty = builtins.get_complex_info(toty)
        cmpl = cmplxcls(self, builder)
        cmpl.real = self.cast(builder, val, fromty, flty)
        cmpl.imag = self.get_constant(flty, 0)
        return cmpl._getvalue()

    elif fromty in types.complex_domain and toty in types.complex_domain:
        srccls, srcty = builtins.get_complex_info(fromty)
        dstcls, dstty = builtins.get_complex_info(toty)

        src = srccls(self, builder, value=val)
        dst = dstcls(self, builder)
        dst.real = self.cast(builder, src.real, srcty, dstty)
        dst.imag = self.cast(builder, src.imag, srcty, dstty)
        return dst._getvalue()

    elif (isinstance(toty, types.UniTuple) and
          isinstance(fromty, types.UniTuple) and
          len(fromty) == len(toty)):
        olditems = cgutils.unpack_tuple(builder, val, len(fromty))
        items = [self.cast(builder, i, fromty.dtype, toty.dtype)
                 for i in olditems]
        tup = self.get_constant_undef(toty)
        for idx, val in enumerate(items):
            tup = builder.insert_value(tup, val, idx)
        return tup

    elif (types.is_int_tuple(toty) and types.is_int_tuple(fromty) and
          len(toty) == len(fromty)):
        olditems = cgutils.unpack_tuple(builder, val, len(fromty))
        items = [self.cast(builder, i, t, toty.dtype)
                 for i, t in zip(olditems, fromty.types)]
        tup = self.get_constant_undef(toty)
        for idx, val in enumerate(items):
            tup = builder.insert_value(tup, val, idx)
        return tup

    elif toty == types.boolean:
        return self.is_true(builder, fromty, val)

    elif fromty == types.boolean:
        # first promote to int32
        asint = builder.zext(val, Type.int())
        # then promote to number
        return self.cast(builder, asint, types.int32, toty)

    elif fromty == types.none and isinstance(toty, types.Optional):
        return self.make_optional_none(builder, toty.type)

    elif isinstance(toty, types.Optional):
        casted = self.cast(builder, val, fromty, toty.type)
        return self.make_optional_value(builder, toty.type, casted)

    elif isinstance(fromty, types.Optional):
        optty = self.make_optional(fromty)
        optval = optty(self, builder, value=val)
        validbit = cgutils.as_bool_bit(builder, optval.valid)
        with cgutils.if_unlikely(builder, builder.not_(validbit)):
            self.return_errcode(builder, errcode.NONE_TYPE_ERROR)
        return optval.data

    elif (isinstance(fromty, types.Array) and
          isinstance(toty, types.Array)):
        # Type inference should have prevented illegal array casting.
        assert toty.layout == 'A'
        return val

    raise NotImplementedError("cast", val, fromty, toty)
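# Pure-Python sketch of the integer narrowing branch above (not part of the
# lowering code): trunc keeps the low bits of the destination width; widening
# (zext for unsigned sources, sext for signed ones) preserves the value and is
# not modelled here.  `to_signed` says how the kept bits are reinterpreted.
def int_trunc(value, to_bits, to_signed):
    result = value & ((1 << to_bits) - 1)          # keep the low bits
    if to_signed and result >= (1 << (to_bits - 1)):
        result -= (1 << to_bits)                   # two's-complement reinterpret
    return result

# int_trunc(300, 8, False) -> 44
# int_trunc(200, 8, True)  -> -56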
def cast(self, builder, val, fromty, toty):
    if fromty == toty or toty == types.Any or isinstance(toty, types.Kind):
        return val

    elif ((fromty in types.unsigned_domain and
           toty in types.signed_domain) or
          (fromty in types.integer_domain and
           toty in types.unsigned_domain)):
        lfrom = self.get_value_type(fromty)
        lto = self.get_value_type(toty)
        if lfrom.width <= lto.width:
            return builder.zext(val, lto)
        elif lfrom.width > lto.width:
            return builder.trunc(val, lto)

    elif fromty in types.signed_domain and toty in types.signed_domain:
        lfrom = self.get_value_type(fromty)
        lto = self.get_value_type(toty)
        if lfrom.width <= lto.width:
            return builder.sext(val, lto)
        elif lfrom.width > lto.width:
            return builder.trunc(val, lto)

    elif fromty in types.real_domain and toty in types.real_domain:
        lty = self.get_value_type(toty)
        if fromty == types.float32 and toty == types.float64:
            return builder.fpext(val, lty)
        elif fromty == types.float64 and toty == types.float32:
            return builder.fptrunc(val, lty)

    elif fromty in types.real_domain and toty in types.complex_domain:
        if fromty == types.float32:
            if toty == types.complex128:
                real = self.cast(builder, val, fromty, types.float64)
            else:
                real = val
        elif fromty == types.float64:
            if toty == types.complex64:
                real = self.cast(builder, val, fromty, types.float32)
            else:
                real = val

        if toty == types.complex128:
            imag = self.get_constant(types.float64, 0)
        elif toty == types.complex64:
            imag = self.get_constant(types.float32, 0)
        else:
            raise Exception("unreachable")

        cmplx = self.make_complex(toty)(self, builder)
        cmplx.real = real
        cmplx.imag = imag
        return cmplx._getvalue()

    elif fromty in types.integer_domain and toty in types.real_domain:
        lty = self.get_value_type(toty)
        if fromty in types.signed_domain:
            return builder.sitofp(val, lty)
        else:
            return builder.uitofp(val, lty)

    elif toty in types.integer_domain and fromty in types.real_domain:
        lty = self.get_value_type(toty)
        if toty in types.signed_domain:
            return builder.fptosi(val, lty)
        else:
            return builder.fptoui(val, lty)

    elif fromty in types.integer_domain and toty in types.complex_domain:
        cmplxcls, flty = builtins.get_complex_info(toty)
        cmpl = cmplxcls(self, builder)
        cmpl.real = self.cast(builder, val, fromty, flty)
        cmpl.imag = self.get_constant(flty, 0)
        return cmpl._getvalue()

    elif fromty in types.complex_domain and toty in types.complex_domain:
        srccls, srcty = builtins.get_complex_info(fromty)
        dstcls, dstty = builtins.get_complex_info(toty)

        src = srccls(self, builder, value=val)
        dst = dstcls(self, builder)
        dst.real = self.cast(builder, src.real, srcty, dstty)
        dst.imag = self.cast(builder, src.imag, srcty, dstty)
        return dst._getvalue()

    elif (isinstance(toty, types.UniTuple) and
          isinstance(fromty, types.UniTuple) and
          len(fromty) == len(toty)):
        olditems = cgutils.unpack_tuple(builder, val, len(fromty))
        items = [self.cast(builder, i, fromty.dtype, toty.dtype)
                 for i in olditems]
        tup = self.get_constant_undef(toty)
        for idx, val in enumerate(items):
            tup = builder.insert_value(tup, val, idx)
        return tup

    elif (isinstance(fromty, (types.UniTuple, types.Tuple)) and
          isinstance(toty, (types.UniTuple, types.Tuple)) and
          len(toty) == len(fromty)):
        olditems = cgutils.unpack_tuple(builder, val, len(fromty))
        items = [self.cast(builder, i, f, t)
                 for i, f, t in zip(olditems, fromty, toty)]
        tup = self.get_constant_undef(toty)
        for idx, val in enumerate(items):
            tup = builder.insert_value(tup, val, idx)
        return tup

    elif toty == types.boolean:
        return self.is_true(builder, fromty, val)

    elif fromty == types.boolean:
        # first promote to int32
        asint = builder.zext(val, Type.int())
        # then promote to number
        return self.cast(builder, asint, types.int32, toty)

    elif fromty == types.none and isinstance(toty, types.Optional):
        return self.make_optional_none(builder, toty.type)

    elif isinstance(toty, types.Optional):
        casted = self.cast(builder, val, fromty, toty.type)
        return self.make_optional_value(builder, toty.type, casted)

    elif isinstance(fromty, types.Optional):
        optty = self.make_optional(fromty)
        optval = optty(self, builder, value=val)
        validbit = cgutils.as_bool_bit(builder, optval.valid)
        with cgutils.if_unlikely(builder, builder.not_(validbit)):
            msg = "expected %s, got None" % (fromty.type,)
            self.call_conv.return_user_exc(builder, TypeError, (msg,))
        return optval.data

    elif (isinstance(fromty, types.Array) and
          isinstance(toty, types.Array)):
        # Type inference should have prevented illegal array casting.
        assert toty.layout == 'A'
        return val

    raise NotImplementedError("cast", val, fromty, toty)
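# Pure-Python sketch of the real -> complex branch above (not part of the
# lowering code): the real part is cast to the destination precision and the
# imaginary part is a zero constant of that precision.  Python's complex is
# double precision, so the single-precision case is only approximated here.
def real_to_complex(value):
    return complex(float(value), 0.0)

# real_to_complex(1.5) -> (1.5+0j)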
def cast(self, builder, val, fromty, toty):
    if fromty == toty or toty == types.Any or isinstance(toty, types.Kind):
        return val

    elif isinstance(fromty, types.Integer) and isinstance(toty, types.Integer):
        if toty.bitwidth == fromty.bitwidth:
            # Just a change of signedness
            return val
        elif toty.bitwidth < fromty.bitwidth:
            # Downcast
            return builder.trunc(val, self.get_value_type(toty))
        elif fromty.signed:
            # Signed upcast
            return builder.sext(val, self.get_value_type(toty))
        else:
            # Unsigned upcast
            return builder.zext(val, self.get_value_type(toty))

    elif fromty in types.real_domain and toty in types.real_domain:
        lty = self.get_value_type(toty)
        if fromty == types.float32 and toty == types.float64:
            return builder.fpext(val, lty)
        elif fromty == types.float64 and toty == types.float32:
            return builder.fptrunc(val, lty)

    elif fromty in types.real_domain and toty in types.complex_domain:
        if fromty == types.float32:
            if toty == types.complex128:
                real = self.cast(builder, val, fromty, types.float64)
            else:
                real = val
        elif fromty == types.float64:
            if toty == types.complex64:
                real = self.cast(builder, val, fromty, types.float32)
            else:
                real = val

        if toty == types.complex128:
            imag = self.get_constant(types.float64, 0)
        elif toty == types.complex64:
            imag = self.get_constant(types.float32, 0)
        else:
            raise Exception("unreachable")

        cmplx = self.make_complex(toty)(self, builder)
        cmplx.real = real
        cmplx.imag = imag
        return cmplx._getvalue()

    elif fromty in types.integer_domain and toty in types.real_domain:
        lty = self.get_value_type(toty)
        if fromty in types.signed_domain:
            return builder.sitofp(val, lty)
        else:
            return builder.uitofp(val, lty)

    elif toty in types.integer_domain and fromty in types.real_domain:
        lty = self.get_value_type(toty)
        if toty in types.signed_domain:
            return builder.fptosi(val, lty)
        else:
            return builder.fptoui(val, lty)

    elif fromty in types.integer_domain and toty in types.complex_domain:
        cmplxcls, flty = builtins.get_complex_info(toty)
        cmpl = cmplxcls(self, builder)
        cmpl.real = self.cast(builder, val, fromty, flty)
        cmpl.imag = self.get_constant(flty, 0)
        return cmpl._getvalue()

    elif fromty in types.complex_domain and toty in types.complex_domain:
        srccls, srcty = builtins.get_complex_info(fromty)
        dstcls, dstty = builtins.get_complex_info(toty)

        src = srccls(self, builder, value=val)
        dst = dstcls(self, builder)
        dst.real = self.cast(builder, src.real, srcty, dstty)
        dst.imag = self.cast(builder, src.imag, srcty, dstty)
        return dst._getvalue()

    elif (isinstance(toty, types.UniTuple) and
          isinstance(fromty, types.UniTuple) and
          len(fromty) == len(toty)):
        olditems = cgutils.unpack_tuple(builder, val, len(fromty))
        items = [self.cast(builder, i, fromty.dtype, toty.dtype)
                 for i in olditems]
        tup = self.get_constant_undef(toty)
        for idx, val in enumerate(items):
            tup = builder.insert_value(tup, val, idx)
        return tup

    elif (isinstance(fromty, (types.UniTuple, types.Tuple)) and
          isinstance(toty, (types.UniTuple, types.Tuple)) and
          len(toty) == len(fromty)):
        olditems = cgutils.unpack_tuple(builder, val, len(fromty))
        items = [self.cast(builder, i, f, t)
                 for i, f, t in zip(olditems, fromty, toty)]
        tup = self.get_constant_undef(toty)
        for idx, val in enumerate(items):
            tup = builder.insert_value(tup, val, idx)
        return tup

    elif toty == types.boolean:
        return self.is_true(builder, fromty, val)

    elif fromty == types.boolean:
        # first promote to int32
        asint = builder.zext(val, Type.int())
        # then promote to number
        return self.cast(builder, asint, types.int32, toty)

    elif fromty == types.none and isinstance(toty, types.Optional):
        return self.make_optional_none(builder, toty.type)

    elif isinstance(toty, types.Optional):
        casted = self.cast(builder, val, fromty, toty.type)
        return self.make_optional_value(builder, toty.type, casted)

    elif isinstance(fromty, types.Optional):
        optty = self.make_optional(fromty)
        optval = optty(self, builder, value=val)
        validbit = cgutils.as_bool_bit(builder, optval.valid)
        with cgutils.if_unlikely(builder, builder.not_(validbit)):
            msg = "expected %s, got None" % (fromty.type,)
            self.call_conv.return_user_exc(builder, TypeError, (msg,))
        return optval.data

    elif (isinstance(fromty, types.Array) and
          isinstance(toty, types.Array)):
        # Type inference should have prevented illegal array casting.
        assert toty.layout == 'A'
        return val

    elif fromty in types.integer_domain and toty == types.voidptr:
        return builder.inttoptr(val, self.get_value_type(toty))

    raise NotImplementedError("cast", val, fromty, toty)
def iternext_specific(self, context, builder, arrty, arr, result):
    # `kind` ('flat' or 'ndenumerate') is expected to come from the enclosing
    # scope, e.g. a factory closure that builds this iterator class.
    ndim = arrty.ndim
    data = arr.data
    shapes = cgutils.unpack_tuple(builder, arr.shape, ndim)
    strides = cgutils.unpack_tuple(builder, arr.strides, ndim)
    indices = self.indices
    pointers = self.pointers

    zero = context.get_constant(types.intp, 0)
    one = context.get_constant(types.intp, 1)

    bbend = cgutils.append_basic_block(builder, 'end')

    # Catch already computed iterator exhaustion
    is_exhausted = cgutils.as_bool_bit(builder, builder.load(self.exhausted))
    with cgutils.if_unlikely(builder, is_exhausted):
        result.set_valid(False)
        builder.branch(bbend)
    result.set_valid(True)

    # Current pointer inside last dimension
    last_ptr = cgutils.gep(builder, pointers, ndim - 1)
    ptr = builder.load(last_ptr)
    value = context.unpack_value(builder, arrty.dtype, ptr)
    if kind == 'flat':
        result.yield_(value)
    else:
        # ndenumerate() => yield (indices, value)
        idxvals = [builder.load(cgutils.gep(builder, indices, dim))
                   for dim in range(ndim)]
        idxtuple = cgutils.pack_array(builder, idxvals)
        result.yield_(
            cgutils.make_anonymous_struct(builder, [idxtuple, value]))

    # Update indices and pointers by walking from inner
    # dimension to outer.
    for dim in reversed(range(ndim)):
        idxptr = cgutils.gep(builder, indices, dim)
        idx = builder.add(builder.load(idxptr), one)
        count = shapes[dim]
        stride = strides[dim]
        in_bounds = builder.icmp(lc.ICMP_SLT, idx, count)
        with cgutils.if_likely(builder, in_bounds):
            # Index is valid => pointer can simply be incremented.
            builder.store(idx, idxptr)
            ptrptr = cgutils.gep(builder, pointers, dim)
            ptr = builder.load(ptrptr)
            ptr = cgutils.pointer_add(builder, ptr, stride)
            builder.store(ptr, ptrptr)
            # Reset pointers in inner dimensions
            for inner_dim in range(dim + 1, ndim):
                ptrptr = cgutils.gep(builder, pointers, inner_dim)
                builder.store(ptr, ptrptr)
            builder.branch(bbend)
        # Reset index and continue with next dimension
        builder.store(zero, idxptr)

    # End of array
    builder.store(cgutils.true_byte, self.exhausted)
    builder.branch(bbend)

    builder.position_at_end(bbend)
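# Pure-Python sketch of the index/pointer update loop above (not part of the
# lowering code): walk dimensions from innermost to outermost, keep the first
# index that is still in bounds after incrementing, and reset the inner ones.
def advance_indices(indices, shape):
    for dim in reversed(range(len(shape))):
        indices[dim] += 1
        if indices[dim] < shape[dim]:
            return True        # still inside the array
        indices[dim] = 0       # carry into the next-outer dimension
    return False               # iterator exhausted

# idx = [0, 2]; advance_indices(idx, (2, 3)) -> True,  idx == [1, 0]
# idx = [1, 2]; advance_indices(idx, (2, 3)) -> False, idx == [0, 0]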
def cast(self, builder, val, fromty, toty):
    if fromty == toty or toty == types.Any or isinstance(toty, types.Kind):
        return val

    elif isinstance(fromty, types.Integer) and isinstance(toty, types.Integer):
        if toty.bitwidth == fromty.bitwidth:
            # Just a change of signedness
            return val
        elif toty.bitwidth < fromty.bitwidth:
            # Downcast
            return builder.trunc(val, self.get_value_type(toty))
        elif fromty.signed:
            # Signed upcast
            return builder.sext(val, self.get_value_type(toty))
        else:
            # Unsigned upcast
            return builder.zext(val, self.get_value_type(toty))

    elif fromty in types.real_domain and toty in types.real_domain:
        lty = self.get_value_type(toty)
        if fromty == types.float32 and toty == types.float64:
            return builder.fpext(val, lty)
        elif fromty == types.float64 and toty == types.float32:
            return builder.fptrunc(val, lty)

    elif fromty in types.real_domain and toty in types.complex_domain:
        if fromty == types.float32:
            if toty == types.complex128:
                real = self.cast(builder, val, fromty, types.float64)
            else:
                real = val
        elif fromty == types.float64:
            if toty == types.complex64:
                real = self.cast(builder, val, fromty, types.float32)
            else:
                real = val

        if toty == types.complex128:
            imag = self.get_constant(types.float64, 0)
        elif toty == types.complex64:
            imag = self.get_constant(types.float32, 0)
        else:
            raise Exception("unreachable")

        cmplx = self.make_complex(toty)(self, builder)
        cmplx.real = real
        cmplx.imag = imag
        return cmplx._getvalue()

    elif fromty in types.integer_domain and toty in types.real_domain:
        lty = self.get_value_type(toty)
        if fromty in types.signed_domain:
            return builder.sitofp(val, lty)
        else:
            return builder.uitofp(val, lty)

    elif toty in types.integer_domain and fromty in types.real_domain:
        lty = self.get_value_type(toty)
        if toty in types.signed_domain:
            return builder.fptosi(val, lty)
        else:
            return builder.fptoui(val, lty)

    elif fromty in types.integer_domain and toty in types.complex_domain:
        cmplxcls, flty = builtins.get_complex_info(toty)
        cmpl = cmplxcls(self, builder)
        cmpl.real = self.cast(builder, val, fromty, flty)
        cmpl.imag = self.get_constant(flty, 0)
        return cmpl._getvalue()

    elif fromty in types.complex_domain and toty in types.complex_domain:
        srccls, srcty = builtins.get_complex_info(fromty)
        dstcls, dstty = builtins.get_complex_info(toty)

        src = srccls(self, builder, value=val)
        dst = dstcls(self, builder)
        dst.real = self.cast(builder, src.real, srcty, dstty)
        dst.imag = self.cast(builder, src.imag, srcty, dstty)
        return dst._getvalue()

    elif (isinstance(fromty, (types.UniTuple, types.Tuple)) and
          isinstance(toty, (types.UniTuple, types.Tuple)) and
          len(toty) == len(fromty)):
        olditems = cgutils.unpack_tuple(builder, val, len(fromty))
        items = [self.cast(builder, i, f, t)
                 for i, f, t in zip(olditems, fromty, toty)]
        return cgutils.make_anonymous_struct(builder, items)

    elif toty == types.boolean:
        return self.is_true(builder, fromty, val)

    elif fromty == types.boolean:
        # first promote to int32
        asint = builder.zext(val, Type.int())
        # then promote to number
        return self.cast(builder, asint, types.int32, toty)

    elif fromty == types.none and isinstance(toty, types.Optional):
        return self.make_optional_none(builder, toty.type)

    elif isinstance(toty, types.Optional):
        casted = self.cast(builder, val, fromty, toty.type)
        return self.make_optional_value(builder, toty.type, casted)

    elif isinstance(fromty, types.Optional):
        optty = self.make_optional(fromty)
        optval = optty(self, builder, value=val)
        validbit = cgutils.as_bool_bit(builder, optval.valid)
        with cgutils.if_unlikely(builder, builder.not_(validbit)):
            msg = "expected %s, got None" % (fromty.type,)
            self.call_conv.return_user_exc(builder, TypeError, (msg,))
        return optval.data

    elif (isinstance(fromty, types.Array) and
          isinstance(toty, types.Array)):
        # Type inference should have prevented illegal array casting.
        assert toty.layout == 'A'
        return val

    elif (isinstance(fromty, types.List) and
          isinstance(toty, types.List)):
        # Casting from non-reflected to reflected
        assert fromty.dtype == toty.dtype
        return val

    elif (isinstance(fromty, types.RangeType) and
          isinstance(toty, types.RangeType)):
        olditems = cgutils.unpack_tuple(builder, val, 3)
        items = [self.cast(builder, v, fromty.dtype, toty.dtype)
                 for v in olditems]
        return cgutils.make_anonymous_struct(builder, items)

    elif fromty in types.integer_domain and toty == types.voidptr:
        return builder.inttoptr(val, self.get_value_type(toty))

    raise NotImplementedError("cast", val, fromty, toty)
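# Pure-Python sketch of the tuple-to-tuple branch above (not part of the
# lowering code): both tuple types must have the same length, and each item is
# cast with its own (from, to) type pair.  `cast_item` is a hypothetical
# stand-in for the recursive self.cast call.
def cast_tuple(values, from_types, to_types, cast_item):
    assert len(values) == len(from_types) == len(to_types)
    return tuple(cast_item(v, f, t)
                 for v, f, t in zip(values, from_types, to_types))

# cast_tuple((1, 2), ("int32", "int32"), ("float64", "float64"),
#            lambda v, f, t: float(v)) -> (1.0, 2.0)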