def int_power_func_body(context, builder, x, y):
    pcounter = builder.alloca(y.type)
    presult = builder.alloca(x.type)
    result = Constant.int(x.type, 1)
    counter = y
    builder.store(counter, pcounter)
    builder.store(result, presult)

    bbcond = cgutils.append_basic_block(builder, ".cond")
    bbbody = cgutils.append_basic_block(builder, ".body")
    bbexit = cgutils.append_basic_block(builder, ".exit")

    del counter
    del result

    builder.branch(bbcond)

    with cgutils.goto_block(builder, bbcond):
        counter = builder.load(pcounter)
        ONE = Constant.int(counter.type, 1)
        ZERO = Constant.null(counter.type)
        builder.store(builder.sub(counter, ONE), pcounter)
        pred = builder.icmp(lc.ICMP_SGT, counter, ZERO)
        builder.cbranch(pred, bbbody, bbexit)

    with cgutils.goto_block(builder, bbbody):
        result = builder.load(presult)
        builder.store(builder.mul(result, x), presult)
        builder.branch(bbcond)

    builder.position_at_end(bbexit)
    return builder.load(presult)
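# A minimal plain-Python sketch of what the loop emitted by
# int_power_func_body computes (the helper name is hypothetical, not part of
# the lowering code): the counter is decremented once per iteration and the
# running product is multiplied by x, so a non-negative exponent y yields
# x ** y and a non-positive exponent yields 1.
def _int_power_reference(x, y):
    result = 1
    counter = y
    while counter > 0:
        counter -= 1
        result *= x
    return result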
def getitem_unituple(context, builder, sig, args):
    tupty, _ = sig.args
    tup, idx = args

    bbelse = cgutils.append_basic_block(builder, "switch.else")
    bbend = cgutils.append_basic_block(builder, "switch.end")
    switch = builder.switch(idx, bbelse, n=tupty.count)

    with cgutils.goto_block(builder, bbelse):
        context.return_errcode(builder, errcode.OUT_OF_BOUND_ERROR)

    lrtty = context.get_value_type(tupty.dtype)
    with cgutils.goto_block(builder, bbend):
        phinode = builder.phi(lrtty)

    for i in range(tupty.count):
        ki = context.get_constant(types.intp, i)
        bbi = cgutils.append_basic_block(builder, "switch.%d" % i)
        switch.add_case(ki, bbi)
        with cgutils.goto_block(builder, bbi):
            value = builder.extract_value(tup, i)
            builder.branch(bbend)
            phinode.add_incoming(value, bbi)

    builder.position_at_end(bbend)
    return phinode
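# A rough Python-level equivalent of the control flow built by
# getitem_unituple (illustrative sketch only): the switch over the runtime
# index selects one statically-extracted element of the homogeneous tuple,
# and any out-of-bounds index takes the error path instead of falling
# through to a value.
def _unituple_getitem_reference(tup, idx):
    for i in range(len(tup)):
        if idx == i:
            return tup[i]
    # Stands in for returning errcode.OUT_OF_BOUND_ERROR
    raise IndexError("tuple index out of range")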
def add_arg(self, obj, ty):
    """
    Unbox argument and emit code that handles any error during unboxing.
    Args are cleaned up in reverse order of the parameter list, and cleanup
    begins as soon as unboxing of any argument fails.  E.g. failure on arg2
    will result in control flow going through:

        arg2.err -> arg1.err -> arg0.err -> arg.end (returns)
    """
    # Unbox argument
    val, dtor = self.api.to_native_arg(self.builder.load(obj), ty)

    # Check for Python C-API error
    error_check = self.api.err_occurred()
    err_happened = self.builder.icmp(lc.ICMP_NE, error_check,
                                     self.api.get_null_object())

    # Write the cleanup block
    cleanupblk = cgutils.append_basic_block(self.builder,
                                            "arg%d.err" % self.arg_count)
    with cgutils.goto_block(self.builder, cleanupblk):
        dtor()
        # Go to the next cleanup block
        self.builder.branch(self.nextblk)

    # If an error occurred, go to the cleanup block
    with cgutils.if_unlikely(self.builder, err_happened):
        self.builder.branch(cleanupblk)

    self.cleanups.append(dtor)
    self.nextblk = cleanupblk
    self.arg_count += 1
    return val
def add_arg(self, obj, ty):
    """
    Unbox argument and emit code that handles any error during unboxing.
    Args are cleaned up in reverse order of the parameter list, and cleanup
    begins as soon as unboxing of any argument fails.  E.g. failure on arg2
    will result in control flow going through:

        arg2.err -> arg1.err -> arg0.err -> arg.end (returns)
    """
    # Unbox argument
    native = self.api.to_native_value(self.builder.load(obj), ty)

    # If an error occurred, go to the cleanup block for the previous argument.
    with cgutils.if_unlikely(self.builder, native.is_error):
        self.builder.branch(self.nextblk)

    # Write the cleanup block for this argument
    cleanupblk = cgutils.append_basic_block(self.builder,
                                            "arg%d.err" % self.arg_count)
    with cgutils.goto_block(self.builder, cleanupblk):
        if native.cleanup is not None:
            native.cleanup()
            self.cleanups.append(native.cleanup)
        # Go to the next cleanup block
        self.builder.branch(self.nextblk)

    self.nextblk = cleanupblk
    self.arg_count += 1
    return native.value
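# An illustrative pure-Python analogue of the cleanup chain described in the
# docstring above (the helper and the unbox callable are hypothetical):
# arguments are unboxed in order, and a failure triggers the cleanups of the
# previously unboxed arguments in reverse order before the wrapper bails out.
def _unbox_all_reference(objs, unbox):
    values, cleanups = [], []
    for obj in objs:
        value, cleanup, ok = unbox(obj)
        if not ok:
            # Corresponds to branching through argN.err ... arg0.err
            for undo in reversed(cleanups):
                if undo is not None:
                    undo()
            return None  # corresponds to the "arg.end" block returning NULL
        values.append(value)
        cleanups.append(cleanup)
    return values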
def build_increment_blocks(inp_indices, inp_shape, inp_ndim, inp_num):
    bb_inc_inp_index = [
        cgutils.append_basic_block(
            builder, '.inc_inp{0}_index{1}'.format(inp_num, str(i)))
        for i in range(inp_ndim)
    ]
    bb_end_inc_index = cgutils.append_basic_block(
        builder, '.end_inc{0}_index'.format(inp_num))

    builder.branch(bb_inc_inp_index[0])
    for i in range(inp_ndim):
        with cgutils.goto_block(builder, bb_inc_inp_index[i]):
            # If the shape of this dimension is 1, then leave the
            # index at 0 so that this dimension is broadcasted over
            # the corresponding input and output dimensions.
            cond = builder.icmp(ICMP_UGT, inp_shape[i], ONE)
            with cgutils.ifthen(builder, cond):
                builder.store(indices[out_ndim - inp_ndim + i], inp_indices[i])
            if i + 1 == inp_ndim:
                builder.branch(bb_end_inc_index)
            else:
                builder.branch(bb_inc_inp_index[i + 1])
    builder.position_at_end(bb_end_inc_index)
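# A plain-Python sketch of the index mapping these increment blocks
# implement (hypothetical helper, for illustration only): each input index
# tracks the corresponding right-aligned output index, except that size-1
# dimensions stay at 0 so they are broadcast.
def _map_output_to_input_index(out_index, inp_shape, out_ndim):
    inp_ndim = len(inp_shape)
    return [out_index[out_ndim - inp_ndim + i] if inp_shape[i] > 1 else 0
            for i in range(inp_ndim)]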
def __init__(self, builder, api, nargs):
    self.builder = builder
    self.api = api
    self.arg_count = 0  # how many function arguments have been processed
    self.cleanups = []

    # Set up a switch for error processing of function arguments
    self.elseblk = cgutils.append_basic_block(self.builder, "arg.ok")
    with cgutils.goto_block(self.builder, self.elseblk):
        self.builder.ret(self.api.get_null_object())

    self.swtblk = cgutils.append_basic_block(self.builder, ".arg.err")
    with cgutils.goto_block(self.builder, self.swtblk):
        self.swt_val = cgutils.alloca_once(self.builder, Type.int(32))
        self.swt = self.builder.switch(self.builder.load(self.swt_val),
                                       self.elseblk, nargs)

    self.prev = self.elseblk
def alloca(self, name, ltype=None):
    """
    Allocate a stack slot and initialize it to NULL.
    The default is to allocate a pyobject pointer.
    Use ``ltype`` to override.
    """
    if ltype is None:
        ltype = self.context.get_value_type(types.pyobject)
    with cgutils.goto_block(self.builder, self.entry_block):
        ptr = self.builder.alloca(ltype, name=name)
        self.builder.store(cgutils.get_null_value(ltype), ptr)
    return ptr
def build_wrapper(self, api, builder, closure, args, kws):
    nargs = len(self.fndesc.args)
    keywords = self.make_keywords(self.fndesc.args)
    fmt = self.make_const_string("O" * nargs)

    objs = [api.alloca_obj() for _ in range(nargs)]
    parseok = api.parse_tuple_and_keywords(args, kws, fmt, keywords, *objs)

    pred = builder.icmp(lc.ICMP_EQ, parseok, Constant.null(parseok.type))
    with cgutils.if_unlikely(builder, pred):
        builder.ret(api.get_null_object())

    # Block that returns after erroneous argument unboxing/cleanup
    endblk = cgutils.append_basic_block(builder, "arg.end")
    with cgutils.goto_block(builder, endblk):
        builder.ret(api.get_null_object())

    cleanup_manager = _ArgManager(builder, api, endblk, nargs)

    innerargs = []
    for obj, ty in zip(objs, self.fndesc.argtypes):
        val = cleanup_manager.add_arg(obj, ty)
        innerargs.append(val)

    if self.release_gil:
        cleanup_manager = _GilManager(builder, api, cleanup_manager)

    # The wrapped function doesn't take a full closure, only
    # the Environment object.
    env = self.context.get_env_from_closure(builder, closure)

    status, res = self.context.call_function(builder, self.func,
                                             self.fndesc.restype,
                                             self.fndesc.argtypes,
                                             innerargs, env)
    # Do clean up
    cleanup_manager.emit_cleanup()

    # Determine return status
    with cgutils.if_likely(builder, status.ok):
        with cgutils.ifthen(builder, status.none):
            api.return_none()
        retval = api.from_native_return(res, self.fndesc.restype)
        builder.ret(retval)

    with cgutils.ifthen(builder, builder.not_(status.exc)):
        # !ok && !exc
        # User exception raised
        self.make_exception_switch(api, builder, status.code)

    # !ok && exc
    builder.ret(api.get_null_object())
def __init__(self, builder, api, nargs):
    self.builder = builder
    self.api = api
    self.arg_count = 0  # how many function arguments have been processed
    self.cleanups = []

    # Block that returns after erroneous argument unboxing/cleanup
    self.endblk = cgutils.append_basic_block(self.builder, "arg.end")
    with cgutils.goto_block(self.builder, self.endblk):
        self.builder.ret(self.api.get_null_object())

    self.nextblk = self.endblk
def build_wrapper(self, api, builder, closure, args, kws):
    nargs = len(self.fndesc.args)

    objs = [api.alloca_obj() for _ in range(nargs)]
    parseok = api.unpack_tuple(args, self.fndesc.qualname, nargs, nargs, *objs)

    pred = builder.icmp(lc.ICMP_EQ, parseok, Constant.null(parseok.type))
    with cgutils.if_unlikely(builder, pred):
        builder.ret(api.get_null_object())

    # Block that returns after erroneous argument unboxing/cleanup
    endblk = cgutils.append_basic_block(builder, "arg.end")
    with cgutils.goto_block(builder, endblk):
        builder.ret(api.get_null_object())

    cleanup_manager = _ArgManager(self.context, builder, api, endblk, nargs)

    innerargs = []
    for obj, ty in zip(objs, self.fndesc.argtypes):
        val = cleanup_manager.add_arg(obj, ty)
        innerargs.append(val)

    if self.release_gil:
        cleanup_manager = _GilManager(builder, api, cleanup_manager)

    # Extract the Environment object from the Closure
    envptr = self.context.get_env_from_closure(builder, closure)
    env_body = self.context.get_env_body(builder, envptr)
    env_manager = api.get_env_manager(self.env, env_body)

    status, res = self.context.call_conv.call_function(
        builder, self.func, self.fndesc.restype, self.fndesc.argtypes,
        innerargs, envptr)

    # Do clean up
    cleanup_manager.emit_cleanup()

    # Determine return status
    with cgutils.if_likely(builder, status.is_ok):
        # Ok => return boxed Python value
        with cgutils.ifthen(builder, status.is_none):
            api.return_none()
        retval = api.from_native_return(res, self._simplified_return_type(),
                                        env_manager)
        builder.ret(retval)

    with cgutils.ifthen(builder, builder.not_(status.is_python_exc)):
        # User exception raised
        self.make_exception_switch(api, builder, status)

    # Error out
    builder.ret(api.get_null_object())
def real_sign_impl(context, builder, sig, args):
    [x] = args
    POS = Constant.real(x.type, 1)
    NEG = Constant.real(x.type, -1)
    ZERO = Constant.real(x.type, 0)

    cmp_zero = builder.fcmp(lc.FCMP_OEQ, x, ZERO)
    cmp_pos = builder.fcmp(lc.FCMP_OGT, x, ZERO)

    presult = builder.alloca(x.type)

    bb_zero = cgutils.append_basic_block(builder, ".zero")
    bb_postest = cgutils.append_basic_block(builder, ".postest")
    bb_pos = cgutils.append_basic_block(builder, ".pos")
    bb_neg = cgutils.append_basic_block(builder, ".neg")
    bb_exit = cgutils.append_basic_block(builder, ".exit")

    builder.cbranch(cmp_zero, bb_zero, bb_postest)

    with cgutils.goto_block(builder, bb_zero):
        builder.store(ZERO, presult)
        builder.branch(bb_exit)

    with cgutils.goto_block(builder, bb_postest):
        builder.cbranch(cmp_pos, bb_pos, bb_neg)

    with cgutils.goto_block(builder, bb_pos):
        builder.store(POS, presult)
        builder.branch(bb_exit)

    with cgutils.goto_block(builder, bb_neg):
        builder.store(NEG, presult)
        builder.branch(bb_exit)

    builder.position_at_end(bb_exit)
    return builder.load(presult)
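# For reference, the branching above corresponds to this plain-Python sign
# function (sketch only, hypothetical helper): zero maps to 0.0, positive
# values to 1.0, and everything else (including negatives, and NaN via the
# ordered comparisons both failing) to -1.0.
def _real_sign_reference(x):
    if x == 0.0:
        return 0.0
    elif x > 0.0:
        return 1.0
    else:
        return -1.0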
def add_arg(self, obj, ty):
    """
    Unbox argument and emit code that handles any error during unboxing
    """
    # Unbox argument
    val, dtor = self.api.to_native_arg(self.builder.load(obj), ty)
    self.cleanups.append(dtor)

    # Add to the switch each time through the loop.
    # prev and cur are references to keep track of which block to branch to.
    if self.arg_count == 0:
        bb = cgutils.append_basic_block(self.builder,
                                        "arg%d.err" % self.arg_count)
        self.cur = bb
        self.swt.add_case(Constant.int(Type.int(32), self.arg_count), bb)
    else:
        # Keep a reference to the previous arg.error block
        self.prev = self.cur
        bb = cgutils.append_basic_block(self.builder,
                                        "arg%d.error" % self.arg_count)
        self.cur = bb
        self.swt.add_case(Constant.int(Type.int(32), self.arg_count), bb)

    # Write the error block
    with cgutils.goto_block(self.builder, self.cur):
        dtor()
        self.builder.branch(self.prev)

    # Store arg count into value to switch on if there is an error
    self.builder.store(Constant.int(Type.int(32), self.arg_count),
                       self.swt_val)

    # Check for Python C-API error
    error_check = self.api.err_occurred()
    err_happened = self.builder.icmp(lc.ICMP_NE, error_check,
                                     self.api.get_null_object())

    # If an error occurred, clean up and go to the switch block
    with cgutils.if_unlikely(self.builder, err_happened):
        self.builder.branch(self.swtblk)

    self.arg_count += 1
    return val
def build_increment_blocks(inp_indices, inp_shape, inp_ndim, inp_num):
    bb_inc_inp_index = [cgutils.append_basic_block(
                            builder,
                            '.inc_inp{0}_index{1}'.format(inp_num, str(i)))
                        for i in range(inp_ndim)]
    bb_end_inc_index = cgutils.append_basic_block(
        builder, '.end_inc{0}_index'.format(inp_num))

    builder.branch(bb_inc_inp_index[0])
    for i in range(inp_ndim):
        with cgutils.goto_block(builder, bb_inc_inp_index[i]):
            # If the shape of this dimension is 1, then leave the
            # index at 0 so that this dimension is broadcasted over
            # the corresponding input and output dimensions.
            cond = builder.icmp(ICMP_UGT, inp_shape[i], ONE)
            with cgutils.ifthen(builder, cond):
                builder.store(indices[out_ndim - inp_ndim + i], inp_indices[i])
            if i + 1 == inp_ndim:
                builder.branch(bb_end_inc_index)
            else:
                builder.branch(bb_inc_inp_index[i + 1])
    builder.position_at_end(bb_end_inc_index)
def add_arg(self, obj, ty):
    """
    Unbox argument and emit code that handles any error during unboxing.
    Args are cleaned up in reverse order of the parameter list, and cleanup
    begins as soon as unboxing of any argument fails.  E.g. failure on arg2
    will result in control flow going through:

        arg2.err -> arg1.err -> arg0.err -> arg.end (returns)
    """
    # Unbox argument
    native = self.api.to_native_value(self.builder.load(obj), ty)

    # If an error occurred, go to the cleanup block for the previous argument.
    with cgutils.if_unlikely(self.builder, native.is_error):
        self.builder.branch(self.nextblk)

    # Write the cleanup block for this argument
    cleanupblk = cgutils.append_basic_block(self.builder,
                                            "arg%d.err" % self.arg_count)
    with cgutils.goto_block(self.builder, cleanupblk):
        # NRT cleanup
        if self.context.enable_nrt:
            def nrt_cleanup():
                self.context.nrt_decref(self.builder, ty, native.value)
            nrt_cleanup()
            self.cleanups.append(nrt_cleanup)
        if native.cleanup is not None:
            native.cleanup()
            self.cleanups.append(native.cleanup)
        # Go to the next cleanup block
        self.builder.branch(self.nextblk)

    self.nextblk = cleanupblk
    self.arg_count += 1
    return native.value
def impl(context, builder, sig, args):
    [tyinp, tyout] = sig.args
    [inp, out] = args
    if isinstance(tyinp, types.Array):
        scalar_inp = False
        scalar_tyinp = tyinp.dtype
        inp_ndim = tyinp.ndim
    elif tyinp in types.number_domain:
        scalar_inp = True
        scalar_tyinp = tyinp
        inp_ndim = 1
    else:
        raise TypeError('unknown type for input operand')

    out_ndim = tyout.ndim

    if asfloat:
        promote_type = types.float64
    elif scalar_tyinp in types.real_domain:
        promote_type = types.float64
    elif scalar_tyinp in types.signed_domain:
        promote_type = types.int64
    else:
        promote_type = types.uint64

    result_type = promote_type

    # Temporary hack for __ftol2 llvm bug. Don't allow storing
    # float results in uint64 array on windows.
    if result_type in types.real_domain and \
            tyout.dtype is types.uint64 and \
            sys.platform.startswith('win32'):
        raise TypeError('Cannot store result in uint64 array')

    sig = typing.signature(result_type, promote_type)

    if not scalar_inp:
        iary = context.make_array(tyinp)(context, builder, inp)
    oary = context.make_array(tyout)(context, builder, out)

    fnwork = context.get_function(funckey, sig)
    intpty = context.get_value_type(types.intp)

    if not scalar_inp:
        inp_shape = cgutils.unpack_tuple(builder, iary.shape, inp_ndim)
        inp_strides = cgutils.unpack_tuple(builder, iary.strides, inp_ndim)
        inp_data = iary.data
        inp_layout = tyinp.layout

    out_shape = cgutils.unpack_tuple(builder, oary.shape, out_ndim)
    out_strides = cgutils.unpack_tuple(builder, oary.strides, out_ndim)
    out_data = oary.data
    out_layout = tyout.layout

    ZERO = Constant.int(Type.int(intpty.width), 0)
    ONE = Constant.int(Type.int(intpty.width), 1)

    inp_indices = None
    if not scalar_inp:
        inp_indices = []
        for i in range(inp_ndim):
            x = builder.alloca(Type.int(intpty.width))
            builder.store(ZERO, x)
            inp_indices.append(x)

    loopshape = cgutils.unpack_tuple(builder, oary.shape, out_ndim)
    with cgutils.loop_nest(builder, loopshape, intp=intpty) as indices:
        # Increment input indices.
        # Since the output dimensions are already being incremented,
        # we'll use that to set the input indices. In order to
        # handle broadcasting, any input dimension of size 1 won't be
        # incremented.
        if not scalar_inp:
            bb_inc_inp_index = [cgutils.append_basic_block(
                                    builder, '.inc_inp_index' + str(i))
                                for i in range(inp_ndim)]
            bb_end_inc_index = cgutils.append_basic_block(builder,
                                                          '.end_inc_index')

            builder.branch(bb_inc_inp_index[0])
            for i in range(inp_ndim):
                with cgutils.goto_block(builder, bb_inc_inp_index[i]):
                    # If the shape of this dimension is 1, then leave the
                    # index at 0 so that this dimension is broadcasted over
                    # the corresponding output dimension.
                    cond = builder.icmp(ICMP_UGT, inp_shape[i], ONE)
                    with cgutils.ifthen(builder, cond):
                        # If number of input dimensions is less than output
                        # dimensions, the input shape is right justified so
                        # that last dimension of input shape corresponds to
                        # last dimension of output shape. Therefore, index
                        # output dimension starting at offset of diff of
                        # input and output dimension count.
                        builder.store(indices[out_ndim - inp_ndim + i],
                                      inp_indices[i])

                    # We have to check if this is the last dimension and add
                    # the appropriate block terminator before beginning the
                    # next loop.
                    if i + 1 == inp_ndim:
                        builder.branch(bb_end_inc_index)
                    else:
                        builder.branch(bb_inc_inp_index[i + 1])
            builder.position_at_end(bb_end_inc_index)

            inds = [builder.load(index) for index in inp_indices]
            px = cgutils.get_item_pointer2(builder,
                                           data=inp_data,
                                           shape=inp_shape,
                                           strides=inp_strides,
                                           layout=inp_layout,
                                           inds=inds)
            x = builder.load(px)
        else:
            x = inp

        po = cgutils.get_item_pointer2(builder,
                                       data=out_data,
                                       shape=out_shape,
                                       strides=out_strides,
                                       layout=out_layout,
                                       inds=indices)

        d_x = context.cast(builder, x, scalar_tyinp, promote_type)
        tempres = fnwork(builder, [d_x])
        res = context.cast(builder, tempres, result_type, tyout.dtype)
        builder.store(res, po)

    return out
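# A simplified Python sketch of the loop nest emitted above for the
# array-input case (illustrative only; the helper and parameter names are
# hypothetical and NumPy stands in for the generated IR): every output index
# is mapped to a broadcast input index, the element is promoted, the scalar
# function is applied, and the result is cast to the output dtype on store.
import itertools
import numpy as np

def _unary_broadcast_reference(func, inp, out, promote_dtype=np.float64):
    inp_shape = inp.shape
    diff = out.ndim - inp.ndim  # input shape is right-justified against output
    for out_index in itertools.product(*(range(n) for n in out.shape)):
        inp_index = tuple(out_index[diff + i] if inp_shape[i] > 1 else 0
                          for i in range(inp.ndim))
        x = promote_dtype(inp[inp_index])
        out[out_index] = func(x)  # NumPy casts to out.dtype on assignment
    return out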
def post_lower(self):
    with cgutils.goto_block(self.builder, self.ehblock):
        self.cleanup()
        self.context.return_exc(self.builder)

    self._finalize_frozen_string()
def post_lower(self):
    with cgutils.goto_block(self.builder, self.ehblock):
        self.cleanup()
        self.context.return_exc(self.builder)
def alloca_lltype(self, name, lltype):
    with cgutils.goto_block(self.builder, self.entry_block):
        return self.builder.alloca(lltype, name=name)