def access_member(member_offset):
    """Load the i8* member stored *member_offset* bytes into val."""
    byte_off = c.context.get_constant(types.uintp, member_offset)
    member_ptr = cgutils.pointer_add(c.builder, val, byte_off)
    # View the slot as a pointer-to-(i8*) and load the member pointer.
    i8ptr = ir.IntType(8).as_pointer()
    slot = c.builder.bitcast(member_ptr, i8ptr.as_pointer())
    return c.builder.load(slot)
def load_direct(self, byteoffset):
    """
    Generic load from the given *byteoffset*.  load_aligned() is
    preferred if possible.
    """
    # Advance the raw data pointer, then unpack a self.fe_type value.
    ptr = cgutils.pointer_add(self.builder, self.dataptr, byteoffset)
    return self.context.unpack_value(self.builder, self.fe_type, ptr)
def get_array_at_offset(self, ind):
    """
    Return an array value (LLVM struct) whose data starts
    ind * self.core_step bytes into self.data.

    When self.as_scalar is set, the item is exposed as a 1-element,
    zero-stride view; otherwise self.shape/self.strides are used.
    """
    context = self.context
    builder = self.builder
    # Generic ("A") layout array of the same dtype/ndim.
    arytyp = types.Array(dtype=self.dtype, ndim=self.ndim, layout="A")
    arycls = context.make_array(arytyp)
    array = arycls(context, builder)
    # Advance the base data pointer by ind * core_step bytes.
    offseted_data = cgutils.pointer_add(self.builder, self.data,
                                        self.builder.mul(self.core_step, ind))
    if not self.as_scalar:
        shape = cgutils.pack_array(builder, self.shape)
        strides = cgutils.pack_array(builder, self.strides)
    else:
        # Scalar source: 1-element view, zero stride.
        one = context.get_constant(types.intp, 1)
        zero = context.get_constant(types.intp, 0)
        shape = cgutils.pack_array(builder, [one])
        strides = cgutils.pack_array(builder, [zero])
    itemsize = context.get_abi_sizeof(context.get_data_type(self.dtype))
    context.populate_array(array,
                           data=builder.bitcast(offseted_data,
                                                array.data.type),
                           shape=shape,
                           strides=strides,
                           itemsize=context.get_constant(types.intp,
                                                         itemsize),
                           meminfo=None)
    return array._getvalue()
def load_direct(self, byteoffset):
    """
    Generic load from the given *byteoffset*.  load_aligned() is
    preferred if possible.
    """
    src = cgutils.pointer_add(self.builder, self.dataptr, byteoffset)
    loaded = self.context.unpack_value(self.builder, self.fe_type, src)
    return loaded
def iternext_specific(self, context, builder, arrty, arr, result):
    """
    Emit one iteration step: while the flat index is below
    arr.nitems, yield the current item (for the ndenumerate variant,
    an (indices, item) pair) and advance index and pointer.
    """
    # NOTE(review): `kind` is not defined in this function; it is
    # presumably a closure variable ('flat' vs. ndenumerate) from an
    # enclosing factory — confirm in the full file.
    zero = context.get_constant(types.intp, 0)
    one = context.get_constant(types.intp, 1)
    ndim = arrty.ndim
    nitems = arr.nitems
    index = builder.load(self.index)
    is_valid = builder.icmp(lc.ICMP_SLT, index, nitems)
    result.set_valid(is_valid)
    with cgutils.if_likely(builder, is_valid):
        ptr = builder.load(self.pointer)
        value = context.unpack_value(builder, arrty.dtype, ptr)
        if kind == 'flat':
            result.yield_(value)
        else:
            # ndenumerate(): fetch and increment indices
            indices = self.indices
            idxvals = [builder.load(cgutils.gep(builder, indices, dim))
                       for dim in range(ndim)]
            idxtuple = cgutils.pack_array(builder, idxvals)
            result.yield_(
                cgutils.make_anonymous_struct(builder, [idxtuple, value]))
            _increment_indices_array(context, builder, arrty, arr, indices)
        # Advance flat index and data pointer by one item.
        index = builder.add(index, one)
        builder.store(index, self.index)
        ptr = cgutils.pointer_add(builder, ptr, self.stride)
        builder.store(ptr, self.pointer)
def access_member(member_offset):
    # Access member by byte offset
    # NOTE(review): c, val, ir come from the enclosing scope.
    offset = c.context.get_constant(types.uintp, member_offset)
    llvoidptr = ir.IntType(8).as_pointer()
    ptr = cgutils.pointer_add(c.builder, val, offset)
    # Reinterpret the slot as i8** and load the stored i8* member.
    casted = c.builder.bitcast(ptr, llvoidptr.as_pointer())
    return c.builder.load(casted)
def iternext_specific(self, context, builder, arrty, arr, result):
    """
    Emit one step of flat / ndenumerate iteration over the array:
    yield the current item while index < nitems, then advance.
    """
    # NOTE(review): `kind` is not defined locally; presumably a
    # closure variable from an enclosing factory — confirm.
    zero = context.get_constant(types.intp, 0)
    one = context.get_constant(types.intp, 1)
    ndim = arrty.ndim
    nitems = arr.nitems
    index = builder.load(self.index)
    is_valid = builder.icmp(lc.ICMP_SLT, index, nitems)
    result.set_valid(is_valid)
    with cgutils.if_likely(builder, is_valid):
        ptr = builder.load(self.pointer)
        value = context.unpack_value(builder, arrty.dtype, ptr)
        if kind == 'flat':
            result.yield_(value)
        else:
            # ndenumerate(): fetch and increment indices
            indices = self.indices
            idxvals = [builder.load(cgutils.gep(builder, indices, dim))
                       for dim in range(ndim)]
            idxtuple = cgutils.pack_array(builder, idxvals)
            result.yield_(
                cgutils.make_anonymous_struct(builder, [idxtuple, value]))
            _increment_indices_array(context, builder, arrty, arr, indices)
        # Bump flat index and stride the data pointer forward.
        index = builder.add(index, one)
        builder.store(index, self.index)
        ptr = cgutils.pointer_add(builder, ptr, self.stride)
        builder.store(ptr, self.pointer)
def get_array_at_offset(self, ind):
    """
    Return an array value whose data starts ind * core_step bytes
    into self.data; a scalar source is exposed as a 1-element,
    zero-stride view.
    """
    ctx = self.context
    bld = self.builder
    aryty = types.Array(dtype=self.dtype, ndim=self.ndim, layout="A")
    ary = ctx.make_array(aryty)(ctx, bld)
    byte_off = bld.mul(self.core_step, ind)
    base = cgutils.pointer_add(bld, self.data, byte_off)
    if self.as_scalar:
        # 1-element view with a zero stride.
        shape = cgutils.pack_array(bld, [ctx.get_constant(types.intp, 1)])
        strides = cgutils.pack_array(bld, [ctx.get_constant(types.intp, 0)])
    else:
        shape = cgutils.pack_array(bld, self.shape)
        strides = cgutils.pack_array(bld, self.strides)
    nbytes = ctx.get_abi_sizeof(ctx.get_data_type(self.dtype))
    ctx.populate_array(ary,
                       data=bld.bitcast(base, ary.data.type),
                       shape=shape,
                       strides=strides,
                       itemsize=ctx.get_constant(types.intp, nbytes),
                       meminfo=None)
    return ary._getvalue()
def get_env_body(self, builder, envptr):
    """
    Given *envptr*, a pointer to a _dynfunc.Environment object, return
    an EnvBody wrapper giving structured access to environment fields.
    """
    body_offset = _dynfunc._impl_info['offsetof_env_body']
    body_ptr = cgutils.pointer_add(builder, envptr, body_offset)
    return EnvBody(self, builder, ref=body_ptr, cast_ref=True)
def get_env_body(self, builder, envptr):
    """
    From the given *envptr* (a pointer to a _dynfunc.Environment
    object), get a EnvBody allowing structured access to environment
    fields.
    """
    # BUG FIX: the _dynfunc._impl_info key is 'offsetof_env_body'
    # (as used by the sibling definition in this file); the previous
    # 'offset_env_body' would raise KeyError at run time.
    body_ptr = cgutils.pointer_add(
        builder, envptr, _dynfunc._impl_info['offsetof_env_body'])
    return EnvBody(self, builder, ref=body_ptr, cast_ref=True)
def get_generator_state(self, builder, genptr, return_type):
    """
    From the given *genptr* (a pointer to a _dynfunc.Generator object),
    get a pointer to its state area.
    """
    # The state area lives at a fixed byte offset exported by _dynfunc;
    # the result pointer is cast to *return_type*.
    return cgutils.pointer_add(
        builder, genptr, _dynfunc._impl_info['offsetof_generator_state'],
        return_type=return_type)
def get_generator_state(self, builder, genptr, return_type):
    """
    Return a pointer (cast to *return_type*) to the state area of the
    _dynfunc.Generator pointed to by *genptr*.
    """
    state_offset = _dynfunc._impl_info['offsetof_generator_state']
    return cgutils.pointer_add(builder, genptr, state_offset,
                               return_type=return_type)
def get_env_from_closure(self, builder, clo):
    """
    From the pointer *clo* to a _dynfunc.Closure, get a pointer
    to the enclosed _dynfunc.Environment.
    """
    # BUG FIX: the _dynfunc._impl_info key is 'offsetof_closure_body'
    # (as used by the sibling definitions in this file); the previous
    # 'offset_closure_body' would raise KeyError at run time.
    clo_body_ptr = cgutils.pointer_add(
        builder, clo, _dynfunc._impl_info['offsetof_closure_body'])
    clo_body = ClosureBody(self, builder, ref=clo_body_ptr, cast_ref=True)
    return clo_body.env
def get_env_from_closure(self, builder, clo):
    """
    From the pointer *clo* to a _dynfunc.Closure, get a pointer
    to the enclosed _dynfunc.Environment.
    """
    # BUG FIX: 'offset_closure_body' is not a key _dynfunc exports;
    # the correct key — used elsewhere in this file — is
    # 'offsetof_closure_body'.  The old spelling raised KeyError.
    clo_body_ptr = cgutils.pointer_add(
        builder, clo, _dynfunc._impl_info['offsetof_closure_body'])
    clo_body = ClosureBody(self, builder, ref=clo_body_ptr, cast_ref=True)
    return clo_body.env
def get_env_from_closure(self, builder, clo):
    """
    From the pointer *clo* to a _dynfunc.Closure, get a pointer
    to the enclosed _dynfunc.Environment.
    """
    # Guard: a NULL closure pointer is a fatal condition — print a
    # message and mark the path unreachable.
    with cgutils.if_unlikely(builder, cgutils.is_null(builder, clo)):
        self.debug_print(builder, "Fatal error: missing _dynfunc.Closure")
        builder.unreachable()

    clo_body_ptr = cgutils.pointer_add(
        builder, clo, _dynfunc._impl_info['offsetof_closure_body'])
    clo_body = ClosureBody(self, builder, ref=clo_body_ptr, cast_ref=True)
    return clo_body.env
def get_env_from_closure(self, builder, clo):
    """
    From the pointer *clo* to a _dynfunc.Closure, get a pointer
    to the enclosed _dynfunc.Environment.
    """
    clo_is_null = cgutils.is_null(builder, clo)
    with cgutils.if_unlikely(builder, clo_is_null):
        self.debug_print(builder, "Fatal error: missing _dynfunc.Closure")
        builder.unreachable()
    body_offset = _dynfunc._impl_info['offsetof_closure_body']
    body_ptr = cgutils.pointer_add(builder, clo, body_offset)
    body = ClosureBody(self, builder, ref=body_ptr, cast_ref=True)
    return body.env
def iternext_specific(self, context, builder, arrty, arr, result):
    """
    Emit one step of flat iteration: yield the item at the current
    pointer while index < nitems, then advance index and pointer.
    """
    nitems = arr.nitems
    index = builder.load(self.index)
    is_valid = builder.icmp(lc.ICMP_SLT, index, nitems)
    result.set_valid(is_valid)
    with cgutils.if_likely(builder, is_valid):
        ptr = builder.load(self.pointer)
        value = context.unpack_value(builder, arrty.dtype, ptr)
        result.yield_(value)
        # Increment the flat index and stride the pointer forward.
        index = builder.add(index, context.get_constant(types.intp, 1))
        builder.store(index, self.index)
        ptr = cgutils.pointer_add(builder, ptr, self.stride)
        builder.store(ptr, self.pointer)
def iternext_specific(self, context, builder, arrty, arr, result):
    """
    Emit one flat iteration step: yield the current item if the index
    is still below arr.nitems, then advance index and pointer.
    """
    count = arr.nitems
    cur = builder.load(self.index)
    in_range = builder.icmp(lc.ICMP_SLT, cur, count)
    result.set_valid(in_range)
    with cgutils.if_likely(builder, in_range):
        item_ptr = builder.load(self.pointer)
        item = context.unpack_value(builder, arrty.dtype, item_ptr)
        result.yield_(item)
        one = context.get_constant(types.intp, 1)
        builder.store(builder.add(cur, one), self.index)
        builder.store(cgutils.pointer_add(builder, item_ptr, self.stride),
                      self.pointer)
def init_specific(self, context, builder, arrty, arr):
    """
    Allocate and initialize the per-dimension index and pointer
    slots used by the iterator, and precompute whether the array
    is empty.
    """
    zero = context.get_constant(types.intp, 0)
    one = context.get_constant(types.intp, 1)
    data = arr.data
    ndim = arrty.ndim
    shapes = cgutils.unpack_tuple(builder, arr.shape, ndim)

    # One intp slot per dimension for the running indices...
    indices = cgutils.alloca_once(builder, zero.type,
                                  size=context.get_constant(types.intp,
                                                            arrty.ndim))
    # ...and one data-pointer slot per dimension.
    pointers = cgutils.alloca_once(builder, data.type,
                                   size=context.get_constant(types.intp,
                                                             arrty.ndim))
    strides = cgutils.unpack_tuple(builder, arr.strides, ndim)
    empty = cgutils.alloca_once_value(builder, cgutils.false_byte)

    # Initialize each dimension with the next index and pointer
    # values.  For the last (inner) dimension, this is 0 and the
    # start pointer, for the other dimensions, this is 1 and the
    # pointer to the next subarray after start.
    for dim in range(ndim):
        idxptr = cgutils.gep(builder, indices, dim)
        ptrptr = cgutils.gep(builder, pointers, dim)
        if dim == ndim - 1:
            builder.store(zero, idxptr)
            builder.store(data, ptrptr)
        else:
            p = cgutils.pointer_add(builder, data, strides[dim])
            builder.store(p, ptrptr)
            builder.store(one, idxptr)
        # 0-sized dimensions really indicate an empty array,
        # but we have to catch that condition early to avoid
        # a bug inside the iteration logic (see issue #846).
        dim_size = shapes[dim]
        dim_is_empty = builder.icmp(lc.ICMP_EQ, dim_size, zero)
        with cgutils.if_unlikely(builder, dim_is_empty):
            builder.store(cgutils.true_byte, empty)

    self.indices = indices
    self.pointers = pointers
    self.empty = empty
def init_specific(self, context, builder, arrty, arr):
    """
    Set up iteration state: per-dimension index and pointer arrays,
    plus a precomputed emptiness flag for 0-sized dimensions.
    """
    zero = context.get_constant(types.intp, 0)
    one = context.get_constant(types.intp, 1)
    data = arr.data
    ndim = arrty.ndim
    shapes = cgutils.unpack_tuple(builder, arr.shape, ndim)

    # Stack-allocate ndim index slots and ndim pointer slots.
    indices = cgutils.alloca_once(builder, zero.type,
                                  size=context.get_constant(types.intp,
                                                            arrty.ndim))
    pointers = cgutils.alloca_once(builder, data.type,
                                   size=context.get_constant(types.intp,
                                                             arrty.ndim))
    strides = cgutils.unpack_tuple(builder, arr.strides, ndim)
    empty = cgutils.alloca_once_value(builder, cgutils.false_byte)

    # Initialize each dimension with the next index and pointer
    # values.  For the last (inner) dimension, this is 0 and the
    # start pointer, for the other dimensions, this is 1 and the
    # pointer to the next subarray after start.
    for dim in range(ndim):
        idxptr = cgutils.gep(builder, indices, dim)
        ptrptr = cgutils.gep(builder, pointers, dim)
        if dim == ndim - 1:
            builder.store(zero, idxptr)
            builder.store(data, ptrptr)
        else:
            p = cgutils.pointer_add(builder, data, strides[dim])
            builder.store(p, ptrptr)
            builder.store(one, idxptr)
        # 0-sized dimensions really indicate an empty array,
        # but we have to catch that condition early to avoid
        # a bug inside the iteration logic (see issue #846).
        dim_size = shapes[dim]
        dim_is_empty = builder.icmp(lc.ICMP_EQ, dim_size, zero)
        with cgutils.if_unlikely(builder, dim_is_empty):
            builder.store(cgutils.true_byte, empty)

    self.indices = indices
    self.pointers = pointers
    self.empty = empty
def load(self, context, builder, data, ind):
    """
    Return an array struct value viewing *data* offset by
    ind * self.core_step bytes, with this helper's shape/strides
    metadata.
    """
    arytyp = types.Array(dtype=self.dtype, ndim=self.ndim, layout="A")
    arycls = context.make_array(arytyp)
    array = arycls(context, builder)
    # Advance the base pointer by ind * core_step bytes.
    offseted_data = cgutils.pointer_add(builder, data,
                                        builder.mul(self.core_step, ind))
    shape, strides = self._shape_and_strides(context, builder)
    itemsize = context.get_abi_sizeof(context.get_data_type(self.dtype))
    context.populate_array(array,
                           data=builder.bitcast(offseted_data,
                                                array.data.type),
                           shape=shape,
                           strides=strides,
                           itemsize=context.get_constant(types.intp,
                                                         itemsize),
                           meminfo=None)
    return array._getvalue()
def load(self, context, builder, data, ind):
    """
    Build and return an array value viewing *data* offset by
    ind * core_step bytes, using this helper's shape/strides.
    """
    aryty = types.Array(dtype=self.dtype, ndim=self.ndim, layout="A")
    ary = context.make_array(aryty)(context, builder)
    step = builder.mul(self.core_step, ind)
    base = cgutils.pointer_add(builder, data, step)
    shape, strides = self._shape_and_strides(context, builder)
    nbytes = context.get_abi_sizeof(context.get_data_type(self.dtype))
    context.populate_array(ary,
                           data=builder.bitcast(base, ary.data.type),
                           shape=shape,
                           strides=strides,
                           itemsize=context.get_constant(types.intp, nbytes),
                           meminfo=None)
    return ary._getvalue()
def iternext_specific(self, context, builder, arrty, arr, result):
    """
    Emit one iteration step over a strided array using the
    per-dimension indices/pointers prepared by init_specific():
    find the innermost valid dimension, yield its item, and update
    indices and pointers.
    """
    ndim = arrty.ndim
    data = arr.data
    shapes = cgutils.unpack_tuple(builder, arr.shape, ndim)
    strides = cgutils.unpack_tuple(builder, arr.strides, ndim)
    indices = self.indices
    pointers = self.pointers
    zero = context.get_constant(types.intp, 0)
    one = context.get_constant(types.intp, 1)
    # NOTE(review): zero and minus_one are unused in this body.
    minus_one = context.get_constant(types.intp, -1)
    result.set_valid(True)

    bbcont = cgutils.append_basic_block(builder, 'continued')
    bbend = cgutils.append_basic_block(builder, 'end')

    # Catch already computed iterator exhaustion
    is_empty = cgutils.as_bool_bit(builder, builder.load(self.empty))
    with cgutils.if_unlikely(builder, is_empty):
        result.set_valid(False)
        builder.branch(bbend)

    # Current pointer inside last dimension
    last_ptr = cgutils.alloca_once(builder, data.type)

    # Walk from inner dimension to outer
    for dim in reversed(range(ndim)):
        idxptr = cgutils.gep(builder, indices, dim)
        idx = builder.load(idxptr)
        count = shapes[dim]
        stride = strides[dim]
        in_bounds = builder.icmp(lc.ICMP_SLT, idx, count)
        with cgutils.if_likely(builder, in_bounds):
            # Index is valid => we point to the right slot
            ptrptr = cgutils.gep(builder, pointers, dim)
            ptr = builder.load(ptrptr)
            builder.store(ptr, last_ptr)
            # Compute next index and pointer for this dimension
            next_ptr = cgutils.pointer_add(builder, ptr, stride)
            builder.store(next_ptr, ptrptr)
            next_idx = builder.add(idx, one)
            builder.store(next_idx, idxptr)
            # Reset inner dimensions
            for inner_dim in range(dim + 1, ndim):
                idxptr = cgutils.gep(builder, indices, inner_dim)
                ptrptr = cgutils.gep(builder, pointers, inner_dim)
                # Compute next index and pointer for this dimension
                inner_ptr = cgutils.pointer_add(builder, ptr,
                                                strides[inner_dim])
                builder.store(inner_ptr, ptrptr)
                builder.store(one, idxptr)
            builder.branch(bbcont)

    # End of array => skip to end
    result.set_valid(False)
    builder.branch(bbend)

    builder.position_at_end(bbcont)
    # After processing of indices and pointers: fetch value.
    ptr = builder.load(last_ptr)
    value = context.unpack_value(builder, arrty.dtype, ptr)
    result.yield_(value)
    builder.branch(bbend)

    builder.position_at_end(bbend)
def next(self, i):
    """Advance the wrapped array's data pointer by one core step."""
    stepped = cgutils.pointer_add(self.builder, self.array.data,
                                  self.core_step)
    self.array.data = stepped
def load_direct(self, byteoffset):
    """
    Generic load from the given *byteoffset*.
    """
    ptr = cgutils.pointer_add(self.builder, self.dataptr, byteoffset)
    return self.context.unpack_value(self.builder, self.fe_type, ptr)
def load_direct(self, offset):
    """
    Load the value at *offset* bytes from self.data; when self.byref
    is set, return the pointer itself instead of loading through it.
    """
    slot = cgutils.pointer_add(self.builder, self.data, offset)
    return slot if self.byref else self.builder.load(slot)
def store_direct(self, value, offset):
    """
    Store *value* at *offset* bytes from self.data.
    """
    ptr = cgutils.pointer_add(self.builder, self.data, offset)
    # The pointee LLVM type must match the stored value's type; the
    # message tuple shows both types when the assertion fires.
    assert ptr.type.pointee == value.type, (ptr.type, value.type)
    self.builder.store(value, ptr)
def set_member(member_offset, value):
    # Access member by byte offset
    # NOTE(review): c, box and llvoidptr come from the enclosing scope.
    offset = c.context.get_constant(types.uintp, member_offset)
    ptr = cgutils.pointer_add(c.builder, box, offset)
    # View the slot as a pointer-to-pointer and store *value* into it.
    casted = c.builder.bitcast(ptr, llvoidptr.as_pointer())
    c.builder.store(value, casted)
def iternext_specific(self, context, builder, arrty, arr, result):
    """
    Emit one flat / ndenumerate iteration step over a strided array:
    yield the current item (plus indices for ndenumerate), then
    advance indices and pointers walking from inner to outer
    dimension.
    """
    # NOTE(review): `kind` is not defined locally; presumably a closure
    # variable from an enclosing factory — confirm in the full file.
    ndim = arrty.ndim
    data = arr.data
    shapes = cgutils.unpack_tuple(builder, arr.shape, ndim)
    strides = cgutils.unpack_tuple(builder, arr.strides, ndim)
    indices = self.indices
    pointers = self.pointers
    zero = context.get_constant(types.intp, 0)
    one = context.get_constant(types.intp, 1)

    bbend = cgutils.append_basic_block(builder, 'end')

    # Catch already computed iterator exhaustion
    is_exhausted = cgutils.as_bool_bit(
        builder, builder.load(self.exhausted))
    with cgutils.if_unlikely(builder, is_exhausted):
        result.set_valid(False)
        builder.branch(bbend)
    result.set_valid(True)

    # Current pointer inside last dimension
    last_ptr = cgutils.gep(builder, pointers, ndim - 1)
    ptr = builder.load(last_ptr)
    value = context.unpack_value(builder, arrty.dtype, ptr)
    if kind == 'flat':
        result.yield_(value)
    else:
        # ndenumerate() => yield (indices, value)
        idxvals = [builder.load(cgutils.gep(builder, indices, dim))
                   for dim in range(ndim)]
        idxtuple = cgutils.pack_array(builder, idxvals)
        result.yield_(
            cgutils.make_anonymous_struct(builder, [idxtuple, value]))

    # Update indices and pointers by walking from inner
    # dimension to outer.
    for dim in reversed(range(ndim)):
        idxptr = cgutils.gep(builder, indices, dim)
        idx = builder.add(builder.load(idxptr), one)
        count = shapes[dim]
        stride = strides[dim]
        in_bounds = builder.icmp(lc.ICMP_SLT, idx, count)
        with cgutils.if_likely(builder, in_bounds):
            # Index is valid => pointer can simply be incremented.
            builder.store(idx, idxptr)
            ptrptr = cgutils.gep(builder, pointers, dim)
            ptr = builder.load(ptrptr)
            ptr = cgutils.pointer_add(builder, ptr, stride)
            builder.store(ptr, ptrptr)
            # Reset pointers in inner dimensions
            for inner_dim in range(dim + 1, ndim):
                ptrptr = cgutils.gep(builder, pointers, inner_dim)
                builder.store(ptr, ptrptr)
            builder.branch(bbend)
        # Reset index and continue with next dimension
        builder.store(zero, idxptr)

    # End of array
    builder.store(cgutils.true_byte, self.exhausted)
    builder.branch(bbend)

    builder.position_at_end(bbend)
def store_direct(self, value, offset):
    """
    Store *value* at *offset* bytes from self.data.
    """
    ptr = cgutils.pointer_add(self.builder, self.data, offset)
    # Consistency fix: like the sibling store_direct in this file,
    # include both LLVM types in the assertion message so a mismatch
    # is diagnosable instead of a bare AssertionError.
    assert ptr.type.pointee == value.type, (ptr.type, value.type)
    self.builder.store(value, ptr)
def load_direct(self, offset):
    """
    Load the value at *offset* bytes from self.data; if self.byref
    is set, return the raw pointer instead of loading through it.
    """
    ptr = cgutils.pointer_add(self.builder, self.data, offset)
    if self.byref:
        return ptr
    else:
        return self.builder.load(ptr)
def next(self, i):
    # Advance the array's data pointer by one core step.
    # NOTE(review): the *i* argument is unused here — presumably kept
    # for interface compatibility with sibling next() methods.
    self.array.data = cgutils.pointer_add(self.builder, self.array.data,
                                          self.core_step)
def set_member(member_offset, value):
    """Store *value* into the slot *member_offset* bytes into box."""
    off_const = c.context.get_constant(types.uintp, member_offset)
    slot = cgutils.pointer_add(c.builder, box, off_const)
    typed_slot = c.builder.bitcast(slot, llvoidptr.as_pointer())
    c.builder.store(value, typed_slot)
def iternext_specific(self, context, builder, arrty, arr, result):
    """
    Emit one iteration step over a strided array: locate the
    innermost dimension whose index is still in bounds, yield its
    item, and update the per-dimension indices and pointers.
    """
    ndim = arrty.ndim
    data = arr.data
    shapes = cgutils.unpack_tuple(builder, arr.shape, ndim)
    strides = cgutils.unpack_tuple(builder, arr.strides, ndim)
    indices = self.indices
    pointers = self.pointers
    zero = context.get_constant(types.intp, 0)
    one = context.get_constant(types.intp, 1)
    # NOTE(review): zero and minus_one are unused in this body.
    minus_one = context.get_constant(types.intp, -1)
    result.set_valid(True)

    bbcont = cgutils.append_basic_block(builder, 'continued')
    bbend = cgutils.append_basic_block(builder, 'end')

    # Catch already computed iterator exhaustion
    is_empty = cgutils.as_bool_bit(builder, builder.load(self.empty))
    with cgutils.if_unlikely(builder, is_empty):
        result.set_valid(False)
        builder.branch(bbend)

    # Current pointer inside last dimension
    last_ptr = cgutils.alloca_once(builder, data.type)

    # Walk from inner dimension to outer
    for dim in reversed(range(ndim)):
        idxptr = cgutils.gep(builder, indices, dim)
        idx = builder.load(idxptr)
        count = shapes[dim]
        stride = strides[dim]
        in_bounds = builder.icmp(lc.ICMP_SLT, idx, count)
        with cgutils.if_likely(builder, in_bounds):
            # Index is valid => we point to the right slot
            ptrptr = cgutils.gep(builder, pointers, dim)
            ptr = builder.load(ptrptr)
            builder.store(ptr, last_ptr)
            # Compute next index and pointer for this dimension
            next_ptr = cgutils.pointer_add(builder, ptr, stride)
            builder.store(next_ptr, ptrptr)
            next_idx = builder.add(idx, one)
            builder.store(next_idx, idxptr)
            # Reset inner dimensions
            for inner_dim in range(dim + 1, ndim):
                idxptr = cgutils.gep(builder, indices, inner_dim)
                ptrptr = cgutils.gep(builder, pointers, inner_dim)
                # Compute next index and pointer for this dimension
                inner_ptr = cgutils.pointer_add(builder, ptr,
                                                strides[inner_dim])
                builder.store(inner_ptr, ptrptr)
                builder.store(one, idxptr)
            builder.branch(bbcont)

    # End of array => skip to end
    result.set_valid(False)
    builder.branch(bbend)

    builder.position_at_end(bbcont)
    # After processing of indices and pointers: fetch value.
    ptr = builder.load(last_ptr)
    value = context.unpack_value(builder, arrty.dtype, ptr)
    result.yield_(value)
    builder.branch(bbend)

    builder.position_at_end(bbend)
def store_direct(self, value, byteoffset):
    """Generic store of *value* at the given *byteoffset*."""
    dest = cgutils.pointer_add(self.builder, self.dataptr, byteoffset)
    self.context.pack_value(self.builder, self.fe_type, value, dest)
def store_direct(self, value, byteoffset):
    """
    Generic store of *value* at the given *byteoffset*.
    """
    # Advance the raw data pointer, then pack the value there.
    ptr = cgutils.pointer_add(self.builder, self.dataptr, byteoffset)
    self.context.pack_value(self.builder, self.fe_type, value, ptr)
def iternext_specific(self, context, builder, arrty, arr, result):
    """
    Emit one flat / ndenumerate iteration step over a strided array:
    yield the current item (with its indices for ndenumerate), then
    walk indices and pointers from inner to outer dimension.
    """
    # NOTE(review): `kind` is not defined in this function; it is
    # presumably a closure variable from an enclosing factory —
    # confirm in the full file.
    ndim = arrty.ndim
    data = arr.data
    shapes = cgutils.unpack_tuple(builder, arr.shape, ndim)
    strides = cgutils.unpack_tuple(builder, arr.strides, ndim)
    indices = self.indices
    pointers = self.pointers
    zero = context.get_constant(types.intp, 0)
    one = context.get_constant(types.intp, 1)

    bbend = cgutils.append_basic_block(builder, 'end')

    # Catch already computed iterator exhaustion
    is_exhausted = cgutils.as_bool_bit(
        builder, builder.load(self.exhausted))
    with cgutils.if_unlikely(builder, is_exhausted):
        result.set_valid(False)
        builder.branch(bbend)
    result.set_valid(True)

    # Current pointer inside last dimension
    last_ptr = cgutils.gep(builder, pointers, ndim - 1)
    ptr = builder.load(last_ptr)
    value = context.unpack_value(builder, arrty.dtype, ptr)
    if kind == 'flat':
        result.yield_(value)
    else:
        # ndenumerate() => yield (indices, value)
        idxvals = [builder.load(cgutils.gep(builder, indices, dim))
                   for dim in range(ndim)]
        idxtuple = cgutils.pack_array(builder, idxvals)
        result.yield_(
            cgutils.make_anonymous_struct(builder, [idxtuple, value]))

    # Update indices and pointers by walking from inner
    # dimension to outer.
    for dim in reversed(range(ndim)):
        idxptr = cgutils.gep(builder, indices, dim)
        idx = builder.add(builder.load(idxptr), one)
        count = shapes[dim]
        stride = strides[dim]
        in_bounds = builder.icmp(lc.ICMP_SLT, idx, count)
        with cgutils.if_likely(builder, in_bounds):
            # Index is valid => pointer can simply be incremented.
            builder.store(idx, idxptr)
            ptrptr = cgutils.gep(builder, pointers, dim)
            ptr = builder.load(ptrptr)
            ptr = cgutils.pointer_add(builder, ptr, stride)
            builder.store(ptr, ptrptr)
            # Reset pointers in inner dimensions
            for inner_dim in range(dim + 1, ndim):
                ptrptr = cgutils.gep(builder, pointers, inner_dim)
                builder.store(ptr, ptrptr)
            builder.branch(bbend)
        # Reset index and continue with next dimension
        builder.store(zero, idxptr)

    # End of array
    builder.store(cgutils.true_byte, self.exhausted)
    builder.branch(bbend)

    builder.position_at_end(bbend)
def load_direct(self, byteoffset):
    """Generic load of the item stored *byteoffset* bytes into dataptr."""
    item_ptr = cgutils.pointer_add(self.builder, self.dataptr, byteoffset)
    return self.context.unpack_value(self.builder, self.fe_type, item_ptr)