def ll_iter_next(it):
    """Advance the n-dimensional iterator *it* to the next element.

    Bumps the flat element index, then updates the per-axis coordinates
    odometer-style, walking the axes in reverse order (the last axis is
    the fastest-varying one).  The data pointer is kept in sync with the
    coordinates via direct_ptradd.
    """
    it.index += 1
    # unroll_ndim_rev iterates the dimension indices from last to first;
    # presumably unrolled at translation time -- TODO confirm.
    for i in unroll_ndim_rev:
        if it.coordinates[i] < it.dims_m1[i]:
            # Room left on this axis: take one forward stride and stop.
            it.coordinates[i] += 1
            it.dataptr = direct_ptradd(it.dataptr, it.strides[i])
            break
        # Axis exhausted: reset it to 0 and carry into the next-outer
        # axis; backstrides[i] presumably undoes the forward steps taken
        # along axis i -- confirm against the iterator's setup code.
        it.coordinates[i] = 0
        it.dataptr = direct_ptradd(it.dataptr, -it.backstrides[i])
def ll_get_view(ARRAY, ao, tpl):
    """Build a view of array *ao* selected by the index tuple *tpl*.

    *tpl* is a low-level tuple whose items are either integers (which
    consume a source dimension) or slices (which keep a -- possibly
    clipped -- dimension in the result).  The view shares ao.data; only
    dataptr/shape/strides are computed here.  Negative indices are not
    handled by this code.
    """
    array = ARRAY.ll_allocate(ndim)
    dataptr = direct_arrayitems(ao.data)
    src_i = 0   # dimension index in the source array
    tgt_i = 0   # dimension index in the view being built
    # unroll_r_tuple pairs each tuple position with its repr;
    # presumably unrolled at translation time -- TODO confirm.
    for src_i, r_key in unroll_r_tuple:
        if isinstance(r_key, IntegerRepr):
            # Plain integer index: shift the data pointer; this source
            # dimension does not appear in the view.
            dataptr = direct_ptradd(
                dataptr,
                getattr(tpl, 'item%d' % src_i) * ao.strides[src_i])
        elif r_key == rslice.startonly_slice_repr:
            # Slice of the form [start:]: clamp start to the dimension
            # size, keep the remaining extent.
            start = getattr(tpl, 'item%d' % src_i)
            size = ao.shape[src_i]
            if start > size:
                start = size
            size -= start
            dataptr = direct_ptradd(dataptr, start * ao.strides[src_i])
            array.shape[tgt_i] = size
            array.strides[tgt_i] = ao.strides[src_i]
            tgt_i += 1
        elif r_key == rslice.startstop_slice_repr:
            # Slice of the form [start:stop]: clamp both ends into
            # [0, size] and keep a possibly-empty extent.
            start = getattr(tpl, 'item%d' % src_i).start
            stop = getattr(tpl, 'item%d' % src_i).stop
            size = ao.shape[src_i]
            if start > size:
                start = size
            dataptr = direct_ptradd(dataptr, start * ao.strides[src_i])
            if stop < size:
                size = stop
            size -= start
            if size < 0:
                size = 0
            array.shape[tgt_i] = size
            array.strides[tgt_i] = ao.strides[src_i]
            tgt_i += 1
        else:
            assert 0
        src_i += 1
    # consume the rest of ndim as if we found more slices
    while tgt_i < ndim:
        array.shape[tgt_i] = ao.shape[src_i]
        array.strides[tgt_i] = ao.strides[src_i]
        tgt_i += 1
        src_i += 1
    ll_assert(tgt_i == ndim, "tgt_i == ndim")
    array.dataptr = dataptr
    array.data = ao.data    # keep a ref
    return array
def ref(self, firstitemptr):
    """Resolve this item offset against *firstitemptr*.

    Two cases: for arrays of containers, walk up to the parent array via
    parentlink and index into it (with a special end-marker object for
    one-past-the-end references); for fixed-size arrays of primitives or
    pointers, plain pointer arithmetic suffices.
    """
    A = lltype.typeOf(firstitemptr).TO
    if A == self.TYPE:
        # for array of containers
        parent, index = lltype.parentlink(firstitemptr._obj)
        assert parent, "%r is not within a container" % (firstitemptr,)
        assert isinstance(lltype.typeOf(parent),
                          (lltype.Array, lltype.FixedSizeArray)), (
            "%r is not within an array" % (firstitemptr,))
        if isinstance(index, str):
            # FixedSizeArray fields are named 'itemN'; recover N.
            assert index.startswith('item')    # itemN => N
            index = int(index[4:])
        index += self.repeat
        if index == parent.getlength():
            # for references exactly to the end of the array: return a
            # cached sentinel struct instead of a real (nonexistent) item
            try:
                endmarker = _end_markers[parent]
            except KeyError:
                endmarker = _endmarker_struct(A, parent=parent,
                                              parentindex=index)
                _end_markers[parent] = endmarker
            return endmarker._as_ptr()
        else:
            return parent.getitem(index)._as_ptr()
    elif (isinstance(A, lltype.FixedSizeArray) and
          array_item_type_match(A.OF, self.TYPE)):
        # for array of primitives or pointers
        return lltype.direct_ptradd(firstitemptr, self.repeat)
    else:
        raise TypeError('got %r, expected %r' % (A, self.TYPE))
def get_addr_for_num(self, i):
    """Return, as a Signed integer, the raw address of item *i*.

    The storage is split into chunks; _no_of maps the flat index to a
    (chunk number, offset-within-chunk) pair.
    """
    chunk_index, offset = self._no_of(i)
    chunk = self.chunks[chunk_index]
    # NOTE(review): result intentionally discarded -- presumably forces
    # the chunk's conversion/keepalive; confirm before removing.
    rffi.cast(lltype.Signed, chunk)
    first_item = lltype.direct_arrayitems(chunk)
    item_ptr = lltype.direct_ptradd(first_item, offset)
    return rffi.cast(lltype.Signed, item_ptr)
def walk_roots(self, collect_stack_root,
               collect_static_in_prebuilt_nongc,
               collect_static_in_prebuilt_gc):
    """Test-double root walker: invoke the given callbacks on all roots.

    Static roots come from the layout builder's recorded address lists
    (NULL entries are skipped).  Stack roots are copied into a temporary
    raw array of addresses so the GC may update them in place; the
    possibly-moved values are copied back into self.tester.stackroots
    before the raw array is freed.
    """
    gc = self.tester.gc
    layoutbuilder = self.tester.layoutbuilder
    if collect_static_in_prebuilt_gc:
        for addrofaddr in layoutbuilder.addresses_of_static_ptrs:
            if addrofaddr.address[0]:
                collect_static_in_prebuilt_gc(gc, addrofaddr)
    if collect_static_in_prebuilt_nongc:
        for addrofaddr in layoutbuilder.addresses_of_static_ptrs_in_nongc:
            if addrofaddr.address[0]:
                collect_static_in_prebuilt_nongc(gc, addrofaddr)
    if collect_stack_root:
        stackroots = self.tester.stackroots
        a = lltype.malloc(ADDR_ARRAY, len(stackroots), flavor='raw')
        for i in range(len(a)):
            a[i] = llmemory.cast_ptr_to_adr(stackroots[i])
        a_base = lltype.direct_arrayitems(a)
        for i in range(len(a)):
            # Hand the GC the *address of* each slot, so it can rewrite
            # the root if the object moves.
            ai = lltype.direct_ptradd(a_base, i)
            collect_stack_root(gc, llmemory.cast_ptr_to_adr(ai))
        for i in range(len(a)):
            # Copy the (possibly updated) addresses back as typed ptrs.
            PTRTYPE = lltype.typeOf(stackroots[i])
            stackroots[i] = llmemory.cast_adr_to_ptr(a[i], PTRTYPE)
        lltype.free(a, flavor='raw')
def get_address_of_gcref(self, gcref): assert lltype.typeOf(gcref) == llmemory.GCREF # first look in the hashtable, using an inexact hash (fails after # the object moves) addr = llmemory.cast_ptr_to_adr(gcref) hash = llmemory.cast_adr_to_int(addr) hash -= hash >> self.HASHTABLE_BITS hash &= self.HASHTABLE_SIZE - 1 addr_ref = self.hashtable[hash] # the following test is safe anyway, because the addresses found # in the hashtable are always the addresses of nonmovable stuff # ('addr_ref' is an address inside self.list, not directly the # address of a real moving GC object -- that's 'addr_ref.address[0]'.) if addr_ref.address[0] == addr: return addr_ref # if it fails, add an entry to the list if self.nextindex == len(self.list): # reallocate first, increasing a bit the size every time self.oldlists.append(self.list) self.list = self.alloc_gcref_list(len(self.list) // 4 * 5) self.nextindex = 0 # add it index = self.nextindex self.list[index] = gcref addr_ref = lltype.direct_ptradd(lltype.direct_arrayitems(self.list), index) addr_ref = llmemory.cast_ptr_to_adr(addr_ref) self.nextindex = index + 1 # record it in the hashtable self.hashtable[hash] = addr_ref return addr_ref
def ref(self, firstitemptr):
    """Resolve this repeated item offset relative to *firstitemptr*.

    For arrays of containers, find the enclosing array via parentlink
    and index into it, substituting a cached end-marker struct for a
    reference exactly one past the end.  For fixed-size arrays of
    primitives/pointers, use direct pointer arithmetic.
    """
    A = lltype.typeOf(firstitemptr).TO
    if A == self.TYPE:
        # for array of containers
        parent, index = lltype.parentlink(firstitemptr._obj)
        assert parent, "%r is not within a container" % (firstitemptr, )
        assert isinstance(
            lltype.typeOf(parent),
            (lltype.Array, lltype.FixedSizeArray)), (
                "%r is not within an array" % (firstitemptr, ))
        if isinstance(index, str):
            # FixedSizeArray items are named 'itemN'; extract N.
            assert index.startswith('item')    # itemN => N
            index = int(index[4:])
        index += self.repeat
        if index == parent.getlength():
            # for references exactly to the end of the array: hand out
            # a per-parent cached sentinel instead of a real item
            try:
                endmarker = _end_markers[parent]
            except KeyError:
                endmarker = _endmarker_struct(A, parent=parent,
                                              parentindex=index)
                _end_markers[parent] = endmarker
            return endmarker._as_ptr()
        else:
            return parent.getitem(index)._as_ptr()
    elif (isinstance(A, lltype.FixedSizeArray) and
          array_item_type_match(A.OF, self.TYPE)):
        # for array of primitives or pointers
        return lltype.direct_ptradd(firstitemptr, self.repeat)
    else:
        raise TypeError('got %r, expected %r' % (A, self.TYPE))
def ll_get_view(ARRAY, ao, tpl):
    """Build a view of array *ao* according to the index tuple *tpl*.

    Tuple items are either integers (consume a source dimension) or
    slices (keep a clipped dimension).  The result shares ao.data and
    only gets fresh dataptr/shape/strides.  Negative indices are not
    handled here.
    """
    array = ARRAY.ll_allocate(ndim)
    dataptr = direct_arrayitems(ao.data)
    src_i = 0   # current dimension of the source array
    tgt_i = 0   # current dimension of the view under construction
    # unroll_r_tuple enumerates (position, repr) pairs; presumably
    # unrolled at translation time -- TODO confirm.
    for src_i, r_key in unroll_r_tuple:
        if isinstance(r_key, IntegerRepr):
            # Integer index: just advance the pointer; dimension is gone.
            dataptr = direct_ptradd(dataptr, getattr(tpl, 'item%d'%src_i)*ao.strides[src_i])
        elif r_key == rslice.startonly_slice_repr:
            # [start:] slice: clamp start, keep the tail of the axis.
            start = getattr(tpl, 'item%d'%src_i)
            size = ao.shape[src_i]
            if start > size:
                start = size
            size -= start
            dataptr = direct_ptradd(dataptr, start*ao.strides[src_i])
            array.shape[tgt_i] = size
            array.strides[tgt_i] = ao.strides[src_i]
            tgt_i += 1
        elif r_key == rslice.startstop_slice_repr:
            # [start:stop] slice: clamp both bounds into [0, size].
            start = getattr(tpl, 'item%d'%src_i).start
            stop = getattr(tpl, 'item%d'%src_i).stop
            size = ao.shape[src_i]
            if start > size:
                start = size
            dataptr = direct_ptradd(dataptr, start*ao.strides[src_i])
            if stop < size:
                size = stop
            size -= start
            if size < 0:
                size = 0
            array.shape[tgt_i] = size
            array.strides[tgt_i] = ao.strides[src_i]
            tgt_i += 1
        else:
            assert 0
        src_i += 1
    # consume the rest of ndim as if we found more slices
    while tgt_i < ndim:
        array.shape[tgt_i] = ao.shape[src_i]
        array.strides[tgt_i] = ao.strides[src_i]
        tgt_i += 1
        src_i += 1
    ll_assert(tgt_i == ndim, "tgt_i == ndim")
    array.dataptr = dataptr
    array.data = ao.data    # keep a ref
    return array
def finalize_call(self, args, args_w, call_local):
    """Run per-argument finalization, free each argument, then release
    the C-side argument buffer *args*."""
    stride = capi.c_function_arg_sizeof()
    raw_args = rffi.cast(rffi.CCHARP, args)
    for i in range(len(args_w)):
        converter = self.converters[i]
        arg_ptr = lltype.direct_ptradd(raw_args, i * stride)
        local_ptr = self._address_from_local_buffer(call_local, i)
        converter.finalize_call(self.space, args_w[i], local_ptr)
        converter.free_argument(self.space,
                                rffi.cast(capi.C_OBJECT, arg_ptr),
                                local_ptr)
    capi.c_deallocate_function_args(args)
def writeall_not_sandboxed(fd, buf, length):
    """Write all *length* bytes of *buf* to *fd*, looping over short
    writes; raise IOError if a write returns <= 0."""
    while length > 0:
        nbytes = rffi.cast(rffi.SIZE_T, length)
        written = rffi.cast(lltype.Signed,
                            ll_write_not_sandboxed(fd, buf, nbytes))
        if written <= 0:
            raise IOError
        length -= written
        # Advance the buffer pointer past the bytes already written.
        advanced = lltype.direct_ptradd(lltype.direct_arrayitems(buf),
                                        written)
        buf = rffi.cast(rffi.CCHARP, advanced)
def test_convert_subarray(self):
    """An interior pointer into a GcArray survives lltype2ctypes
    conversion and keeps its 3-item offset from the array base."""
    ARRAY_OF_SIGNED = lltype.GcArray(lltype.Signed)
    arr = lltype.malloc(ARRAY_OF_SIGNED, 20)
    third_item = lltype.direct_ptradd(lltype.direct_arrayitems(arr), 3)
    lltype2ctypes(third_item)
    base_addr = rffi.cast(lltype.Signed, lltype.direct_arrayitems(arr))
    item_addr = rffi.cast(lltype.Signed, third_item)
    assert item_addr == base_addr + 3 * rffi.sizeof(lltype.Signed)
def test_convert_subarray(self):
    """Converting an interior array pointer must preserve its address
    offset relative to the first item."""
    ITEM = lltype.Signed
    arr = lltype.malloc(lltype.GcArray(ITEM), 20)
    first = lltype.direct_arrayitems(arr)
    inside = lltype.direct_ptradd(first, 3)
    lltype2ctypes(inside)
    expected = rffi.cast(lltype.Signed, first) + rffi.sizeof(ITEM) * 3
    assert rffi.cast(lltype.Signed, inside) == expected
def prepare_arguments(self, args_w, call_local):
    """Convert the wrapped arguments *args_w* into a freshly allocated
    C-side argument buffer and return it.

    Each argument slot is `stride` bytes apart.  If any conversion
    raises, the already-converted arguments 0..i-1 are freed, the
    buffer is deallocated, and the exception is re-raised.
    """
    jit.promote(self)
    args = capi.c_allocate_function_args(len(args_w))
    stride = capi.c_function_arg_sizeof()
    for i in range(len(args_w)):
        conv = self.converters[i]
        w_arg = args_w[i]
        try:
            arg_i = lltype.direct_ptradd(rffi.cast(rffi.CCHARP, args),
                                         i * stride)
            loc_i = self._address_from_local_buffer(call_local, i)
            conv.convert_argument(self.space, w_arg,
                                  rffi.cast(capi.C_OBJECT, arg_i), loc_i)
        except:
            # fun :-(  -- unwind: free every argument converted so far,
            # then the whole buffer, before propagating the error.
            for j in range(i):
                conv = self.converters[j]
                arg_j = lltype.direct_ptradd(rffi.cast(rffi.CCHARP, args),
                                             j * stride)
                loc_j = self._address_from_local_buffer(call_local, j)
                conv.free_argument(self.space,
                                   rffi.cast(capi.C_OBJECT, arg_j), loc_j)
            capi.c_deallocate_function_args(args)
            raise
    return args
def pop(self):
    """Pop and return the next non-NULL static root address, or
    llmemory.NULL when both sources are exhausted.

    First scans the [static_current, static_root_end) range, skipping
    entries whose target pointer is NULL; then walks
    gcdata.static_roots backwards via static_roots_index.
    """
    while self.static_current != gcdata.static_root_end:
        result = self.static_current
        self.static_current += sizeofaddr
        # result is an address-of-address; skip roots pointing to NULL.
        if result.address[0].address[0] != llmemory.NULL:
            return result.address[0]
    i = self.static_roots_index
    if i > 0:
        i -= 1
        self.static_roots_index = i
        # Return the address of slot i inside the static_roots array.
        p = lltype.direct_arrayitems(gcdata.static_roots)
        p = lltype.direct_ptradd(p, i)
        return llmemory.cast_ptr_to_adr(p)
    return llmemory.NULL
def initialize(self):
    """Allocate the initial GCREF list and the pseudo-hashtable."""
    if we_are_translated():
        n = 2000
    else:
        n = 10          # keep it small when running tests untranslated
    self.list = self.alloc_gcref_list(n)
    self.nextindex = 0
    self.oldlists = []
    # A pseudo dictionary: fixed size, and it may contain random
    # nonsense after a collection moved the objects.  It is only used
    # to avoid too many duplications in the GCREF_LISTs.
    self.hashtable = lltype.malloc(self.HASHTABLE,
                                   self.HASHTABLE_SIZE + 1,
                                   flavor='raw')
    # Point every slot at the extra trailing entry of the table itself,
    # so lookups always find *some* valid (non-matching) address.
    last_entry = lltype.direct_ptradd(
        lltype.direct_arrayitems(self.hashtable),
        self.HASHTABLE_SIZE)
    sentinel = llmemory.cast_ptr_to_adr(last_entry)
    for slot in range(self.HASHTABLE_SIZE + 1):
        self.hashtable[slot] = sentinel
def initialize(self):
    """Set up the GCREF list, its history list, and the hashtable."""
    if we_are_translated():
        initial_size = 2000
    else:
        initial_size = 10   # tests only
    self.list = self.alloc_gcref_list(initial_size)
    self.nextindex = 0
    self.oldlists = []
    # A pseudo dictionary: it is fixed size, and it may contain
    # random nonsense after a collection moved the objects.  It is only
    # used to avoid too many duplications in the GCREF_LISTs.
    table_len = self.HASHTABLE_SIZE + 1
    self.hashtable = lltype.malloc(self.HASHTABLE, table_len,
                                   flavor='raw')
    # Fill all slots with the address of the table's extra final entry.
    dummy_ptr = lltype.direct_ptradd(
        lltype.direct_arrayitems(self.hashtable), self.HASHTABLE_SIZE)
    dummy = llmemory.cast_ptr_to_adr(dummy_ptr)
    for i in range(table_len):
        self.hashtable[i] = dummy
def ref(self, firstitemptr):
    """Resolve this repeated item offset against *firstitemptr*.

    For arrays of containers, climb to the parent array via parentlink
    and index it directly (no end-marker handling in this variant).
    For fixed-size arrays of primitives/pointers, use pointer
    arithmetic.
    """
    A = lltype.typeOf(firstitemptr).TO
    if A == self.TYPE:
        # for array of containers
        parent, index = lltype.parentlink(firstitemptr._obj)
        assert parent, "%r is not within a container" % (firstitemptr,)
        assert isinstance(lltype.typeOf(parent),
                          (lltype.Array, lltype.FixedSizeArray)), (
            "%r is not within an array" % (firstitemptr,))
        if isinstance(index, str):
            # FixedSizeArray items are named 'itemN'; extract N.
            assert index.startswith('item')    # itemN => N
            index = int(index[4:])
        return parent.getitem(index + self.repeat)._as_ptr()
    elif isinstance(A, lltype.FixedSizeArray) and A.OF == self.TYPE:
        # for array of primitives or pointers
        return lltype.direct_ptradd(firstitemptr, self.repeat)
    else:
        raise TypeError('got %r, expected %r' % (A, self.TYPE))
def __getitem__(self, index):
    """Read the value stored *index* items past this address."""
    target = self.addr.ref()
    if index != 0:
        target = lltype.direct_ptradd(target, index)
    return self.read_from_ptr(target)
def _opaque_direct_ptradd(ptr, offset):
    """Advance the opaque handle *ptr* by *offset* char-sized steps,
    returning it re-cast to C_OBJECT."""
    as_chars = rffi.cast(rffi.CCHARP, ptr)
    shifted = lltype.direct_ptradd(as_chars, offset)
    return rffi.cast(capi.C_OBJECT, shifted)
def get_addr_for_num(self, i):
    """Return the address of self.ar[i], cast to a Signed integer."""
    first_item = lltype.direct_arrayitems(self.ar)
    item_ptr = lltype.direct_ptradd(first_item, i)
    return rffi.cast(lltype.Signed, item_ptr)
def direct_ptradd(ptr, offset):
    """Return *ptr* (a C_OBJECT) advanced by *offset* char-sized steps."""
    assert lltype.typeOf(ptr) == C_OBJECT
    offset = rffi.cast(rffi.SIZE_T, offset)
    jit.promote(offset)   # let the JIT specialize on the offset value
    as_chars = rffi.cast(rffi.CCHARP, ptr)
    return rffi.cast(C_OBJECT, lltype.direct_ptradd(as_chars, offset))
def op_direct_ptradd(obj, index):
    """llinterp implementation of the 'direct_ptradd' operation."""
    checkptr(obj)                  # first operand must be a pointer
    assert is_valid_int(index)     # second operand must be an integer
    shifted = lltype.direct_ptradd(obj, index)
    return shifted
def op_direct_ptradd(obj, index):
    """llinterp implementation of the 'direct_ptradd' operation."""
    # Validate operand kinds before doing the pointer arithmetic.
    checkptr(obj)
    assert isinstance(index, int)
    result = lltype.direct_ptradd(obj, index)
    return result
def next(iself, gc, next, range_highest):
    # Return the "next" valid GC object' address.  This usually
    # means just returning "next", until we reach "range_highest",
    # except that we are skipping NULLs.  If "next" contains a
    # MARKER instead, then we go into JIT-frame-lookup mode.
    #
    while True:
        #
        # If we are not iterating right now in a JIT frame
        if iself.frame_addr == 0:
            #
            # Look for the next shadowstack address that
            # contains a valid pointer
            while next != range_highest:
                if next.signed[0] == self.MARKER:
                    break
                if gc.points_to_valid_gc_object(next):
                    return next
                next += llmemory.sizeof(llmemory.Address)
            else:
                return llmemory.NULL     # done
            #
            # It's a JIT frame.  Save away 'next' for later, and
            # go into JIT-frame-exploring mode.
            next += llmemory.sizeof(llmemory.Address)
            frame_addr = next.signed[0]
            iself.saved_next = next
            iself.frame_addr = frame_addr
            addr = llmemory.cast_int_to_adr(frame_addr +
                                            self.force_index_ofs)
            addr = iself.translateptr(iself.context, addr)
            force_index = addr.signed[0]
            if force_index < 0:
                force_index = ~force_index
            # NB: the next line reads a still-alive _callshapes,
            # because we ensure that just before we called this
            # piece of assembler, we put on the (same) stack a
            # pointer to a loop_token that keeps the force_index
            # alive.
            callshape = self._callshapes[force_index]
        else:
            # Continuing to explore this JIT frame
            callshape = iself.callshape
        #
        # 'callshape' points to the next INT of the callshape.
        # If it's zero we are done with the JIT frame.
        while rffi.cast(lltype.Signed, callshape[0]) != 0:
            #
            # Non-zero: it's an offset inside the JIT frame.
            # Read it and increment 'callshape'.
            offset = rffi.cast(lltype.Signed, callshape[0])
            callshape = lltype.direct_ptradd(callshape, 1)
            addr = llmemory.cast_int_to_adr(iself.frame_addr +
                                            offset)
            addr = iself.translateptr(iself.context, addr)
            if gc.points_to_valid_gc_object(addr):
                #
                # The JIT frame contains a valid GC pointer at
                # this address (as opposed to NULL).  Save
                # 'callshape' for the next call, and return the
                # address.
                iself.callshape = callshape
                return addr
        #
        # Restore 'next' from 'saved_next' and loop back to the start.
        iself.frame_addr = 0
        next = iself.saved_next
        next += llmemory.sizeof(llmemory.Address)
def _address_from_local_buffer(self, call_local, idx):
    """Return the address of slot *idx* inside the per-call local
    buffer, or *call_local* unchanged when there is no buffer."""
    if not call_local:
        return call_local
    slot_size = 2 * rffi.sizeof(rffi.VOIDP)   # each slot spans two voidps
    raw = rffi.cast(rffi.CCHARP, call_local)
    slot = lltype.direct_ptradd(raw, idx * slot_size)
    return rffi.cast(rffi.VOIDP, slot)
def f(p, n):
    """Advance pointer *p* by *n* items (thin test wrapper)."""
    shifted = lltype.direct_ptradd(p, n)
    return shifted
def fn():
    """Read a1[6] through a direct interior pointer (test helper)."""
    first = lltype.direct_arrayitems(a1)
    seventh = lltype.direct_ptradd(first, 6)
    return seventh[0]
def __setitem__(self, index, value):
    """Store *value* at *index* items past this address; the value's
    low-level type must match self.TYPE exactly."""
    assert lltype.typeOf(value) == self.TYPE
    target = self.addr.ref()
    if index != 0:
        target = lltype.direct_ptradd(target, index)
    self.write_into_ptr(target, value)
def get_addr_for_num(self, i):
    """Return the raw address (as Signed) of item *i* in the chunked
    storage; _no_of maps i to (chunk number, offset)."""
    which, within = self._no_of(i)
    chunk = self.chunks[which]
    # NOTE(review): cast result unused -- presumably a keepalive or
    # forced conversion; confirm before deleting.
    rffi.cast(lltype.Signed, chunk)
    return rffi.cast(
        lltype.Signed,
        lltype.direct_ptradd(lltype.direct_arrayitems(chunk), within))
def get_addr_for_num(self, i):
    """Return, as a Signed, the address of element *i* of self.ar."""
    base_ptr = lltype.direct_arrayitems(self.ar)
    return rffi.cast(lltype.Signed,
                     lltype.direct_ptradd(base_ptr, i))