def ll_dealloc(addr):
    # Deallocate the refcounted object at 'addr' by invoking its RTTI
    # destructor.
    # bump refcount to 1
    # (presumably so the object is considered alive while queryptr runs;
    # it is reset to 0 before the destructor is called -- TODO confirm)
    gcheader = llmemory.cast_adr_to_ptr(addr - gc_header_offset, HDRPTR)
    gcheader.refcount = 1
    v = llmemory.cast_adr_to_ptr(addr, QUERY_ARG_TYPE)
    rtti = queryptr(v)
    gcheader.refcount = 0
    llop.gc_call_rtti_destructor(lltype.Void, rtti, addr)
def cast_adr_to_int(self, adr):
    # Test-only stub: map the address of the two known write-barrier
    # helpers to the magic integers 42 and 43 (NULL maps to 0), so tests
    # can check which helper a generated call points to.
    if not adr:
        return 0
    try:
        ptr = llmemory.cast_adr_to_ptr(adr, gc_ll_descr.WB_FUNCPTR)
        assert ptr._obj._callable == llop1._write_barrier_failing_case
        return 42
    except lltype.InvalidCast:
        # not a plain write barrier: must be the from-array variant
        ptr = llmemory.cast_adr_to_ptr(adr, gc_ll_descr.WB_ARRAY_FUNCPTR)
        assert ptr._obj._callable == llop1._write_barrier_from_array_failing_case
        return 43
def _check_subclass(self, vtable1, vtable2):
    """Return whether vtable1 describes a (sub)class of vtable2,
    using the precomputed subclass-range numbering of the vtables."""
    cls1 = llmemory.cast_adr_to_ptr(
        llmemory.cast_int_to_adr(vtable1), rclass.CLASSTYPE)
    cls2 = llmemory.cast_adr_to_ptr(
        llmemory.cast_int_to_adr(vtable2), rclass.CLASSTYPE)
    # cls1 is a subclass of cls2 iff its range start falls inside
    # cls2's [min, max] range
    return (cls2.subclassrange_min
            <= cls1.subclassrange_min
            <= cls2.subclassrange_max)
def _check_subclass(vtable1, vtable2):
    """Return whether vtable1 describes a (sub)class of vtable2."""
    # checks that vtable1 is a subclass of vtable2
    known_class = llmemory.cast_adr_to_ptr(
        llmemory.cast_int_to_adr(vtable1), rclass.CLASSTYPE)
    expected_class = llmemory.cast_adr_to_ptr(
        llmemory.cast_int_to_adr(vtable2), rclass.CLASSTYPE)
    # note: the test is for a range including 'max', but 'max'
    # should never be used for actual classes.  Including it makes
    # it easier to pass artificial tests.
    return (expected_class.subclassrange_min
            <= known_class.subclassrange_min
            <= expected_class.subclassrange_max)
def test_look_inside_object():
    """Check that an arena-reserved object can be read/written both
    through a typed pointer and through raw offset arithmetic."""
    # this code is also used in translation tests below
    myarenasize = 50
    a = arena_malloc(myarenasize, False)
    b = a + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(b, precomputed_size)
    # write through raw offset, read back through the typed pointer
    (b + llmemory.offsetof(SX, 'x')).signed[0] = 123
    assert llmemory.cast_adr_to_ptr(b, SPTR).x == 123
    llmemory.cast_adr_to_ptr(b, SPTR).x += 1
    assert (b + llmemory.offsetof(SX, 'x')).signed[0] == 124
    # resetting with zero=True must clear the re-reserved object
    arena_reset(a, myarenasize, True)
    arena_reserve(b, round_up_for_allocation(llmemory.sizeof(SX)))
    assert llmemory.cast_adr_to_ptr(b, SPTR).x == 0
    arena_free(a)
    return 42
def link(pageaddr, size_class, size_block, nblocks, nusedblocks, step=1):
    # Build a PAGE_HEADER at 'pageaddr' describing a page with
    # 'nusedblocks' allocated blocks, and chain it into the allocator's
    # per-size-class page lists.  With step == 2, every other block is a
    # free hole, and the holes are chained into the page's free list.
    assert step in (1, 2)
    llarena.arena_reserve(pageaddr, llmemory.sizeof(PAGE_HEADER))
    page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR)
    if step == 1:
        # blocks 0..nusedblocks-1 in use, rest uninitialized
        page.nfree = 0
        nuninitialized = nblocks - nusedblocks
    else:
        # alternating used/free blocks
        page.nfree = nusedblocks
        nuninitialized = nblocks - 2*nusedblocks
    page.freeblock = pageaddr + hdrsize + nusedblocks * size_block
    if nusedblocks < nblocks:
        chainedlists = ac.page_for_size
    else:
        chainedlists = ac.full_page_for_size
    page.nextpage = chainedlists[size_class]
    page.arena = ac.current_arena
    chainedlists[size_class] = page
    if fill_with_objects:
        for i in range(0, nusedblocks*step, step):
            objaddr = pageaddr + hdrsize + i * size_block
            llarena.arena_reserve(objaddr, _dummy_size(size_block))
        if step == 2:
            # chain the odd-numbered holes into the free list; 'prev' is
            # the *expression* naming the previous link field, assigned
            # via exec so the same code handles both the page header
            # field and in-hole address words
            prev = 'page.freeblock'
            for i in range(1, nusedblocks*step, step):
                holeaddr = pageaddr + hdrsize + i * size_block
                llarena.arena_reserve(holeaddr,
                                      llmemory.sizeof(llmemory.Address))
                exec '%s = holeaddr' % prev in globals(), locals()
                prevhole = holeaddr
                prev = 'prevhole.address[0]'
            # terminate the chain just past the last used block
            endaddr = pageaddr + hdrsize + 2*nusedblocks * size_block
            exec '%s = endaddr' % prev in globals(), locals()
    assert ac._nuninitialized(page, size_class) == nuninitialized
def ll_finalizer(addr):
    # Finalizer wrapper: run the destructor graph on the object at
    # 'addr' through the llinterp.  Finalizers must not raise, so any
    # escaping ll-level exception is turned into a hard error.
    try:
        v = llmemory.cast_adr_to_ptr(addr, DESTR_ARG)
        self.llinterp.eval_graph(destrgraph, [v], recursive=True)
    except llinterp.LLException:
        raise RuntimeError(
            "a finalizer raised an exception, shouldn't happen")
def malloc_varsize_slowpath(self, typeid, length, force_nonmovable=False):
    # For objects that are too large, or when the nursery is exhausted.
    # In order to keep malloc_varsize_clear() as compact as possible,
    # we recompute what we need in this slow path instead of passing
    # it all as function arguments.
    size_gc_header = self.gcheaderbuilder.size_gc_header
    nonvarsize = size_gc_header + self.fixed_size(typeid)
    itemsize = self.varsize_item_sizes(typeid)
    offset_to_length = self.varsize_offset_to_length(typeid)
    # guard against overflow in the total-size computation
    try:
        varsize = ovfcheck(itemsize * length)
        totalsize = ovfcheck(nonvarsize + varsize)
    except OverflowError:
        raise MemoryError()
    if self.has_gcptr_in_varsize(typeid):
        nonlarge_max = self.nonlarge_gcptrs_max
    else:
        nonlarge_max = self.nonlarge_max
    if force_nonmovable or raw_malloc_usage(totalsize) > nonlarge_max:
        # too big for the nursery (or explicitly nonmovable):
        # allocate with mark-and-sweep
        result = self.malloc_varsize_marknsweep(totalsize)
        flags = self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS | GCFLAG_UNVISITED
    else:
        # fits the nursery after a collection
        result = self.malloc_varsize_collecting_nursery(totalsize)
        flags = self.GCFLAGS_FOR_NEW_YOUNG_OBJECTS
    self.init_gc_object(result, typeid, flags)
    # store the array length into the object
    (result + size_gc_header + offset_to_length).signed[0] = length
    return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
def ll_decref(adr, dealloc):
    # Decrement the refcount of the object at 'adr' and call 'dealloc'
    # when it reaches zero.  A NULL address is ignored.
    if adr:
        gcheader = llmemory.cast_adr_to_ptr(adr - gc_header_offset, HDRPTR)
        refcount = gcheader.refcount - 1
        gcheader.refcount = refcount
        if refcount == 0:
            dealloc(adr)
def malloc(self, TYPE, n=None):
    # Allocate a TYPE instance (optional varsize length 'n') through the
    # wrapped GC and return it as a typed pointer.
    addr = self.gc.malloc(self.get_type_id(TYPE), n)
    # NOTE(review): this looks like leftover debugging output -- the
    # sibling malloc_nonmovable() has no such call; confirm and remove.
    debug_print(self.gc)
    obj_ptr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(TYPE))
    if not self.gc.malloc_zero_filled:
        # the GC did not zero the memory; clear embedded gc pointers
        zero_gc_pointers_inside(obj_ptr, TYPE)
    return obj_ptr
def f():
    # Write a field through a typed pointer, then read it back through
    # raw offset arithmetic; encodes both results in a single integer.
    adr = llmemory.raw_malloc(sizeofs)
    s = llmemory.cast_adr_to_ptr(adr, STRUCTPTR)
    s.y = 5 # does not crash
    result = (adr + offsety).signed[0] * 10 + int(offsety < sizeofs)
    llmemory.raw_free(adr)
    return result
def malloc_fixedsize_clear(self, typeid, size,
                           has_finalizer=False,
                           is_finalizer_light=False,
                           contains_weakptr=False):
    # Fast-path fixed-size allocation in the nursery; falls back to the
    # generic SemiSpaceGC allocator for finalizers or large objects.
    if (has_finalizer or
        (raw_malloc_usage(size) > self.lb_young_fixedsize and
         raw_malloc_usage(size) > self.largest_young_fixedsize)):
        # ^^^ we do two size comparisons; the first one appears redundant,
        #     but it can be constant-folded if 'size' is a constant; then
        #     it almost always folds down to False, which kills the
        #     second comparison as well.
        ll_assert(not contains_weakptr, "wrong case for mallocing weakref")
        # "non-simple" case or object too big: don't use the nursery
        return SemiSpaceGC.malloc_fixedsize_clear(self, typeid, size,
                                                  has_finalizer,
                                                  is_finalizer_light,
                                                  contains_weakptr)
    size_gc_header = self.gcheaderbuilder.size_gc_header
    totalsize = size_gc_header + size
    result = self.nursery_free
    if raw_malloc_usage(totalsize) > self.nursery_top - result:
        # nursery full: collect it, which returns a fresh free pointer
        result = self.collect_nursery()
    llarena.arena_reserve(result, totalsize)
    # GCFLAG_NO_YOUNG_PTRS is never set on young objs
    self.init_gc_object(result, typeid, flags=0)
    self.nursery_free = result + totalsize
    if contains_weakptr:
        self.young_objects_with_weakrefs.append(result + size_gc_header)
    return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
def customtrace(gc, obj, callback, arg):
    # Custom trace hook for a shadowstack reference: walk the stack from
    # 'top' down to 'base', reporting each address slot to the GC.
    obj = llmemory.cast_adr_to_ptr(obj, SHADOWSTACKREFPTR)
    addr = obj.top
    start = obj.base
    while addr != start:
        # step first: 'top' points just past the last used slot
        addr -= sizeofaddr
        gc._trace_callback(callback, arg, addr)
def walk_roots(self, collect_stack_root,
               collect_static_in_prebuilt_nongc,
               collect_static_in_prebuilt_gc,
               is_minor=False):
    # Invoke the given callbacks on all GC roots known to the test
    # layout builder: static prebuilt roots and simulated stack roots.
    gc = self.tester.gc
    layoutbuilder = self.tester.layoutbuilder
    if collect_static_in_prebuilt_gc:
        for addrofaddr in layoutbuilder.addresses_of_static_ptrs:
            if addrofaddr.address[0]:
                collect_static_in_prebuilt_gc(gc, addrofaddr)
    if collect_static_in_prebuilt_nongc:
        for addrofaddr in layoutbuilder.addresses_of_static_ptrs_in_nongc:
            if addrofaddr.address[0]:
                collect_static_in_prebuilt_nongc(gc, addrofaddr)
    if collect_stack_root:
        stackroots = self.tester.stackroots
        # copy the stackroots into a raw (non-movable) array so the
        # callback can update the addresses in place, then read the
        # possibly-updated pointers back into the python-level list
        a = lltype.malloc(ADDR_ARRAY, len(stackroots), flavor='raw')
        for i in range(len(a)):
            a[i] = llmemory.cast_ptr_to_adr(stackroots[i])
        a_base = lltype.direct_arrayitems(a)
        for i in range(len(a)):
            ai = lltype.direct_ptradd(a_base, i)
            collect_stack_root(gc, llmemory.cast_ptr_to_adr(ai))
        for i in range(len(a)):
            PTRTYPE = lltype.typeOf(stackroots[i])
            stackroots[i] = llmemory.cast_adr_to_ptr(a[i], PTRTYPE)
        lltype.free(a, flavor='raw')
def execute_guard_class(self, descr, arg, klass):
    # Guard implementation: fail unless 'arg' has exactly the class
    # whose vtable address is the integer 'klass'.
    value = lltype.cast_opaque_ptr(rclass.OBJECTPTR, arg)
    expected_class = llmemory.cast_adr_to_ptr(
        llmemory.cast_int_to_adr(klass),
        rclass.CLASSTYPE)
    if value.typeptr != expected_class:
        self.fail_guard(descr)
def ll_weakref_create(targetaddr):
    # Create a Boehm-GC weak reference to 'targetaddr': a link cell
    # allocated atomically (not scanned for pointers) and registered as
    # a disappearing link so Boehm clears it when the target dies.
    link = llop.boehm_malloc_atomic(llmemory.Address, sizeof_weakreflink)
    if not link:
        raise MemoryError
    plink = llmemory.cast_adr_to_ptr(link, lltype.Ptr(WEAKLINK))
    plink[0] = targetaddr
    llop.boehm_disappearing_link(lltype.Void, link, targetaddr)
    return llmemory.cast_ptr_to_weakrefptr(plink)
def do_malloc_fixedsize_clear(self, RESTYPE, type_id, size,
                              has_finalizer, has_light_finalizer,
                              contains_weakptr):
    # Test double: allocate via the fake _malloc and record the call.
    # Finalizers/weakrefs are not supported by this stub.
    assert not contains_weakptr
    assert not has_finalizer
    assert not has_light_finalizer
    p, tid = self._malloc(type_id, size)
    p = llmemory.cast_adr_to_ptr(p, RESTYPE)
    self.record.append(("fixedsize", repr(size), tid, p))
    return p
def ll_decref_simple(adr):
    # Decrement the refcount of the object at 'adr'; free it via
    # gc_free when the count reaches zero (no destructor involved).
    # A NULL address is ignored.
    if adr:
        gcheader = llmemory.cast_adr_to_ptr(adr - gc_header_offset, HDRPTR)
        refcount = gcheader.refcount - 1
        if refcount == 0:
            # note: the header is not written back before freeing
            llop.gc_free(lltype.Void, adr)
        else:
            gcheader.refcount = refcount
def gc_fq_next_dead(self, fq_tag):
    """Pop the next dead object from the finalizer queue identified by
    'fq_tag' and return it as an OBJECTPTR; returns a pointer made from
    NULL when the queue is empty."""
    queue_index = self.get_finalizer_queue_index(fq_tag)
    pending = self.finalizer_handlers[queue_index][1]
    if not pending.non_empty():
        return llmemory.cast_adr_to_ptr(llmemory.NULL, rclass.OBJECTPTR)
    return llmemory.cast_adr_to_ptr(pending.popleft(), rclass.OBJECTPTR)
def is_valid_class_for(self, struct):
    # Return whether 'struct' is an instance of (a subclass of) the
    # class whose vtable this object holds.
    objptr = lltype.cast_opaque_ptr(rclass.OBJECTPTR, struct)
    cls = llmemory.cast_adr_to_ptr(
        heaptracker.int2adr(self.get_vtable()),
        lltype.Ptr(rclass.OBJECT_VTABLE))
    # this first comparison is necessary, since we want to make sure
    # that vtable for JitVirtualRef is the same without actually reading
    # fields
    return objptr.typeptr == cls or rclass.ll_isinstance(objptr, cls)
def weakref_create_getlazy(self, objgetter):
    # Create a weakref object pointing at the object that 'objgetter()'
    # returns.
    # we have to be lazy in reading the llinterp variable containing
    # the 'obj' pointer, because the gc.malloc() call below could
    # move it around
    type_id = self.get_type_id(gctypelayout.WEAKREF)
    addr = self.gc.malloc(type_id, None, zero=False)
    result = llmemory.cast_adr_to_ptr(addr, gctypelayout.WEAKREFPTR)
    result.weakptr = llmemory.cast_ptr_to_adr(objgetter())
    return llmemory.cast_ptr_to_weakrefptr(result)
def gcrefs_trace(gc, obj_addr, callback, arg):
    """Trace hook for a GCREFTRACER: report every word-sized slot of
    its array (array_length words starting at array_base_addr) to the
    GC via _trace_callback."""
    tracer = llmemory.cast_adr_to_ptr(obj_addr, lltype.Ptr(GCREFTRACER))
    total = tracer.array_length
    base = tracer.array_base_addr
    for idx in range(total):
        slot = rffi.cast(llmemory.Address, base + idx * WORD)
        gc._trace_callback(callback, arg, slot)
def malloc_nonmovable(self, TYPE, n=None, zero=False):
    # Allocate a TYPE instance that the GC will not move; returns a
    # null pointer when this GC cannot allocate nonmovable objects.
    typeid = self.get_type_id(TYPE)
    if not self.gc.can_malloc_nonmovable():
        return lltype.nullptr(TYPE)
    addr = self.gc.malloc_nonmovable(typeid, n, zero=zero)
    result = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(TYPE))
    if not self.gc.malloc_zero_filled:
        # the GC did not zero the memory; clear embedded gc pointers
        gctypelayout.zero_gc_pointers(result)
    return result
def _append_rpy_root(obj, gc):
    # Append the root at address 'obj' (as a GCREF) to the
    # pre-allocated list on 'gc'.
    # Can use the gc list, but should not allocate!
    # It is essential that the list is not resizable!
    lst = gc._list_rpy
    index = gc._count_rpy
    if index >= len(lst):
        # out of space in the preallocated list
        raise ValueError
    gc._count_rpy = index + 1
    lst[index] = llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
def do_malloc_varsize(self, RESTYPE, type_id, length, size, itemsize,
                      offset_to_length):
    # Test double: allocate a varsized object via the fake _malloc,
    # store its length, and record the call for later inspection.
    p, tid = self._malloc(type_id, size + itemsize * length)
    (p + offset_to_length).signed[0] = length
    p = llmemory.cast_adr_to_ptr(p, RESTYPE)
    self.record.append(("varsize", tid, length,
                        repr(size), repr(itemsize),
                        repr(offset_to_length), p))
    return p
def get_forwarding_address(self, obj):
    # Return where the object at 'obj' has been forwarded to during
    # collection.
    tid = self.header(obj).tid
    if tid & GCFLAG_EXTERNAL:
        self.visit_external_object(obj)
        return obj      # external or prebuilt objects are "forwarded"
                        # to themselves
    else:
        # the object body was overwritten with a FORWARDSTUB holding
        # the new location
        stub = llmemory.cast_adr_to_ptr(obj, self.FORWARDSTUBPTR)
        return stub.forw
def set_forwarding_address(self, obj, newobj, objsize):
    # To mark an object as forwarded, we set the GCFLAG_FORWARDED and
    # overwrite the object with a FORWARDSTUB.  Doing so is a bit
    # long-winded on llarena, but it all melts down to two memory
    # writes after translation to C.
    size_gc_header = self.size_gc_header()
    stubsize = llmemory.sizeof(self.FORWARDSTUB)
    tid = self.header(obj).tid
    ll_assert(tid & GCFLAG_EXTERNAL == 0, "unexpected GCFLAG_EXTERNAL")
    ll_assert(tid & GCFLAG_FORWARDED == 0, "unexpected GCFLAG_FORWARDED")
    # replace the object at 'obj' with a FORWARDSTUB.
    hdraddr = obj - size_gc_header
    llarena.arena_reset(hdraddr, size_gc_header + objsize, False)
    llarena.arena_reserve(hdraddr, size_gc_header + stubsize)
    hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(self.HDR))
    hdr.tid = tid | GCFLAG_FORWARDED
    stub = llmemory.cast_adr_to_ptr(obj, self.FORWARDSTUBPTR)
    stub.forw = newobj
def customtrace(gc, obj, callback, arg):
    # Trace hook for a stacklet: walk its saved shadowstack copy, whose
    # first word (sscopy.signed[0]) holds the copy's length in bytes,
    # reporting each address slot to the GC from the end downwards.
    stacklet = llmemory.cast_adr_to_ptr(obj, STACKLET_PTR)
    sscopy = stacklet.s_sscopy
    if sscopy:
        length_bytes = sscopy.signed[0]
        while length_bytes > 0:
            addr = sscopy + length_bytes
            gc._trace_callback(callback, arg, addr)
            length_bytes -= SIZEADDR
def op_gc_reload_possibly_moved(self, v_newaddr, v_ptr):
    # llinterp operation: after a collection may have moved the object,
    # reload the gc pointer variable 'v_ptr' from the address in
    # 'v_newaddr'.
    assert v_newaddr.concretetype is llmemory.Address
    assert isinstance(v_ptr.concretetype, lltype.Ptr)
    assert v_ptr.concretetype.TO._gckind == 'gc'
    newaddr = self.getval(v_newaddr)
    p = llmemory.cast_adr_to_ptr(newaddr, v_ptr.concretetype)
    if isinstance(v_ptr, Constant):
        # a Constant cannot be rebound; it must already hold 'p'
        assert v_ptr.value == p
    else:
        self.setvar(v_ptr, p)
def __setitem__(self, index, newvalue):
    # Store 'newvalue' (a fake address) into the local variable this
    # object wraps; only index 0 is meaningful.
    if index != 0:
        raise IndexError("address of local vars only support [0] indexing")
    if self.v.concretetype == llmemory.WeakRefPtr:
        # fish some more
        assert isinstance(newvalue, llmemory.fakeaddress)
        p = llmemory.cast_ptr_to_weakrefptr(newvalue.ptr)
    else:
        p = llmemory.cast_adr_to_ptr(newvalue, self.v.concretetype)
    self.frame.setvar(self.v, p)
def ll_identityhash(addr):
    # Return the identity hash of the object at 'addr', computing and
    # caching it in the header on first use.  A stored hash of 0 means
    # "not computed yet"; the value used is the bitwise-inverted address
    # (presumably so it is nonzero for real addresses -- TODO confirm).
    obj = llmemory.cast_adr_to_ptr(addr, HDRPTR)
    h = obj.hash
    if h == 0:
        obj.hash = h = ~llmemory.cast_adr_to_int(addr)
    return h
class CustomBaseTestRegalloc(BaseTestRegalloc):
    """Register-allocator test scaffolding: a CPU instance plus
    prebuilt helper functions, call descriptors, and fail descriptors
    collected into 'namespace' for use by the loop parser."""
    cpu = CPU(None, None)
    cpu.setup_once()

    # helper that raises a ZeroDivisionError-style LLException when
    # called with a nonzero argument
    def raising_func(i):
        if i:
            raise LLException(zero_division_error,
                              zero_division_value)
    FPTR = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Void))
    raising_fptr = llhelper(FPTR, raising_func)

    # trivial helper returning a constant
    def f(a):
        return 23
    FPTR = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Signed))
    f_fptr = llhelper(FPTR, f)
    f_calldescr = cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS, FPTR.TO.RESULT,
                                  EffectInfo.MOST_GENERAL)

    zero_division_tp, zero_division_value = get_zero_division_error(cpu)
    zd_addr = cpu.cast_int_to_adr(zero_division_tp)
    zero_division_error = llmemory.cast_adr_to_ptr(
        zd_addr, lltype.Ptr(rclass.OBJECT_VTABLE))
    raising_calldescr = cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS,
                                        FPTR.TO.RESULT,
                                        EffectInfo.MOST_GENERAL)

    targettoken = TargetToken()
    targettoken2 = TargetToken()
    fdescr1 = BasicFailDescr(1)
    fdescr2 = BasicFailDescr(2)
    fdescr3 = BasicFailDescr(3)

    def setup_method(self, meth):
        # reset the target tokens so each test starts fresh
        self.targettoken._ll_loop_code = 0
        self.targettoken2._ll_loop_code = 0

    # small arithmetic helpers exposed as callable function pointers
    def f1(x):
        return x + 1
    def f2(x, y):
        return x * y
    def f10(*args):
        assert len(args) == 10
        return sum(args)

    F1PTR = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Signed))
    F2PTR = lltype.Ptr(lltype.FuncType([lltype.Signed]*2, lltype.Signed))
    F10PTR = lltype.Ptr(lltype.FuncType([lltype.Signed]*10, lltype.Signed))
    f1ptr = llhelper(F1PTR, f1)
    f2ptr = llhelper(F2PTR, f2)
    f10ptr = llhelper(F10PTR, f10)
    f1_calldescr = cpu.calldescrof(F1PTR.TO, F1PTR.TO.ARGS,
                                   F1PTR.TO.RESULT,
                                   EffectInfo.MOST_GENERAL)
    f2_calldescr = cpu.calldescrof(F2PTR.TO, F2PTR.TO.ARGS,
                                   F2PTR.TO.RESULT,
                                   EffectInfo.MOST_GENERAL)
    f10_calldescr = cpu.calldescrof(F10PTR.TO, F10PTR.TO.ARGS,
                                    F10PTR.TO.RESULT,
                                    EffectInfo.MOST_GENERAL)
    # NOTE(review): the base class spells this 'type_system'; this
    # 'typesystem' attribute appears unused -- confirm.
    typesystem = 'lltype'
    namespace = locals().copy()
class BaseTestRegalloc(object):
    """Base class for register-allocator tests: builds a CPU with
    prebuilt helpers/descriptors (collected into 'namespace' for the
    loop parser) and provides parse/interpret/inspect utilities."""
    cpu = CPU(None, None)
    cpu.setup_once()

    # helper that raises a ZeroDivisionError-style LLException when
    # called with a nonzero argument
    def raising_func(i):
        if i:
            raise LLException(zero_division_error,
                              zero_division_value)
    FPTR = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Void))
    raising_fptr = llhelper(FPTR, raising_func)

    zero_division_tp, zero_division_value = get_zero_division_error(cpu)
    zd_addr = cpu.cast_int_to_adr(zero_division_tp)
    zero_division_error = llmemory.cast_adr_to_ptr(
        zd_addr, lltype.Ptr(rclass.OBJECT_VTABLE))
    raising_calldescr = cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS,
                                        FPTR.TO.RESULT,
                                        EffectInfo.MOST_GENERAL)

    targettoken = TargetToken()
    targettoken2 = TargetToken()
    fdescr1 = BasicFailDescr(1)
    fdescr2 = BasicFailDescr(2)
    fdescr3 = BasicFailDescr(3)

    def setup_method(self, meth):
        # reset the target tokens so each test starts fresh
        self.targettoken._ll_loop_code = 0
        self.targettoken2._ll_loop_code = 0

    # small arithmetic helpers exposed as callable function pointers
    def f1(x):
        return x + 1
    def f2(x, y):
        return x * y
    def f10(*args):
        assert len(args) == 10
        return sum(args)

    F1PTR = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Signed))
    F2PTR = lltype.Ptr(lltype.FuncType([lltype.Signed]*2, lltype.Signed))
    F10PTR = lltype.Ptr(lltype.FuncType([lltype.Signed]*10, lltype.Signed))
    f1ptr = llhelper(F1PTR, f1)
    f2ptr = llhelper(F2PTR, f2)
    f10ptr = llhelper(F10PTR, f10)
    f1_calldescr = cpu.calldescrof(F1PTR.TO, F1PTR.TO.ARGS,
                                   F1PTR.TO.RESULT,
                                   EffectInfo.MOST_GENERAL)
    f2_calldescr = cpu.calldescrof(F2PTR.TO, F2PTR.TO.ARGS,
                                   F2PTR.TO.RESULT,
                                   EffectInfo.MOST_GENERAL)
    f10_calldescr = cpu.calldescrof(F10PTR.TO, F10PTR.TO.ARGS,
                                    F10PTR.TO.RESULT,
                                    EffectInfo.MOST_GENERAL)
    namespace = locals().copy()
    type_system = 'lltype'

    def parse(self, s, boxkinds=None, namespace=None):
        # parse a textual trace into a loop, using the class namespace
        # by default
        return parse(s, self.cpu, namespace or self.namespace,
                     type_system=self.type_system,
                     boxkinds=boxkinds)

    def interpret(self, ops, args, run=True, namespace=None):
        # compile the textual loop 'ops' and (optionally) execute it
        # with 'args', converting python-level values to CPU arguments
        loop = self.parse(ops, namespace=namespace)
        self.loop = loop
        looptoken = JitCellToken()
        self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken)
        arguments = []
        for arg in args:
            if isinstance(arg, int):
                arguments.append(arg)
            elif isinstance(arg, float):
                arg = longlong.getfloatstorage(arg)
                arguments.append(arg)
            else:
                assert isinstance(lltype.typeOf(arg), lltype.Ptr)
                llgcref = lltype.cast_opaque_ptr(llmemory.GCREF, arg)
                arguments.append(llgcref)
        loop._jitcelltoken = looptoken
        if run:
            self.deadframe = self.cpu.execute_token(looptoken, *arguments)
        return loop

    def prepare_loop(self, ops):
        # run only the register-allocation phase on the parsed loop;
        # returns None when the backend has no build_regalloc()
        loop = self.parse(ops)
        try:
            regalloc = self.cpu.build_regalloc()
        except AttributeError:
            return None
        regalloc.prepare_loop(loop.inputargs, loop.operations,
                              loop.original_jitcell_token, [])
        return regalloc

    # accessors for the values left in the dead frame after execution
    def getint(self, index):
        return self.cpu.get_int_value(self.deadframe, index)

    def getfloat(self, index):
        return self.cpu.get_float_value(self.deadframe, index)

    def getints(self, end):
        return [self.cpu.get_int_value(self.deadframe, index) for
                index in range(0, end)]

    def getfloats(self, end):
        return [longlong.getrealfloat(
                    self.cpu.get_float_value(self.deadframe, index)) for
                index in range(0, end)]

    def getptr(self, index, T):
        gcref = self.cpu.get_ref_value(self.deadframe, index)
        return lltype.cast_opaque_ptr(T, gcref)

    def attach_bridge(self, ops, loop, guard_op_index, **kwds):
        # compile 'ops' as a bridge from the guard at 'guard_op_index'
        guard_op = loop.operations[guard_op_index]
        assert guard_op.is_guard()
        bridge = self.parse(ops, **kwds)
        assert ([box.type for box in bridge.inputargs] ==
                [box.type for box in guard_op.getfailargs()])
        faildescr = guard_op.getdescr()
        self.cpu.compile_bridge(faildescr, bridge.inputargs,
                                bridge.operations, loop._jitcelltoken)
        return bridge

    def run(self, loop, *arguments):
        # execute a compiled loop and return the descr it finished on
        self.deadframe = self.cpu.execute_token(loop._jitcelltoken,
                                                *arguments)
        return self.cpu.get_latest_descr(self.deadframe)
def instanceOf(self, instbox, clsbox):
    """Return whether the instance held in 'instbox' is an instance of
    the class whose vtable address is held in 'clsbox'."""
    vtable_adr = clsbox.getaddr()
    bounding_class = llmemory.cast_adr_to_ptr(vtable_adr, rclass.CLASSTYPE)
    instance = instbox.getref(rclass.OBJECTPTR)
    return rclass.ll_isinstance(instance, bounding_class)
def q_finalizer_handlers(self):
    """Return the finalizer-handler array as a typed pointer.

    The underlying address is set from framework.py or gcwrapper.py.
    """
    handlers_adr = self.finalizer_handlers
    return llmemory.cast_adr_to_ptr(handlers_adr,
                                    lltype.Ptr(FIN_HANDLER_ARRAY))
def get_typeids_z(gc):
    """Return the gc's typeids_z data as a char-array pointer."""
    return llmemory.cast_adr_to_ptr(gc.root_walker.gcdata.typeids_z,
                                    lltype.Ptr(rgc.ARRAY_OF_CHAR))
def ll_incref(adr):
    # Increment the refcount of the object at 'adr'; NULL is a no-op.
    if not adr:
        return
    hdr = llmemory.cast_adr_to_ptr(adr - gc_header_offset, HDRPTR)
    hdr.refcount += 1
def op_cast_adr_to_ptr(TYPE, adr):
    # llinterp operation: validate that 'adr' is a genuine address,
    # then cast it to the requested pointer TYPE.
    checkadr(adr)
    result = llmemory.cast_adr_to_ptr(adr, TYPE)
    return result
def get_typeids_list(gc):
    """Return the gc's typeids_list data as a halfword-array pointer."""
    return llmemory.cast_adr_to_ptr(gc.root_walker.gcdata.typeids_list,
                                    lltype.Ptr(ARRAY_OF_HALFWORDS))
def ll_finalizer(addr):
    # Finalizer wrapper: run the destructor on the object at 'addr',
    # saving and restoring any currently-propagating exception around
    # the call so the finalizer cannot clobber it.
    exc_instance = llop.gc_fetch_exception(EXC_INSTANCE_TYPE)
    v = llmemory.cast_adr_to_ptr(addr, DESTR_ARG)
    ll_call_destructor(destrptr, v, typename)
    llop.gc_restore_exception(lltype.Void, exc_instance)
def weakref_deref(self, PTRTYPE, obj):
    """Dereference the weakref 'obj' and return the stored address
    cast to PTRTYPE."""
    target = gctypelayout.ll_weakref_deref(obj)
    return llmemory.cast_adr_to_ptr(target, PTRTYPE)
def header(self, addr):
    """Return the GC header located just before the object at 'addr'."""
    hdraddr = addr - self.gcheaderbuilder.size_gc_header
    return llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(self.HDR))
def f():
    # Build the test structure, force a collection, then read s1.x.z
    # through an address-to-pointer cast.
    s1 = setup()
    llop.gc__collect(lltype.Void)
    inner = llmemory.cast_adr_to_ptr(s1.x, lltype.Ptr(T))
    return inner.z
def cast_int_to_ptr(x, TYPE):
    """Reinterpret the integer 'x' as an address and cast it to TYPE."""
    adr = llmemory.cast_int_to_adr(x)
    return llmemory.cast_adr_to_ptr(adr, TYPE)
def f():
    # Build the test structure, force a collection, then fetch the
    # attribute named by 'attrname' as a Ptr(T).
    s1 = setup()
    llop.gc__collect(lltype.Void)
    fieldadr = getattr(s1, attrname)
    return llmemory.cast_adr_to_ptr(fieldadr, lltype.Ptr(T))
def cast_adr_to_nongc_instance(Class, ptr):
    """Cast the address 'ptr' to a non-GC instance of 'Class'."""
    from rpython.rtyper.rclass import NONGCOBJECTPTR
    nongc_ptr = llmemory.cast_adr_to_ptr(ptr, NONGCOBJECTPTR)
    return cast_base_ptr_to_nongc_instance(Class, nongc_ptr)