def op_int_xor(x, y):
    """XOR of two integers; used in computing hashes.

    AddressAsInt operands are first forced to plain ints.
    """
    operands = []
    for value in (x, y):
        if isinstance(value, AddressAsInt):
            value = llmemory.cast_adr_to_int(value.adr)
        assert isinstance(value, int)
        operands.append(value)
    return operands[0] ^ operands[1]
def op_int_xor(x, y):
    """XOR of two integers; used in computing hashes.

    AddressAsInt operands are first forced to plain ints.
    """
    ints = []
    for operand in (x, y):
        if isinstance(operand, AddressAsInt):
            operand = llmemory.cast_adr_to_int(operand.adr)
        assert isinstance(operand, int)
        ints.append(operand)
    return ints[0] ^ ints[1]
def fn(n):
    # Cast the address of 'p' (from the enclosing scope) to an int,
    # selecting the cast mode from n: 2 -> emulated, 4 -> symbolic,
    # anything else -> forced.
    adr = llmemory.cast_ptr_to_adr(p)
    mode = {2: "emulated", 4: "symbolic"}.get(n, "forced")
    return llmemory.cast_adr_to_int(adr, mode)
def fn(n):
    # Cast the address of 'p' (closed over from the enclosing test) to
    # an int; n picks the cast mode.
    address = llmemory.cast_ptr_to_adr(p)
    if n == 2:
        mode = "emulated"
    elif n == 4:
        mode = "symbolic"
    else:
        mode = "forced"
    return llmemory.cast_adr_to_int(address, mode)
def test_force_to_int(self):
    """cast_adr_to_int(..., "forced") yields a real, repeatable int."""
    struct_type = lltype.Struct('S')
    raw = lltype.malloc(struct_type, flavor='raw')
    addr = llmemory.cast_ptr_to_adr(raw)
    forced = llmemory.cast_adr_to_int(addr, "forced")
    assert type(forced) is int
    # forcing the same address twice must give the same integer
    assert forced == llmemory.cast_adr_to_int(addr, "forced")
    lltype.free(raw, flavor='raw')
def get_identityhash_from_addr(self, obj):
    """Identity hash derived from the address 'obj'.

    After translation: the raw address value.  Before translation: the
    offset from self.space inside the fake arena, falling back to the
    raw value for addresses not inside an arena.
    """
    if translated_to_c():
        return llmemory.cast_adr_to_int(obj)       # direct case
    try:
        adr = llarena.getfakearenaaddress(obj)     # -> arena address
    except RuntimeError:
        return llmemory.cast_adr_to_int(obj)       # not in an arena...
    return adr - self.space
def get_identityhash_from_addr(self, obj):
    """Identity hash derived from the address 'obj'.

    Translated code uses the raw address; untranslated code prefers the
    arena-relative offset so hashes stay stable across fake-arena moves.
    """
    if not translated_to_c():
        try:
            arena_adr = llarena.getfakearenaaddress(obj)
        except RuntimeError:
            pass            # not in an arena: fall through to raw value
        else:
            return arena_adr - self.space
    return llmemory.cast_adr_to_int(obj)   # direct case
def test_address_eq_as_int():
    """Address equality should agree with integer casts of addresses."""
    base = arena_malloc(50, False)
    arena_reserve(base, precomputed_size)
    as_ptr = llmemory.cast_adr_to_ptr(base, SPTR)
    roundtrip = llmemory.cast_ptr_to_adr(as_ptr)
    assert base == roundtrip
    assert not (base != roundtrip)
    assert (base + 1) != roundtrip
    assert not ((base + 1) == roundtrip)
    py.test.skip("cast_adr_to_int() is hard to get consistent")
    # unreachable while the skip above is in place -- kept as the
    # aspirational contract for cast_adr_to_int()
    assert llmemory.cast_adr_to_int(base) == \
           llmemory.cast_adr_to_int(roundtrip)
    assert llmemory.cast_adr_to_int(base + 1) == \
           llmemory.cast_adr_to_int(roundtrip) + 1
def test_address_eq_as_int():
    """Address equality should agree with integer casts of addresses."""
    addr = arena_malloc(50, False)
    arena_reserve(addr, precomputed_size)
    ptr = llmemory.cast_adr_to_ptr(addr, SPTR)
    addr_back = llmemory.cast_ptr_to_adr(ptr)
    assert addr == addr_back
    assert not (addr != addr_back)
    assert (addr + 1) != addr_back
    assert not ((addr + 1) == addr_back)
    py.test.skip("cast_adr_to_int() is hard to get consistent")
    # dead code below the skip, kept to document the intended property
    assert llmemory.cast_adr_to_int(addr) == \
           llmemory.cast_adr_to_int(addr_back)
    assert llmemory.cast_adr_to_int(addr + 1) == \
           llmemory.cast_adr_to_int(addr_back) + 1
def id(self, ptr):
    """Default implementation for id(), assuming that "external" objects
    never move.  Overriden in the HybridGC."""
    addr = llmemory.cast_ptr_to_adr(ptr)
    # Tagged pointers and external objects keep their raw value as id.
    if not self.is_valid_gc_object(addr) or self._is_external(addr):
        return llmemory.cast_adr_to_int(addr)
    # tagged pointers have ids of the form 2n + 1;
    # external objects have ids of the form 4n (due to word alignment);
    # self._compute_id returns addresses of the form 2n + 1, so doubling
    # yields 4n + 2 -- disjoint from both groups, hence no clashes.
    return llmemory.cast_adr_to_int(self._compute_id(addr)) * 2
def identityhash(self, gcobj):
    """Return the identity hash of 'gcobj' for a moving (semispace) GC.

    If the object already carries a hash field appended at its end,
    return that.  Otherwise make sure one word is reserved at the top
    of the space (so a future collection can append the hash field),
    mark the header, and return the current address as the hash.
    """
    # The following code should run at most twice: once normally, plus
    # at most one retry after gc_obtain_free_space below.
    while 1:
        obj = llmemory.cast_adr_to_int  # (placeholder removed below)
        obj = llmemory.cast_ptr_to_adr(gcobj)
        hdr = self.header(obj)
        #
        if hdr.tid & GCFLAG_HASHFIELD:  # the hash is in a field at the end
            obj += self.get_size(obj)
            return obj.signed[0]
        #
        if not (hdr.tid & GCFLAG_HASHTAKEN):
            # It's the first time we ask for a hash, and it's not an
            # external object.  Shrink the top of space by the extra
            # hash word that will be needed after a collect.
            shrunk_top = self.top_of_space - llmemory.sizeof(lltype.Signed)
            if shrunk_top < self.free:
                # Cannot shrink!  Do a collection, asking for at least
                # one word of free space, and try again.  May raise
                # MemoryError.  Obscure: not called directly, but
                # across an llop, to make sure that there is the
                # correct push_roots/pop_roots around the call...
                llop.gc_obtain_free_space(llmemory.Address,
                                          llmemory.sizeof(lltype.Signed))
                continue
            # Now we can have side-effects: set GCFLAG_HASHTAKEN
            # and lower the top of space.
            self.top_of_space = shrunk_top
            hdr.tid |= GCFLAG_HASHTAKEN
        #
        return llmemory.cast_adr_to_int(obj)  # direct case
def id(self, ptr):
    """Return a stable integer id for the object 'ptr' points to."""
    obj = llmemory.cast_ptr_to_adr(ptr)
    # external objects get their id computed by a dedicated helper
    if self.header(obj).tid & GCFLAG_EXTERNAL:
        result = self._compute_id_for_external(obj)
    else:
        result = self._compute_id(obj)
    return llmemory.cast_adr_to_int(result)
def get_address_of_gcref(self, gcref): assert lltype.typeOf(gcref) == llmemory.GCREF # first look in the hashtable, using an inexact hash (fails after # the object moves) addr = llmemory.cast_ptr_to_adr(gcref) hash = llmemory.cast_adr_to_int(addr) hash -= hash >> self.HASHTABLE_BITS hash &= self.HASHTABLE_SIZE - 1 addr_ref = self.hashtable[hash] # the following test is safe anyway, because the addresses found # in the hashtable are always the addresses of nonmovable stuff # ('addr_ref' is an address inside self.list, not directly the # address of a real moving GC object -- that's 'addr_ref.address[0]'.) if addr_ref.address[0] == addr: return addr_ref # if it fails, add an entry to the list if self.nextindex == len(self.list): # reallocate first, increasing a bit the size every time self.oldlists.append(self.list) self.list = self.alloc_gcref_list(len(self.list) // 4 * 5) self.nextindex = 0 # add it index = self.nextindex self.list[index] = gcref addr_ref = lltype.direct_ptradd(lltype.direct_arrayitems(self.list), index) addr_ref = llmemory.cast_ptr_to_adr(addr_ref) self.nextindex = index + 1 # record it in the hashtable self.hashtable[hash] = addr_ref return addr_ref
def start_of_page(addr, page_size):
    """Return the address of the start of the page that contains 'addr'."""
    if not we_are_translated():
        return _start_of_page_untranslated(addr, page_size)
    # translated: round the address down to a multiple of page_size
    return addr - llmemory.cast_adr_to_int(addr) % page_size
def get_address_of_gcref(self, gcref): assert lltype.typeOf(gcref) == llmemory.GCREF # first look in the hashtable, using an inexact hash (fails after # the object moves) addr = llmemory.cast_ptr_to_adr(gcref) hash = llmemory.cast_adr_to_int(addr) hash -= hash >> self.HASHTABLE_BITS hash &= self.HASHTABLE_SIZE - 1 addr_ref = self.hashtable[hash] # the following test is safe anyway, because the addresses found # in the hashtable are always the addresses of nonmovable stuff # ('addr_ref' is an address inside self.list, not directly the # address of a real moving GC object -- that's 'addr_ref.address[0]'.) if addr_ref.address[0] == addr: return addr_ref # if it fails, add an entry to the list if self.nextindex == len(self.list): # reallocate first, increasing a bit the size every time self.oldlists.append(self.list) self.list = self.alloc_gcref_list(len(self.list) // 4 * 5) self.nextindex = 0 # add it index = self.nextindex self.list[index] = gcref addr_ref = lltype.direct_ptradd(lltype.direct_arrayitems(self.list), index) addr_ref = llmemory.cast_ptr_to_adr(addr_ref) self.nextindex = index + 1 # record it in the hashtable self.hashtable[hash] = addr_ref return addr_ref
def cast_whatever_to_int(T, value):
    """Cast 'value' of lltype 'T' (pointer, Address or primitive) to int."""
    if T is llmemory.Address:
        return llmemory.cast_adr_to_int(value)
    if isinstance(T, lltype.Ptr):
        return lltype.cast_ptr_to_int(value)
    return lltype.cast_primitive(lltype.Signed, value)
def identityhash(self, gcobj):
    """Return the identity hash of 'gcobj' for a moving (semispace) GC.

    Returns the trailing hash field if the object already has one;
    otherwise reserves one word at the top of the space (for a future
    collection to append the hash field), marks the header with
    GCFLAG_HASHTAKEN, and returns the current address.
    """
    # The following code should run at most twice: once normally, plus
    # at most one retry after gc_obtain_free_space below.
    while 1:
        obj = llmemory.cast_ptr_to_adr(gcobj)
        hdr = self.header(obj)
        #
        if hdr.tid & GCFLAG_HASHFIELD:  # the hash is in a field at the end
            obj += self.get_size(obj)
            return obj.signed[0]
        #
        if not (hdr.tid & GCFLAG_HASHTAKEN):
            # It's the first time we ask for a hash, and it's not an
            # external object.  Shrink the top of space by the extra
            # hash word that will be needed after a collect.
            shrunk_top = self.top_of_space - llmemory.sizeof(lltype.Signed)
            if shrunk_top < self.free:
                # Cannot shrink!  Do a collection, asking for at least
                # one word of free space, and try again.  May raise
                # MemoryError.  Obscure: not called directly, but
                # across an llop, to make sure that there is the
                # correct push_roots/pop_roots around the call...
                llop.gc_obtain_free_space(llmemory.Address,
                                          llmemory.sizeof(lltype.Signed))
                continue
            # Now we can have side-effects: set GCFLAG_HASHTAKEN
            # and lower the top of space.
            self.top_of_space = shrunk_top
            hdr.tid |= GCFLAG_HASHTAKEN
        #
        return llmemory.cast_adr_to_int(obj)  # direct case
def start_of_page(addr, page_size):
    """Return the address of the start of the page that contains 'addr'."""
    if we_are_translated():
        # align down: subtract the remainder modulo page_size
        remainder = llmemory.cast_adr_to_int(addr) % page_size
        return addr - remainder
    return _start_of_page_untranslated(addr, page_size)
def writeobj(self, obj):
    """Dump one object: its address, member index and size, followed by
    the references it contains (via self._writeref), then -1."""
    gc = self.gc
    typeid = gc.get_type_id(obj)
    self.write(llmemory.cast_adr_to_int(obj))
    self.write(gc.get_member_index(typeid))
    self.write(gc.get_size_incl_hash(obj))
    gc.trace(obj, self._writeref, None)
    self.write(-1)   # terminator for the reference list
def identityhash(self, obj):
    """Identity hash: the stored hash field if present, else the address."""
    addr = llmemory.cast_ptr_to_adr(obj)
    hdr = self.header(addr)
    if not (ord(hdr.flags) & FL_WITHHASH):
        return llmemory.cast_adr_to_int(addr)
    # the hash was saved in an extra word appended after the object
    return (addr + self.get_size(addr)).signed[0]
def identityhash(self, obj):
    """Identity hash: the stored hash field if present, else the address."""
    addr = llmemory.cast_ptr_to_adr(obj)
    header = self.header(addr)
    if ord(header.flags) & FL_WITHHASH:
        # hash lives in an extra word right after the object data
        end = addr + self.get_size(addr)
        return end.signed[0]
    return llmemory.cast_adr_to_int(addr)
def revealconst(self, T):
    """Convert the constant address self.addr to the low-level type T."""
    if T is llmemory.Address:
        return self.addr
    if T is lltype.Signed:
        return llmemory.cast_adr_to_int(self.addr)
    if isinstance(T, lltype.Ptr):
        return llmemory.cast_adr_to_ptr(self.addr, T)
    assert 0, "XXX not implemented"
def cast_adr_to_whatever(T, addr):
    """Convert the address 'addr' to the low-level type T."""
    if T is llmemory.Address:
        return addr
    if T is lltype.Signed:
        return llmemory.cast_adr_to_int(addr)
    if isinstance(T, lltype.Ptr):
        return llmemory.cast_adr_to_ptr(addr, T)
    assert 0, "XXX not implemented"
def load_now(self, asm, loc):
    """Emit code loading this constant address into 'loc' -- either
    directly into a GPR, or via rSCRATCH into a stack slot at rFP."""
    value = llmemory.cast_adr_to_int(self.addr)
    if not loc.is_register:
        # stack slot: go through the scratch register
        asm.load_word(rSCRATCH, value)
        asm.stw(rSCRATCH, rFP, loc.offset)
        return
    assert isinstance(loc, insn.GPR)
    asm.load_word(loc.number, value)
def id(self, ptr):
    """Default implementation for id(), assuming that "external" objects
    never move.  Overriden in the HybridGC."""
    obj = llmemory.cast_ptr_to_adr(ptr)
    # external objects never move, so their own address serves as id
    if self._is_external(obj):
        result = obj
    else:
        result = self._compute_id(obj)
    return llmemory.cast_adr_to_int(result)
def revealconst(self, T):
    """Convert the constant address self.addr to the low-level type T."""
    addr = self.addr
    if T is llmemory.Address:
        return addr
    if isinstance(T, lltype.Ptr):
        return llmemory.cast_adr_to_ptr(addr, T)
    if T is lltype.Signed:
        return llmemory.cast_adr_to_int(addr)
    assert 0, "XXX not implemented"
def load_now(self, asm, loc):
    """Emit code loading this constant address into 'loc' (GPR or a
    stack slot relative to rFP)."""
    word = llmemory.cast_adr_to_int(self.addr)
    if loc.is_register:
        assert isinstance(loc, insn.GPR)
        asm.load_word(loc.number, word)
    else:
        # spill path: materialize in rSCRATCH, then store to the slot
        asm.load_word(rSCRATCH, word)
        asm.stw(rSCRATCH, rFP, loc.offset)
def make_hashable_int(i):
    """Return 'i' unchanged, except that (before translation only) an
    AddressAsInt is forced to a plain emulated integer hash."""
    from pypy.rpython.lltypesystem.ll2ctypes import NotCtypesAllocatedStructure
    if we_are_translated() or not isinstance(i, llmemory.AddressAsInt):
        return i
    # Warning: such a hash changes at the time of translation
    adr = heaptracker.int2adr(i)
    try:
        return llmemory.cast_adr_to_int(adr, "emulated")
    except NotCtypesAllocatedStructure:
        return 12345  # use an arbitrary number for the hash
def make_hashable_int(i):
    """Return 'i', forcing a pre-translation AddressAsInt to a plain int
    suitable for hashing."""
    from pypy.rpython.lltypesystem.ll2ctypes import NotCtypesAllocatedStructure
    if not we_are_translated() and isinstance(i, llmemory.AddressAsInt):
        # Warning: such a hash changes at the time of translation
        adr = heaptracker.int2adr(i)
        try:
            i = llmemory.cast_adr_to_int(adr, "emulated")
        except NotCtypesAllocatedStructure:
            i = 12345  # use an arbitrary number for the hash
    return i
def test_repr_ll2ctypes():
    """repr of a BoxInt holding the symbolic address of a freed
    ll2ctypes object falls back to the arbitrary hash value 12345."""
    raw = lltype.malloc(rffi.VOIDPP.TO, 10, flavor='raw')
    # force it to be a ll2ctypes object
    raw = rffi.cast(rffi.VOIDPP, rffi.cast(rffi.LONG, raw))
    adr = llmemory.cast_ptr_to_adr(raw)
    lltype.free(raw, flavor='raw')
    symbolic = llmemory.cast_adr_to_int(adr, 'symbolic')
    rep = BoxInt(symbolic).repr_rpython()
    # 12345 is the arbitrary hash used once the structure is freed
    assert rep.startswith('12345/')
def test_repr_ll2ctypes():
    """BoxInt repr of a symbolic address of a freed ll2ctypes object
    must start with the fallback hash 12345."""
    voidpp = lltype.malloc(rffi.VOIDPP.TO, 10, flavor='raw')
    # force it to be a ll2ctypes object
    voidpp = rffi.cast(rffi.VOIDPP, rffi.cast(rffi.LONG, voidpp))
    address = llmemory.cast_ptr_to_adr(voidpp)
    lltype.free(voidpp, flavor='raw')
    intval = llmemory.cast_adr_to_int(address, 'symbolic')
    box = BoxInt(intval)
    # the arbitrary fallback hash for freed structures is 12345
    assert box.repr_rpython().startswith('12345/')
def _get_object_hash(self, obj, objsize, tid):
    # Returns the hash of the object, which must not be GC_HASH_NOTTAKEN.
    # Dispatch on the hash-state bits stored in the tid.
    gc_hash = tid & GCFLAG_HASHMASK
    if gc_hash == GC_HASH_HASFIELD:
        # hash stored in an extra word appended after the object data
        obj = llarena.getfakearenaaddress(obj)
        return (obj + objsize).signed[0]
    elif gc_hash == GC_HASH_TAKEN_ADDR:
        # hash was taken while the object sat at its current address
        return llmemory.cast_adr_to_int(obj)
    elif gc_hash == GC_HASH_TAKEN_NURS:
        # hash taken while in the nursery; derived from its nursery
        # position (see _compute_current_nursery_hash)
        return self._compute_current_nursery_hash(obj)
    else:
        assert 0, "gc_hash == GC_HASH_NOTTAKEN"
def _get_object_hash(self, obj, objsize, tid):
    # Returns the hash of the object, which must not be GC_HASH_NOTTAKEN.
    # The two GCFLAG_HASHMASK bits of 'tid' encode where the hash lives.
    gc_hash = tid & GCFLAG_HASHMASK
    if gc_hash == GC_HASH_HASFIELD:
        # hash stored in a trailing word right after the object
        obj = llarena.getfakearenaaddress(obj)
        return (obj + objsize).signed[0]
    elif gc_hash == GC_HASH_TAKEN_ADDR:
        # hash taken at the object's current (non-nursery) address
        return llmemory.cast_adr_to_int(obj)
    elif gc_hash == GC_HASH_TAKEN_NURS:
        # hash taken while the object was in the nursery
        return self._compute_current_nursery_hash(obj)
    else:
        assert 0, "gc_hash == GC_HASH_NOTTAKEN"
def id(self, ptr): obj = llmemory.cast_ptr_to_adr(ptr) # is it a tagged pointer? if not self.is_valid_gc_object(obj): return llmemory.cast_adr_to_int(obj) if self._is_external(obj): # a prebuilt or rawmalloced object if self.is_last_generation(obj): # a generation 3 object may be one that used to live in # the semispace. So we still need to check if the object had # its id taken before. If not, we can use its address as its # id as it is not going to move any more. result = self.objects_with_id.get(obj, obj) else: # a generation 2 external object was never non-external in # the past, so it cannot be listed in self.objects_with_id. result = obj else: result = self._compute_id(obj) # common case return llmemory.cast_adr_to_int(result) * 2 # see comment in base.py
def id(self, ptr): obj = llmemory.cast_ptr_to_adr(ptr) # is it a tagged pointer? if not self.is_valid_gc_object(obj): return llmemory.cast_adr_to_int(obj) if self._is_external(obj): # a prebuilt or rawmalloced object if self.is_last_generation(obj): # a generation 3 object may be one that used to live in # the semispace. So we still need to check if the object had # its id taken before. If not, we can use its address as its # id as it is not going to move any more. result = self.objects_with_id.get(obj, obj) else: # a generation 2 external object was never non-external in # the past, so it cannot be listed in self.objects_with_id. result = obj else: result = self._compute_id(obj) # common case return llmemory.cast_adr_to_int(result) * 2 # see comment in base.py
def _generalcast(T, value):
    """Cast 'value' to lltype 'T' (pointer, Address, or primitive)."""
    if isinstance(T, lltype.Ptr):
        return lltype.cast_pointer(T, value)
    if T == llmemory.Address:
        return llmemory.cast_ptr_to_adr(value)
    # primitive target: first turn addresses/pointers into integers
    T1 = lltype.typeOf(value)
    if T1 is llmemory.Address:
        value = llmemory.cast_adr_to_int(value)
    elif isinstance(T1, lltype.Ptr):
        value = lltype.cast_ptr_to_int(value)
    return lltype.cast_primitive(T, value)
def _generalcast(T, value):
    """Cast 'value' to type 'T' (pointer, Address, static method, or
    primitive); a no-op when the type already matches."""
    if lltype.typeOf(value) == T:
        return value
    if isinstance(T, lltype.Ptr):
        return lltype.cast_pointer(T, value)
    if T == llmemory.Address:
        return llmemory.cast_ptr_to_adr(value)
    if isinstance(T, ootype.StaticMethod):
        fn = value._obj
        return ootype._static_meth(T, graph=fn.graph, _callable=fn._callable)
    # primitive target: first turn addresses/pointers into integers
    T1 = lltype.typeOf(value)
    if T1 is llmemory.Address:
        value = llmemory.cast_adr_to_int(value)
    elif isinstance(T1, lltype.Ptr):
        value = lltype.cast_ptr_to_int(value)
    return lltype.cast_primitive(T, value)
def identityhash(self, gcobj):
    """Identity hash for a mark-compact GC object.

    Returns the trailing hash field if present; otherwise marks the
    header with GCFLAG_HASHTAKEN and returns the current address.
    """
    # Unlike SemiSpaceGC.identityhash(), this function does not have
    # to care about reducing top_of_space.  The reason is as
    # follows.  When we collect, each object either moves to the
    # left or stays where it is.  If it moves to the left (and if it
    # has GCFLAG_HASHTAKEN), we can give it a hash field, and the
    # end of the new object cannot move to the right of the end of
    # the old object.  If it stays where it is, then we don't need
    # to add the hash field.  So collecting can never actually grow
    # the consumed size.
    obj = llmemory.cast_ptr_to_adr(gcobj)
    hdr = self.header(obj)
    #
    if hdr.tid & GCFLAG_HASHFIELD:  # the hash is in a field at the end
        obj += self.get_size(obj)
        return obj.signed[0]
    #
    hdr.tid |= GCFLAG_HASHTAKEN
    return llmemory.cast_adr_to_int(obj)  # direct case
def _generalcast(T, value):
    """Cast 'value' to type 'T', handling pointers, Address, ootype
    static methods and primitives; identity when types already match."""
    if lltype.typeOf(value) == T:
        return value
    elif isinstance(T, lltype.Ptr):
        return lltype.cast_pointer(T, value)
    elif T == llmemory.Address:
        return llmemory.cast_ptr_to_adr(value)
    elif isinstance(T, ootype.StaticMethod):
        # rebuild the static method wrapper around the same graph
        fn = value._obj
        return ootype._static_meth(T, graph=fn.graph, _callable=fn._callable)
    else:
        source_type = lltype.typeOf(value)
        if source_type is llmemory.Address:
            value = llmemory.cast_adr_to_int(value)
        elif isinstance(source_type, lltype.Ptr):
            value = lltype.cast_ptr_to_int(value)
        return lltype.cast_primitive(T, value)
def identityhash(self, gcobj):
    """Identity hash for a mark-compact GC object.

    Returns the trailing hash field when the object has one; otherwise
    sets GCFLAG_HASHTAKEN and returns the object's current address.
    """
    # Unlike SemiSpaceGC.identityhash(), this function does not have
    # to care about reducing top_of_space.  The reason is as
    # follows.  When we collect, each object either moves to the
    # left or stays where it is.  If it moves to the left (and if it
    # has GCFLAG_HASHTAKEN), we can give it a hash field, and the
    # end of the new object cannot move to the right of the end of
    # the old object.  If it stays where it is, then we don't need
    # to add the hash field.  So collecting can never actually grow
    # the consumed size.
    obj = llmemory.cast_ptr_to_adr(gcobj)
    hdr = self.header(obj)
    #
    if hdr.tid & GCFLAG_HASHFIELD:  # the hash is in a field at the end
        obj += self.get_size(obj)
        return obj.signed[0]
    #
    hdr.tid |= GCFLAG_HASHTAKEN
    return llmemory.cast_adr_to_int(obj)  # direct case
def _make_a_copy_with_tid(self, obj, objsize, tid):
    """Copy 'obj' (payload size 'objsize') to self.free with header
    'tid', preserving the identity hash, and advance self.free.
    Returns the address of the new object (past its GC header)."""
    totalsize = self.size_gc_header() + objsize
    newaddr = self.free
    llarena.arena_reserve(newaddr, totalsize)
    raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
    #
    # check if we need to write a hash value at the end of the new obj
    if tid & (GCFLAG_HASHTAKEN|GCFLAG_HASHFIELD):
        if tid & GCFLAG_HASHFIELD:
            # the old object already carried its hash in a trailing field
            hash = (obj + objsize).signed[0]
        else:
            # hash was the old address; it must now travel with the
            # object, so materialize it as a trailing field
            hash = llmemory.cast_adr_to_int(obj)
            tid |= GCFLAG_HASHFIELD
        (newaddr + totalsize).signed[0] = hash
        totalsize += llmemory.sizeof(lltype.Signed)
    #
    self.free += totalsize
    newhdr = llmemory.cast_adr_to_ptr(newaddr, lltype.Ptr(self.HDR))
    newhdr.tid = tid
    newobj = newaddr + self.size_gc_header()
    return newobj
def _make_a_copy_with_tid(self, obj, objsize, tid):
    """Copy 'obj' (payload size 'objsize') to self.free writing 'tid'
    into the new header; appends a saved hash word when the hash was
    already taken, and advances self.free.  Returns the new object."""
    totalsize = self.size_gc_header() + objsize
    newaddr = self.free
    llarena.arena_reserve(newaddr, totalsize)
    raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
    #
    # check if we need to write a hash value at the end of the new obj
    if tid & (GCFLAG_HASHTAKEN | GCFLAG_HASHFIELD):
        if tid & GCFLAG_HASHFIELD:
            # hash already stored in a trailing field: copy it over
            hash = (obj + objsize).signed[0]
        else:
            # hash was implicitly the old address; freeze it into a
            # trailing field so it survives the move
            hash = llmemory.cast_adr_to_int(obj)
            tid |= GCFLAG_HASHFIELD
        (newaddr + totalsize).signed[0] = hash
        totalsize += llmemory.sizeof(lltype.Signed)
    #
    self.free += totalsize
    newhdr = llmemory.cast_adr_to_ptr(newaddr, lltype.Ptr(self.HDR))
    newhdr.tid = tid
    newobj = newaddr + self.size_gc_header()
    return newobj
def make_a_nonmoving_copy(self, obj, objsize):
    """Copy 'obj' into externally-allocated (nonmoving) memory and
    register it as a generation-3 rawmalloced object.

    Returns the new object's address, or llmemory.NULL if the external
    allocation failed (we cannot raise MemoryError during a collect).
    """
    # NB. the object can have a finalizer or be a weakref, but
    # it's not an issue.
    totalsize = self.size_gc_header() + objsize
    tid = self.header(obj).tid
    if tid & (GCFLAG_HASHTAKEN|GCFLAG_HASHFIELD):
        # reserve one extra word for the identity hash
        totalsize_incl_hash = totalsize + llmemory.sizeof(lltype.Signed)
    else:
        totalsize_incl_hash = totalsize
    newaddr = self.allocate_external_object(totalsize_incl_hash)
    if not newaddr:
        return llmemory.NULL   # can't raise MemoryError during a collect()
    if self.config.gcconfig.debugprint:
        self._nonmoving_copy_count += 1
        self._nonmoving_copy_size += raw_malloc_usage(totalsize)

    llmemory.raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
    # check if we need to write a hash value at the end of the new obj
    if tid & (GCFLAG_HASHTAKEN|GCFLAG_HASHFIELD):
        if tid & GCFLAG_HASHFIELD:
            # hash already stored at the end of the old object
            hash = (obj + objsize).signed[0]
        else:
            # hash was the old address: freeze it into a trailing field
            hash = llmemory.cast_adr_to_int(obj)
            tid |= GCFLAG_HASHFIELD
        (newaddr + totalsize).signed[0] = hash
    #
    # GCFLAG_UNVISITED is not set
    # GCFLAG_NO_HEAP_PTRS is not set either, conservatively.  It may be
    # set by the next collection's collect_last_generation_roots().
    # This old object is immediately put at generation 3.
    newobj = newaddr + self.size_gc_header()
    hdr = self.header(newobj)
    hdr.tid = tid | self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS
    ll_assert(self.is_last_generation(newobj),
              "make_a_nonmoving_copy: object too young")
    self.gen3_rawmalloced_objects.append(newobj)
    self.last_generation_root_objects.append(newobj)
    self.rawmalloced_objects_to_trace.append(newobj)   # visit me
    return newobj
def make_a_nonmoving_copy(self, obj, objsize):
    """Copy 'obj' into external (nonmoving) memory as a generation-3
    rawmalloced object, carrying the identity hash along if taken.

    Returns llmemory.NULL when the external allocation fails, since
    raising MemoryError is not allowed inside a collection.
    """
    # NB. the object can have a finalizer or be a weakref, but
    # it's not an issue.
    totalsize = self.size_gc_header() + objsize
    tid = self.header(obj).tid
    if tid & (GCFLAG_HASHTAKEN | GCFLAG_HASHFIELD):
        # one extra word after the object to hold the hash
        totalsize_incl_hash = totalsize + llmemory.sizeof(lltype.Signed)
    else:
        totalsize_incl_hash = totalsize
    newaddr = self.allocate_external_object(totalsize_incl_hash)
    if not newaddr:
        return llmemory.NULL   # can't raise MemoryError during a collect()
    if self.config.gcconfig.debugprint:
        self._nonmoving_copy_count += 1
        self._nonmoving_copy_size += raw_malloc_usage(totalsize)

    llmemory.raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
    # check if we need to write a hash value at the end of the new obj
    if tid & (GCFLAG_HASHTAKEN | GCFLAG_HASHFIELD):
        if tid & GCFLAG_HASHFIELD:
            # hash already stored in the old object's trailing word
            hash = (obj + objsize).signed[0]
        else:
            # hash was implicitly the old address: store it explicitly
            hash = llmemory.cast_adr_to_int(obj)
            tid |= GCFLAG_HASHFIELD
        (newaddr + totalsize).signed[0] = hash
    #
    # GCFLAG_UNVISITED is not set
    # GCFLAG_NO_HEAP_PTRS is not set either, conservatively.  It may be
    # set by the next collection's collect_last_generation_roots().
    # This old object is immediately put at generation 3.
    newobj = newaddr + self.size_gc_header()
    hdr = self.header(newobj)
    hdr.tid = tid | self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS
    ll_assert(self.is_last_generation(newobj),
              "make_a_nonmoving_copy: object too young")
    self.gen3_rawmalloced_objects.append(newobj)
    self.last_generation_root_objects.append(newobj)
    self.rawmalloced_objects_to_trace.append(newobj)   # visit me
    return newobj
def operand2(self):
    """Render self.addr as a decimal integer string, or 'null' for 0."""
    text = str(llmemory.cast_adr_to_int(self.addr))
    if text == '0':
        return 'null'
    return text
def is_valid_gc_object(self, addr):
    """True if 'addr' points to a real GC object (not NULL, and not a
    tagged pointer when tagged pointers are enabled)."""
    if addr == NULL:
        return False
    if not self.config.taggedpointers:
        return True
    # with tagged pointers, odd address values encode tagged data
    return llmemory.cast_adr_to_int(addr) & 1 == 0
def ll_identityhash(addr):
    """Lazily-cached identity hash stored in the header at 'addr'.

    A stored value of 0 means "not computed yet"; the first call caches
    the bitwise-NOT of the address value.
    """
    hdr = llmemory.cast_adr_to_ptr(addr, HDRPTR)
    cached = hdr.hash
    if cached == 0:
        cached = ~llmemory.cast_adr_to_int(addr)
        hdr.hash = cached
    return cached
def ll_addrhash(addr1):
    """Hash of an address: simply its value cast to an integer."""
    return cast_adr_to_int(addr1)
def ll_str(self, a):
    """Return the address 'a' formatted as an unsigned hex string.

    The True flag is forwarded to ll_int2hex (presumably requesting the
    '0x' prefix -- confirm against rstr.ll_int2hex).
    """
    from pypy.rpython.lltypesystem.rstr import ll_str
    # renamed from 'id' to avoid shadowing the builtin id()
    addr_value = cast_adr_to_int(a)
    return ll_str.ll_int2hex(r_uint(addr_value), True)
def f():
    """Round-trip s1 through address and int casts; 1 if identity holds."""
    as_addr = llmemory.cast_ptr_to_adr(s1)
    as_int = llmemory.cast_adr_to_int(as_addr)
    back_addr = llmemory.cast_int_to_adr(as_int)
    back_ptr = llmemory.cast_adr_to_ptr(back_addr, lltype.Ptr(S1))
    return int(s1 == back_ptr)
def adr2int(addr):
    """Cast an address to an int.

    Returns an AddressAsInt object which can be cast back to an address.
    """
    return llmemory.cast_adr_to_int(addr, "symbolic")
def adr2int(addr):
    """Symbolically cast 'addr' to an int: the result is an AddressAsInt
    that can later be cast back to the original address."""
    return llmemory.cast_adr_to_int(addr, "symbolic")
def ll_identityhash(addr):
    """Lazily-cached identity hash stored in the header at 'addr'.

    A stored value of 0 means "not computed yet"; the first call caches
    the address value itself.
    """
    hdr = llmemory.cast_adr_to_ptr(addr, HDRPTR)
    cached = hdr.hash
    if cached == 0:
        cached = llmemory.cast_adr_to_int(addr)
        hdr.hash = cached
    return cached
def _writeref(self, pointer, _):
    """Write the integer value of the address stored at 'pointer' and
    queue the referenced object (via self.add) for later dumping."""
    target = pointer.address[0]
    self.write(llmemory.cast_adr_to_int(target))
    self.add(target)