def ll_arraycopy(source, dest, source_start, dest_start, length):
    """Copy 'length' items from the GC array 'source' (starting at index
    'source_start') into the GC array 'dest' (starting at 'dest_start').

    Supports non-overlapping copies only.  If the items are GC pointers,
    a write barrier is performed first so the GC keeps track of the
    pointers newly stored into 'dest'.
    """
    from pypy.rpython.lltypesystem.lloperation import llop
    # Fix: import lltype/llmemory locally, like the other copy of this
    # function does -- this function uses both but did not import them.
    from pypy.rpython.lltypesystem import lltype, llmemory
    from pypy.rlib.objectmodel import keepalive_until_here

    # supports non-overlapping copies only
    if not we_are_translated():
        if source == dest:
            assert (source_start + length <= dest_start or
                    dest_start + length <= source_start)

    TP = lltype.typeOf(source).TO
    assert TP == lltype.typeOf(dest).TO
    if isinstance(TP.OF, lltype.Ptr) and TP.OF.TO._gckind == 'gc':
        # perform a write barrier that copies necessary flags from
        # source to dest
        if not llop.gc_writebarrier_before_copy(lltype.Bool, source, dest):
            # if the write barrier is not supported, copy by hand
            for i in range(length):
                dest[i + dest_start] = source[i + source_start]
            return
    # Compute raw addresses of the first items to copy.  No GC operation
    # may happen from the first cast_ptr_to_adr() until raw_memcopy() is
    # done, because a collection could move the arrays.
    source_addr = llmemory.cast_ptr_to_adr(source)
    dest_addr = llmemory.cast_ptr_to_adr(dest)
    cp_source_addr = (source_addr + llmemory.itemoffsetof(TP, 0) +
                      llmemory.sizeof(TP.OF) * source_start)
    cp_dest_addr = (dest_addr + llmemory.itemoffsetof(TP, 0) +
                    llmemory.sizeof(TP.OF) * dest_start)
    llmemory.raw_memcopy(cp_source_addr, cp_dest_addr,
                         llmemory.sizeof(TP.OF) * length)
    # keep the arrays alive until after the raw copy
    keepalive_until_here(source)
    keepalive_until_here(dest)
def str_from_buffer(raw_buf, gc_buf, allocated_size, needed_size):
    """
    Converts from a pair returned by alloc_buffer to a high-level string.
    The returned string will be truncated to needed_size.
    """
    assert allocated_size >= needed_size

    if gc_buf and (allocated_size == needed_size):
        # the GC buffer already has exactly the needed size: reuse it
        return hlstrtype(gc_buf)

    new_buf = lltype.malloc(STRTYPE, needed_size)
    try:
        str_chars_offset = (offsetof(STRTYPE, 'chars') + \
                            itemoffsetof(STRTYPE.chars, 0))
        if gc_buf:
            src = cast_ptr_to_adr(gc_buf) + str_chars_offset
        else:
            src = cast_ptr_to_adr(raw_buf) + itemoffsetof(TYPEP.TO, 0)
        dest = cast_ptr_to_adr(new_buf) + str_chars_offset
        ## FIXME: This is bad, because dest could potentially move
        ## if there are threads involved.
        raw_memcopy(src, dest,
                    llmemory.sizeof(ll_char_type) * needed_size)
        # Bug fix: gc_buf must also stay alive until after raw_memcopy(),
        # otherwise the GC may move/free it while 'src' still points into
        # it.  The other copies of this function already do this.
        keepalive_until_here(gc_buf)
        return hlstrtype(new_buf)
    finally:
        keepalive_until_here(new_buf)
def copy_and_compact(self, obj, typeid, basesize, toaddr, grow_hash_field):
    """Move 'obj' to 'toaddr' during compaction, rebuilding its header.

    'basesize' is the size without any hash field.  If 'grow_hash_field'
    is true, the object grows a saved-hash field at its end; otherwise an
    already-saved hash field (if any) is carried over to the new location.
    """
    # restore the normal header
    hdr = self.header_forwarded(obj)
    # only the two lowest tid bits hold GC flags at this point
    gcflags = hdr.tid & 3
    if grow_hash_field:
        gcflags |= GCFLAG_SAVED_HASHFIELD
        hashvalue = self.get_identityhash_from_addr(obj)
    elif gcflags & GCFLAG_SAVED_HASHFIELD:
        # the hash value is stored just after the object's basesize
        fromaddr = llarena.getfakearenaaddress(obj)
        fromaddr -= self.gcheaderbuilder.size_gc_header
        hashvalue = (fromaddr + basesize).signed[0]
    else:
        hashvalue = 0     # not used
    #
    hdr.tid = self.combine(typeid, gcflags << first_gcflag_bit)
    #
    fromaddr = obj - self.gcheaderbuilder.size_gc_header
    # translated: source and destination may overlap, so memmove;
    # untranslated: the fake-arena emulation only supports raw_memcopy
    if translated_to_c():
        llmemory.raw_memmove(fromaddr, toaddr, basesize)
    else:
        llmemory.raw_memcopy(fromaddr, toaddr, basesize)
    #
    if gcflags & GCFLAG_SAVED_HASHFIELD:
        # re-append the saved hash after the moved object
        (toaddr + basesize).signed[0] = hashvalue
def make_a_nonmoving_copy(self, obj, objsize):
    """Copy 'obj' into an externally-allocated (non-moving) location and
    return the address of the copy, or NULL if allocation failed.

    NB. the object can have a finalizer or be a weakref, but
    it's not an issue.
    """
    totalsize = self.size_gc_header() + objsize
    newaddr = self.allocate_external_object(totalsize)
    if not newaddr:
        return llmemory.NULL   # can't raise MemoryError during a collect()
    if DEBUG_PRINT:
        # statistics only
        self._nonmoving_copy_count += 1
        self._nonmoving_copy_size += raw_malloc_usage(totalsize)

    # copy header + payload to the external location
    llmemory.raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
    newobj = newaddr + self.size_gc_header()
    hdr = self.header(newobj)
    hdr.tid |= self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS
    # GCFLAG_UNVISITED is not set
    # GCFLAG_NO_HEAP_PTRS is not set either, conservatively.  It may be
    # set by the next collection's collect_last_generation_roots().
    # This old object is immediately put at generation 3.
    ll_assert(self.is_last_generation(newobj),
              "make_a_nonmoving_copy: object too young")
    self.gen3_rawmalloced_objects.append(newobj)
    self.last_generation_root_objects.append(newobj)
    self.rawmalloced_objects_to_trace.append(newobj)   # visit me
    return newobj
def ll_arraycopy(source, dest, source_start, dest_start, length):
    """Copy 'length' items of GC array 'source' into GC array 'dest'.

    The slice starting at 'source_start' is copied to the slice starting
    at 'dest_start'.  Only non-overlapping copies are supported.  When the
    items are GC pointers a write barrier is issued first so the GC can
    track the pointers newly stored into 'dest'.
    """
    from pypy.rpython.lltypesystem.lloperation import llop
    from pypy.rpython.lltypesystem import lltype, llmemory
    from pypy.rlib.objectmodel import keepalive_until_here

    # overlap check, only while running untranslated
    if not we_are_translated():
        if source == dest:
            no_overlap = (source_start + length <= dest_start
                          or dest_start + length <= source_start)
            assert no_overlap

    ARR = lltype.typeOf(source).TO
    assert ARR == lltype.typeOf(dest).TO
    if isinstance(ARR.OF, lltype.Ptr) and ARR.OF.TO._gckind == 'gc':
        # items are GC pointers: let the GC copy the necessary flags
        # from source to dest first
        if not llop.gc_writebarrier_before_copy(lltype.Bool, source, dest):
            # write barrier unsupported: fall back to an item-by-item copy
            i = 0
            while i < length:
                dest[i + dest_start] = source[i + source_start]
                i += 1
            return
    # Raw copy.  From the first cast_ptr_to_adr() until raw_memcopy()
    # completes, no GC operation may occur (it could move the arrays).
    itemsize = llmemory.sizeof(ARR.OF)
    first_item = llmemory.itemoffsetof(ARR, 0)
    src_adr = (llmemory.cast_ptr_to_adr(source)
               + first_item + itemsize * source_start)
    dst_adr = (llmemory.cast_ptr_to_adr(dest)
               + first_item + itemsize * dest_start)
    llmemory.raw_memcopy(src_adr, dst_adr, itemsize * length)
    # both arrays must outlive the raw copy above
    keepalive_until_here(source)
    keepalive_until_here(dest)
def ll_shrink_array(p, smallerlength):
    """Return a version of the GcStruct 'p' with its var-sized array part
    truncated to 'smallerlength' items.  May shrink in place (done by the
    GC) or allocate and fill a fresh copy.
    """
    from pypy.rpython.lltypesystem.lloperation import llop
    from pypy.rlib.objectmodel import keepalive_until_here

    if llop.shrink_array(lltype.Bool, p, smallerlength):
        return p    # done by the GC
    # XXX we assume for now that the type of p is GcStruct containing a
    # variable array, with no further pointers anywhere, and exactly one
    # field in the fixed part -- like STR and UNICODE.

    TP = lltype.typeOf(p).TO
    newp = lltype.malloc(TP, smallerlength)

    # copy the single fixed field (e.g. 'hash' for STR/UNICODE)
    assert len(TP._names) == 2
    field = getattr(p, TP._names[0])
    setattr(newp, TP._names[0], field)

    # raw-copy the first 'smallerlength' items of the array part;
    # no GC operation may happen between the casts and the memcopy
    ARRAY = getattr(TP, TP._arrayfld)
    offset = (llmemory.offsetof(TP, TP._arrayfld) +
              llmemory.itemoffsetof(ARRAY, 0))
    source_addr = llmemory.cast_ptr_to_adr(p) + offset
    dest_addr = llmemory.cast_ptr_to_adr(newp) + offset
    llmemory.raw_memcopy(source_addr, dest_addr,
                         llmemory.sizeof(ARRAY.OF) * smallerlength)
    # both objects must stay alive until after the raw copy
    keepalive_until_here(p)
    keepalive_until_here(newp)
    return newp
def copy_string_contents(s1, s2, s1start, s2start, lgt):
    """Copy 'lgt' characters from string s1 (starting at s1start) into
    string s2 (starting at s2start)."""
    assert s1start >= 0 and s2start >= 0 and lgt >= 0
    # compute the raw addresses of the first characters to copy;
    # no GC operation may happen between these casts and the memcopy,
    # since a collection could move the strings
    from_adr = llmemory.cast_ptr_to_adr(s1) + _str_ofs(s1start)
    to_adr = llmemory.cast_ptr_to_adr(s2) + _str_ofs(s2start)
    llmemory.raw_memcopy(from_adr, to_adr, llmemory.sizeof(CHAR_TP) * lgt)
def copy_string_contents(src, dst, srcstart, dststart, length):
    """Copy 'length' characters from string 'src' (starting at srcstart)
    into string 'dst' (starting at dststart)."""
    assert srcstart >= 0 and dststart >= 0 and length >= 0
    # use distinct names for the raw addresses instead of rebinding the
    # parameters; no GC operation may happen between the casts and the
    # raw_memcopy() below
    src_adr = llmemory.cast_ptr_to_adr(src) + _str_ofs(srcstart)
    dst_adr = llmemory.cast_ptr_to_adr(dst) + _str_ofs(dststart)
    llmemory.raw_memcopy(src_adr, dst_adr,
                         llmemory.sizeof(CHAR_TP) * length)
def make_a_nonmoving_copy(self, obj, objsize):
    """Copy 'obj' into an externally-allocated (non-moving) location and
    return the address of the copy, or NULL if allocation failed.

    NB. the object can have a finalizer or be a weakref, but
    it's not an issue.
    """
    totalsize = self.size_gc_header() + objsize
    newaddr = self.allocate_external_object(totalsize)
    if not newaddr:
        return llmemory.NULL   # can't raise MemoryError during a collect()
    if self.config.gcconfig.debugprint:
        # statistics only
        self._nonmoving_copy_count += 1
        self._nonmoving_copy_size += raw_malloc_usage(totalsize)

    # copy header + payload to the external location
    llmemory.raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
    newobj = newaddr + self.size_gc_header()
    hdr = self.header(newobj)
    hdr.tid |= self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS
    # GCFLAG_UNVISITED is not set
    # GCFLAG_NO_HEAP_PTRS is not set either, conservatively.  It may be
    # set by the next collection's collect_last_generation_roots().
    # This old object is immediately put at generation 3.
    ll_assert(self.is_last_generation(newobj),
              "make_a_nonmoving_copy: object too young")
    self.gen3_rawmalloced_objects.append(newobj)
    self.last_generation_root_objects.append(newobj)
    self.rawmalloced_objects_to_trace.append(newobj)   # visit me
    return newobj
def make_a_nonmoving_copy(self, obj, objsize):
    """Copy 'obj' into an externally-allocated (non-moving) location,
    preserving a computed identity hash in an extra word if needed.
    Returns the address of the copy, or NULL if allocation failed.

    NB. the object can have a finalizer or be a weakref, but
    it's not an issue.
    """
    totalsize = self.size_gc_header() + objsize
    tid = self.header(obj).tid
    if tid & GCFLAG_HASHMASK:
        # reserve one extra Signed word at the end for the hash value
        totalsize_incl_hash = totalsize + llmemory.sizeof(lltype.Signed)
    else:
        totalsize_incl_hash = totalsize
    newaddr = self.allocate_external_object(totalsize_incl_hash)
    if not newaddr:
        return llmemory.NULL   # can't raise MemoryError during a collect()
    # statistics
    self._nonmoving_copy_count += 1
    self._nonmoving_copy_size += raw_malloc_usage(totalsize)

    # copy header + payload (the hash word, if any, is written below)
    llmemory.raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)

    if tid & GCFLAG_HASHMASK:
        hash = self._get_object_hash(obj, objsize, tid)
        (newaddr + totalsize).signed[0] = hash
        tid |= GC_HASH_HASFIELD
    #
    # GCFLAG_UNVISITED is not set
    # GCFLAG_NO_HEAP_PTRS is not set either, conservatively.  It may be
    # set by the next collection's collect_last_generation_roots().
    # This old object is immediately put at generation 3.
    newobj = newaddr + self.size_gc_header()
    hdr = self.header(newobj)
    hdr.tid = tid | self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS
    ll_assert(self.is_last_generation(newobj),
              "make_a_nonmoving_copy: object too young")
    self.gen3_rawmalloced_objects.append(newobj)
    self.last_generation_root_objects.append(newobj)
    self.rawmalloced_objects_to_trace.append(newobj)   # visit me
    return newobj
def make_a_copy(self, obj, objsize):
    """Copy 'obj' (GC header included) to the current allocation pointer
    'self.free' and return the address of the new copy."""
    nbytes = self.size_gc_header() + objsize
    target = self.free
    self.free += nbytes
    # reserve the arena range, then move header + payload there
    llarena.arena_reserve(target, nbytes)
    raw_memcopy(obj - self.size_gc_header(), target, nbytes)
    return target + self.size_gc_header()
def longername(a, b, size):
    # Copy 'size' items of array 'b' into array 'a' with a single
    # raw_memcopy over the whole item range.
    if 1:
        baseofs = itemoffsetof(TP, 0)
        onesize = sizeof(TP.OF)
        # total byte span covering items [0, size): offset of item 0 plus
        # (size - 1) further items
        size = baseofs + onesize*(size - 1)
        raw_memcopy(cast_ptr_to_adr(b)+baseofs, cast_ptr_to_adr(a)+baseofs, size)
    else:
        # dead branch ('if 1' is always true); kept as-is -- NOTE(review):
        # it references an undefined name 'x' and would fail if reached
        a = []
        for i in range(x):
            a.append(i)
        return 0
def f():
    """Exercise raw malloc/memcopy/free: write three values into one raw
    block, copy it, and check the copy holds the same values."""
    block_a = llmemory.raw_malloc(100)
    block_a.signed[0] = 12
    (block_a + 10).signed[0] = 42
    (block_a + 20).char[0] = "a"
    block_b = llmemory.raw_malloc(100)
    llmemory.raw_memcopy(block_a, block_b, 100)
    # verify all three values survived the copy
    ok = (block_b.signed[0] == 12
          and (block_b + 10).signed[0] == 42
          and (block_b + 20).char[0] == "a")
    llmemory.raw_free(block_a)
    llmemory.raw_free(block_b)
    return ok
def longername(a, b, size):
    # Copy 'size' items of array 'b' into array 'a' with a single
    # raw_memcopy over the whole item range.
    if 1:
        baseofs = itemoffsetof(TP, 0)
        onesize = sizeof(TP.OF)
        # total byte span covering items [0, size): offset of item 0 plus
        # (size - 1) further items
        size = baseofs + onesize * (size - 1)
        raw_memcopy(
            cast_ptr_to_adr(b) + baseofs, cast_ptr_to_adr(a) + baseofs, size)
    else:
        # dead branch ('if 1' is always true); kept as-is -- NOTE(review):
        # it references an undefined name 'x' and would fail if reached
        a = []
        for i in range(x):
            a.append(i)
        return 0
def _ll_list_resize_really(l, newsize):
    """
    Ensure l.items has room for at least newsize elements, and set
    l.length to newsize.  Note that l.items may change, and even if
    newsize is less than l.length on entry.
    """
    # (fix: removed the dead local 'allocated = len(l.items)' -- it was
    # assigned but never used anywhere in this function)
    #
    # This over-allocates proportional to the list size, making room
    # for additional growth.  The over-allocation is mild, but is
    # enough to give linear-time amortized behavior over a long
    # sequence of appends() in the presence of a poorly-performing
    # system malloc().
    # The growth pattern is:  0, 4, 8, 16, 25, 35, 46, 58, 72, 88, ...
    if newsize <= 0:
        ll_assert(newsize == 0, "negative list length")
        l.length = 0
        l.items = _ll_new_empty_item_array(typeOf(l).TO)
        return
    else:
        if newsize < 9:
            some = 3
        else:
            some = 6
        some += newsize >> 3
        try:
            new_allocated = ovfcheck(newsize + some)
        except OverflowError:
            raise MemoryError
    # XXX consider to have a real realloc
    items = l.items
    newitems = malloc(typeOf(l).TO.items.TO, new_allocated)
    # p = index of the last element that needs to be copied over
    before_len = l.length
    if before_len < new_allocated:
        p = before_len - 1
    else:
        p = new_allocated - 1
    ITEM = typeOf(l).TO.ITEM
    if isinstance(ITEM, Ptr):
        # GC pointers: copy one by one, clearing the old slots so the
        # old array does not keep the items alive
        while p >= 0:
            newitems[p] = items[p]
            items[p] = nullptr(ITEM.TO)
            p -= 1
    else:
        # non-pointer items: one raw memory copy of the first p+1 items;
        # no GC operation may happen between the casts and the memcopy
        source = cast_ptr_to_adr(items) + itemoffsetof(typeOf(l.items).TO, 0)
        dest = cast_ptr_to_adr(newitems) + itemoffsetof(typeOf(l.items).TO, 0)
        s = p + 1
        raw_memcopy(source, dest, sizeof(ITEM) * s)
        keepalive_until_here(items)
    l.length = newsize
    l.items = newitems
def _make_a_copy_with_tid(self, obj, objsize, tid):
    """Copy 'obj' to 'self.free', writing 'tid' (possibly extended with a
    saved hash field) into the new header.  Returns the new object."""
    totalsize = self.size_gc_header() + objsize
    newaddr = self.free
    llarena.arena_reserve(newaddr, totalsize)
    raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
    if tid & GCFLAG_HASHMASK:
        # append the identity hash in an extra Signed word after the
        # object, and flag the header accordingly
        hash = self._get_object_hash(obj, objsize, tid)
        llarena.arena_reserve(newaddr + totalsize,
                              llmemory.sizeof(lltype.Signed))
        (newaddr + totalsize).signed[0] = hash
        tid |= GC_HASH_HASFIELD
        totalsize += llmemory.sizeof(lltype.Signed)
    # bump the allocation pointer only now that totalsize is final
    self.free += totalsize
    newhdr = llmemory.cast_adr_to_ptr(newaddr, lltype.Ptr(self.HDR))
    newhdr.tid = tid
    newobj = newaddr + self.size_gc_header()
    return newobj
def copy_string_contents(src, dst, srcstart, dststart, length):
    """Copies 'length' characters from the 'src' string to the 'dst'
    string, starting at position 'srcstart' and 'dststart'."""
    # xxx Warning: don't try to do this at home.  It relies on a lot
    # of details to be sure that it works correctly in all cases.
    # Notably: no GC operation at all from the first cast_ptr_to_adr()
    # because it might move the strings.  The keepalive_until_here()
    # are obscurely essential to make sure that the strings stay alive
    # longer than the raw_memcopy().
    assert srcstart >= 0
    assert dststart >= 0
    assert length >= 0
    # Bug fix: the previous code rebound 'src' and 'dst' to the computed
    # raw addresses, so the keepalive_until_here() calls below kept the
    # *addresses* alive rather than the string objects -- defeating the
    # purpose of the keepalives.  Use separate names for the addresses.
    src_adr = llmemory.cast_ptr_to_adr(src) + _str_ofs(srcstart)
    dst_adr = llmemory.cast_ptr_to_adr(dst) + _str_ofs(dststart)
    llmemory.raw_memcopy(src_adr, dst_adr,
                         llmemory.sizeof(CHAR_TP) * length)
    keepalive_until_here(src)
    keepalive_until_here(dst)
def copy(self, obj):
    """Return the to-space address of 'obj', copying it there first
    unless it has already been forwarded during this collection."""
    if self.is_forwarded(obj):
        # already copied: just follow the forwarding pointer
        return self.get_forwarding_address(obj)
    dest = self.free
    size = self.get_size(obj)
    total = self.size_gc_header() + size
    # reserve space and move header + payload to to-space
    llarena.arena_reserve(dest, total)
    raw_memcopy(obj - self.size_gc_header(), dest, total)
    self.free += total
    newobj = dest + self.size_gc_header()
    # leave a forwarding pointer behind so later visits find the copy
    self.set_forwarding_address(obj, newobj, size)
    return newobj
def _make_a_copy_with_tid(self, obj, objsize, tid):
    """Copy 'obj' to 'self.free', writing 'tid' (possibly extended with a
    hash field) into the new header.  Returns the new object."""
    totalsize = self.size_gc_header() + objsize
    newaddr = self.free
    llarena.arena_reserve(newaddr, totalsize)
    raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
    #
    # check if we need to write a hash value at the end of the new obj
    if tid & (GCFLAG_HASHTAKEN | GCFLAG_HASHFIELD):
        if tid & GCFLAG_HASHFIELD:
            # the old object already carried its hash after the payload
            hash = (obj + objsize).signed[0]
        else:
            # hash was taken but not yet stored: it is the old address
            hash = llmemory.cast_adr_to_int(obj)
            tid |= GCFLAG_HASHFIELD
        (newaddr + totalsize).signed[0] = hash
        totalsize += llmemory.sizeof(lltype.Signed)
    #
    # bump the allocation pointer only now that totalsize is final
    self.free += totalsize
    newhdr = llmemory.cast_adr_to_ptr(newaddr, lltype.Ptr(self.HDR))
    newhdr.tid = tid
    newobj = newaddr + self.size_gc_header()
    return newobj
def _make_a_copy_with_tid(self, obj, objsize, tid):
    """Copy 'obj' to 'self.free', writing 'tid' (possibly extended with a
    hash field) into the new header.  Returns the new object."""
    totalsize = self.size_gc_header() + objsize
    newaddr = self.free
    llarena.arena_reserve(newaddr, totalsize)
    raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
    #
    # check if we need to write a hash value at the end of the new obj
    if tid & (GCFLAG_HASHTAKEN|GCFLAG_HASHFIELD):
        if tid & GCFLAG_HASHFIELD:
            # the old object already carried its hash after the payload
            hash = (obj + objsize).signed[0]
        else:
            # hash was taken but not yet stored: it is the old address
            hash = llmemory.cast_adr_to_int(obj)
            tid |= GCFLAG_HASHFIELD
        (newaddr + totalsize).signed[0] = hash
        totalsize += llmemory.sizeof(lltype.Signed)
    #
    # bump the allocation pointer only now that totalsize is final
    self.free += totalsize
    newhdr = llmemory.cast_adr_to_ptr(newaddr, lltype.Ptr(self.HDR))
    newhdr.tid = tid
    newobj = newaddr + self.size_gc_header()
    return newobj
def str_from_buffer(raw_buf, gc_buf, allocated_size, needed_size):
    """Convert a (raw_buf, gc_buf) pair returned by alloc_buffer into a
    high-level string truncated to exactly needed_size characters."""
    assert allocated_size >= needed_size
    if gc_buf and allocated_size == needed_size:
        # the GC buffer already has exactly the right size: reuse it
        return hlstrtype(gc_buf)
    # otherwise allocate a fresh string and raw-copy the data into it
    new_buf = lltype.malloc(STRTYPE, needed_size)
    chars_offset = (offsetof(STRTYPE, 'chars')
                    + itemoffsetof(STRTYPE.chars, 0))
    if gc_buf:
        copy_from = cast_ptr_to_adr(gc_buf) + chars_offset
    else:
        copy_from = cast_ptr_to_adr(raw_buf) + itemoffsetof(TYPEP.TO, 0)
    copy_to = cast_ptr_to_adr(new_buf) + chars_offset
    raw_memcopy(copy_from, copy_to,
                llmemory.sizeof(ll_char_type) * needed_size)
    # both buffers must survive until after the raw copy
    keepalive_until_here(gc_buf)
    keepalive_until_here(new_buf)
    return hlstrtype(new_buf)
def str_from_buffer(raw_buf, gc_buf, allocated_size, needed_size):
    """
    Converts from a pair returned by alloc_buffer to a high-level string.
    The returned string will be truncated to needed_size.
    """
    assert allocated_size >= needed_size

    if gc_buf and (allocated_size == needed_size):
        # the GC buffer already has exactly the needed size: reuse it
        return hlstrtype(gc_buf)

    new_buf = lltype.malloc(STRTYPE, needed_size)
    str_chars_offset = (offsetof(STRTYPE, 'chars') + \
                        itemoffsetof(STRTYPE.chars, 0))
    if gc_buf:
        src = cast_ptr_to_adr(gc_buf) + str_chars_offset
    else:
        src = cast_ptr_to_adr(raw_buf) + itemoffsetof(TYPEP.TO, 0)
    dest = cast_ptr_to_adr(new_buf) + str_chars_offset
    raw_memcopy(src, dest,
                llmemory.sizeof(ll_char_type) * needed_size)
    # both buffers must stay alive until after the raw copy above
    keepalive_until_here(gc_buf)
    keepalive_until_here(new_buf)
    return hlstrtype(new_buf)
def op_raw_memcopy(self, fromaddr, toaddr, size):
    """Interpreter operation: validate both addresses, then delegate the
    actual copy to llmemory.raw_memcopy()."""
    for adr in (fromaddr, toaddr):
        checkadr(adr)
    llmemory.raw_memcopy(fromaddr, toaddr, size)
def x_clone(self, clonedata):
    # Recursively clone the gcobject and everything it points to,
    # directly or indirectly -- but stops at objects that are not
    # in the specified pool.  A new pool is built to contain the
    # copies, and the 'gcobjectptr' and 'pool' fields of clonedata
    # are adjusted to refer to the result.

    # install a new pool into which all the mallocs go
    curpool = self.x_swap_pool(lltype.nullptr(X_POOL))

    size_gc_header = self.gcheaderbuilder.size_gc_header
    oldobjects = self.AddressStack()
    # if no pool specified, use the current pool as the 'source' pool
    oldpool = clonedata.pool or curpool
    oldpool = lltype.cast_opaque_ptr(self.POOLPTR, oldpool)
    addr = llmemory.cast_ptr_to_adr(oldpool)
    addr -= size_gc_header

    hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
    hdr = hdr.next   # skip the POOL object itself
    while hdr:
        next = hdr.next
        # mark all objects from malloced_list
        hdr.flags = chr(ord(hdr.flags) | FL_CURPOOL)
        hdr.next = lltype.nullptr(self.HDR)  # abused to point to the copy
        oldobjects.append(llmemory.cast_ptr_to_adr(hdr))
        hdr = next

    # a stack of addresses of places that still points to old objects
    # and that must possibly be fixed to point to a new copy
    stack = self.AddressStack()
    stack.append(llmemory.cast_ptr_to_adr(clonedata)
                 + llmemory.offsetof(X_CLONE, 'gcobjectptr'))
    while stack.non_empty():
        gcptr_addr = stack.pop()
        oldobj_addr = gcptr_addr.address[0]
        if not oldobj_addr:
            continue   # pointer is NULL
        oldhdr = llmemory.cast_adr_to_ptr(oldobj_addr - size_gc_header,
                                          self.HDRPTR)
        if not (ord(oldhdr.flags) & FL_CURPOOL):
            continue   # ignore objects that were not in the malloced_list
        newhdr = oldhdr.next      # abused to point to the copy
        if not newhdr:
            # first visit of this object: make the copy now
            typeid = oldhdr.typeid16
            size = self.fixed_size(typeid)
            # XXX! collect() at the beginning if the free heap is low
            if self.is_varsize(typeid):
                itemsize = self.varsize_item_sizes(typeid)
                offset_to_length = self.varsize_offset_to_length(typeid)
                length = (oldobj_addr + offset_to_length).signed[0]
                newobj = self.malloc_varsize(typeid, length, size,
                                             itemsize, offset_to_length,
                                             False)
                size += length * itemsize
            else:
                newobj = self.malloc_fixedsize(typeid, size, False)
                length = -1

            newobj_addr = llmemory.cast_ptr_to_adr(newobj)

            newhdr_addr = newobj_addr - size_gc_header
            newhdr = llmemory.cast_adr_to_ptr(newhdr_addr, self.HDRPTR)

            # the raw_memcopy below also overwrites the new header, so
            # save its fields and restore them afterwards
            saved_id = newhdr.typeid16  # XXX hack needed for genc
            saved_flg1 = newhdr.mark
            saved_flg2 = newhdr.flags
            saved_next = newhdr.next      # where size_gc_header == 0
            raw_memcopy(oldobj_addr, newobj_addr, size)
            newhdr.typeid16 = saved_id
            newhdr.mark = saved_flg1
            newhdr.flags = saved_flg2
            newhdr.next = saved_next

            # schedule the fixed-part GC pointers of the copy for fixing
            offsets = self.offsets_to_gc_pointers(typeid)
            i = 0
            while i < len(offsets):
                pointer_addr = newobj_addr + offsets[i]
                stack.append(pointer_addr)
                i += 1

            if length > 0:
                # likewise for GC pointers in each var-sized item
                offsets = self.varsize_offsets_to_gcpointers_in_var_part(
                    typeid)
                itemlength = self.varsize_item_sizes(typeid)
                offset = self.varsize_offset_to_variable_part(typeid)
                itembaseaddr = newobj_addr + offset
                i = 0
                while i < length:
                    item = itembaseaddr + itemlength * i
                    j = 0
                    while j < len(offsets):
                        pointer_addr = item + offsets[j]
                        stack.append(pointer_addr)
                        j += 1
                    i += 1

            # remember the copy in the old header's (abused) next field
            oldhdr.next = newhdr
        # redirect the pointer that brought us here to the copy
        newobj_addr = llmemory.cast_ptr_to_adr(newhdr) + size_gc_header
        gcptr_addr.address[0] = newobj_addr
    stack.delete()

    # re-create the original linked list
    next = lltype.nullptr(self.HDR)
    while oldobjects.non_empty():
        hdr = llmemory.cast_adr_to_ptr(oldobjects.pop(), self.HDRPTR)
        hdr.flags = chr(ord(hdr.flags) & ~FL_CURPOOL)   # reset the flag
        hdr.next = next
        next = hdr
    oldobjects.delete()

    # consistency check
    addr = llmemory.cast_ptr_to_adr(oldpool)
    addr -= size_gc_header
    hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
    assert hdr.next == next

    # build the new pool object collecting the new objects, and
    # reinstall the pool that was current at the beginning of x_clone()
    clonedata.pool = self.x_swap_pool(curpool)
def x_clone(self, clonedata):
    # Recursively clone the gcobject and everything it points to,
    # directly or indirectly -- but stops at objects that are not
    # in the specified pool.  A new pool is built to contain the
    # copies, and the 'gcobjectptr' and 'pool' fields of clonedata
    # are adjusted to refer to the result.

    # install a new pool into which all the mallocs go
    curpool = self.x_swap_pool(lltype.nullptr(X_POOL))

    size_gc_header = self.gcheaderbuilder.size_gc_header
    oldobjects = self.AddressStack()
    # if no pool specified, use the current pool as the 'source' pool
    oldpool = clonedata.pool or curpool
    oldpool = lltype.cast_opaque_ptr(self.POOLPTR, oldpool)
    addr = llmemory.cast_ptr_to_adr(oldpool)
    addr -= size_gc_header

    hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
    hdr = hdr.next   # skip the POOL object itself
    while hdr:
        next = hdr.next
        # mark all objects from malloced_list
        hdr.flags = chr(ord(hdr.flags) | FL_CURPOOL)
        hdr.next = lltype.nullptr(self.HDR)  # abused to point to the copy
        oldobjects.append(llmemory.cast_ptr_to_adr(hdr))
        hdr = next

    # a stack of addresses of places that still points to old objects
    # and that must possibly be fixed to point to a new copy
    stack = self.AddressStack()
    stack.append(llmemory.cast_ptr_to_adr(clonedata)
                 + llmemory.offsetof(X_CLONE, 'gcobjectptr'))
    while stack.non_empty():
        gcptr_addr = stack.pop()
        oldobj_addr = gcptr_addr.address[0]
        if not oldobj_addr:
            continue   # pointer is NULL
        oldhdr = llmemory.cast_adr_to_ptr(oldobj_addr - size_gc_header,
                                          self.HDRPTR)
        if not (ord(oldhdr.flags) & FL_CURPOOL):
            continue   # ignore objects that were not in the malloced_list
        newhdr = oldhdr.next      # abused to point to the copy
        if not newhdr:
            # first visit of this object: make the copy now
            typeid = oldhdr.typeid16
            size = self.fixed_size(typeid)
            # XXX! collect() at the beginning if the free heap is low
            if self.is_varsize(typeid):
                itemsize = self.varsize_item_sizes(typeid)
                offset_to_length = self.varsize_offset_to_length(typeid)
                length = (oldobj_addr + offset_to_length).signed[0]
                newobj = self.malloc_varsize(typeid, length, size,
                                             itemsize, offset_to_length,
                                             False)
                size += length*itemsize
            else:
                newobj = self.malloc_fixedsize(typeid, size, False)
                length = -1

            newobj_addr = llmemory.cast_ptr_to_adr(newobj)

            newhdr_addr = newobj_addr - size_gc_header
            newhdr = llmemory.cast_adr_to_ptr(newhdr_addr, self.HDRPTR)

            # the raw_memcopy below also overwrites the new header, so
            # save its fields and restore them afterwards
            saved_id = newhdr.typeid16  # XXX hack needed for genc
            saved_flg1 = newhdr.mark
            saved_flg2 = newhdr.flags
            saved_next = newhdr.next      # where size_gc_header == 0
            raw_memcopy(oldobj_addr, newobj_addr, size)
            newhdr.typeid16 = saved_id
            newhdr.mark = saved_flg1
            newhdr.flags = saved_flg2
            newhdr.next = saved_next

            # schedule the fixed-part GC pointers of the copy for fixing
            offsets = self.offsets_to_gc_pointers(typeid)
            i = 0
            while i < len(offsets):
                pointer_addr = newobj_addr + offsets[i]
                stack.append(pointer_addr)
                i += 1

            if length > 0:
                # likewise for GC pointers in each var-sized item
                offsets = self.varsize_offsets_to_gcpointers_in_var_part(
                    typeid)
                itemlength = self.varsize_item_sizes(typeid)
                offset = self.varsize_offset_to_variable_part(typeid)
                itembaseaddr = newobj_addr + offset
                i = 0
                while i < length:
                    item = itembaseaddr + itemlength * i
                    j = 0
                    while j < len(offsets):
                        pointer_addr = item + offsets[j]
                        stack.append(pointer_addr)
                        j += 1
                    i += 1

            # remember the copy in the old header's (abused) next field
            oldhdr.next = newhdr
        # redirect the pointer that brought us here to the copy
        newobj_addr = llmemory.cast_ptr_to_adr(newhdr) + size_gc_header
        gcptr_addr.address[0] = newobj_addr
    stack.delete()

    # re-create the original linked list
    next = lltype.nullptr(self.HDR)
    while oldobjects.non_empty():
        hdr = llmemory.cast_adr_to_ptr(oldobjects.pop(), self.HDRPTR)
        hdr.flags = chr(ord(hdr.flags) &~ FL_CURPOOL)   # reset the flag
        hdr.next = next
        next = hdr
    oldobjects.delete()

    # consistency check
    addr = llmemory.cast_ptr_to_adr(oldpool)
    addr -= size_gc_header
    hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
    assert hdr.next == next

    # build the new pool object collecting the new objects, and
    # reinstall the pool that was current at the beginning of x_clone()
    clonedata.pool = self.x_swap_pool(curpool)