Example #1
def ll_memcpy(dst, src, length):
    C_ARRAY = lltype.Ptr(lltype.FixedSizeArray(lltype.Char, 1))
    c_src = llmemory.cast_adr_to_ptr(src, C_ARRAY)
    c_dst = llmemory.cast_adr_to_ptr(dst, C_ARRAY)
    for i in range(length):
        c_dst[i] = c_src[i]
    return dst
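All of these snippets revolve around the same pair of primitives: llmemory.cast_ptr_to_adr turns a typed lltype pointer into an untyped address, and llmemory.cast_adr_to_ptr turns an address back into a typed pointer. A minimal round-trip sketch, assuming the old pypy.rpython.lltypesystem module layout these examples were written against (newer trees spell it rpython.rtyper.lltypesystem):

from pypy.rpython.lltypesystem import lltype, llmemory

S = lltype.GcStruct('S', ('x', lltype.Signed))
SPTR = lltype.Ptr(S)

def roundtrip():
    s = lltype.malloc(S)
    s.x = 41
    adr = llmemory.cast_ptr_to_adr(s)         # typed pointer -> raw address
    p = llmemory.cast_adr_to_ptr(adr, SPTR)   # raw address -> typed pointer
    p.x += 1                                  # aliases the same object
    return s.x                                # 42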
Example #2
 def malloc_fixedsize(self, typeid, size, can_collect, has_finalizer=False,
                      contains_weakptr=False):
     if can_collect:
         self.maybe_collect()
     size_gc_header = self.gcheaderbuilder.size_gc_header
     try:
         tot_size = size_gc_header + size
         usage = raw_malloc_usage(tot_size)
         bytes_malloced = ovfcheck(self.bytes_malloced+usage)
         ovfcheck(self.heap_usage + bytes_malloced)
     except OverflowError:
         raise memoryError
     result = raw_malloc(tot_size)
     if not result:
         raise memoryError
     hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
     hdr.typeid = typeid << 1
     if has_finalizer:
         hdr.next = self.malloced_objects_with_finalizer
         self.malloced_objects_with_finalizer = hdr
     elif contains_weakptr:
         hdr.next = self.objects_with_weak_pointers
         self.objects_with_weak_pointers = hdr
     else:
         hdr.next = self.malloced_objects
         self.malloced_objects = hdr
     self.bytes_malloced = bytes_malloced
     result += size_gc_header
     #llop.debug_print(lltype.Void, 'malloc typeid', typeid,
     #                 '->', llmemory.cast_adr_to_int(result))
     self.write_malloc_statistics(typeid, tot_size, result, False)
     return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)
Example #3
    def malloc_varsize_clear(self, typeid16, length, size, itemsize,
                             offset_to_length):
        self.maybe_collect()
        size_gc_header = self.gcheaderbuilder.size_gc_header
        try:
            fixsize = size_gc_header + size
            varsize = ovfcheck(itemsize * length)
            tot_size = ovfcheck(fixsize + varsize)
            usage = raw_malloc_usage(tot_size)
            bytes_malloced = ovfcheck(self.bytes_malloced + usage)
            ovfcheck(self.heap_usage + bytes_malloced)
        except OverflowError:
            raise memoryError
        result = raw_malloc(tot_size)
        if not result:
            raise memoryError
        raw_memclear(result, tot_size)
        (result + size_gc_header + offset_to_length).signed[0] = length
        hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
        hdr.typeid16 = typeid16
        hdr.mark = False
        hdr.flags = '\x00'
        hdr.next = self.malloced_objects
        self.malloced_objects = hdr
        self.bytes_malloced = bytes_malloced

        result += size_gc_header
        #llop.debug_print(lltype.Void, 'malloc_varsize length', length,
        #                 'typeid', typeid16,
        #                 '->', llmemory.cast_adr_to_int(result))
        self.write_malloc_statistics(typeid16, tot_size, result, True)
        return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)
Example #4
 def malloc_varsize_clear(self, typeid16, length, size, itemsize,
                          offset_to_length, can_collect):
     if can_collect:
         self.maybe_collect()
     size_gc_header = self.gcheaderbuilder.size_gc_header
     try:
         fixsize = size_gc_header + size
         varsize = ovfcheck(itemsize * length)
         tot_size = ovfcheck(fixsize + varsize)
         usage = raw_malloc_usage(tot_size)
         bytes_malloced = ovfcheck(self.bytes_malloced+usage)
         ovfcheck(self.heap_usage + bytes_malloced)
     except OverflowError:
         raise memoryError
     result = raw_malloc(tot_size)
     if not result:
         raise memoryError
     raw_memclear(result, tot_size)        
     (result + size_gc_header + offset_to_length).signed[0] = length
     hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
     hdr.typeid16 = typeid16
     hdr.mark = False
     hdr.flags = '\x00'
     hdr.next = self.malloced_objects
     self.malloced_objects = hdr
     self.bytes_malloced = bytes_malloced
         
     result += size_gc_header
     #llop.debug_print(lltype.Void, 'malloc_varsize length', length,
     #                 'typeid', typeid16,
     #                 '->', llmemory.cast_adr_to_int(result))
     self.write_malloc_statistics(typeid16, tot_size, result, True)
     return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)
Example #5
 def malloc_fixedsize(self, typeid16, size, can_collect,
                      has_finalizer=False, contains_weakptr=False):
     if can_collect:
         self.maybe_collect()
     size_gc_header = self.gcheaderbuilder.size_gc_header
     try:
         tot_size = size_gc_header + size
         usage = raw_malloc_usage(tot_size)
         bytes_malloced = ovfcheck(self.bytes_malloced+usage)
         ovfcheck(self.heap_usage + bytes_malloced)
     except OverflowError:
         raise memoryError
     result = raw_malloc(tot_size)
     if not result:
         raise memoryError
     hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
     hdr.typeid16 = typeid16
     hdr.mark = False
     hdr.flags = '\x00'
     if has_finalizer:
         hdr.next = self.malloced_objects_with_finalizer
         self.malloced_objects_with_finalizer = hdr
     elif contains_weakptr:
         hdr.next = self.objects_with_weak_pointers
         self.objects_with_weak_pointers = hdr
     else:
         hdr.next = self.malloced_objects
         self.malloced_objects = hdr
     self.bytes_malloced = bytes_malloced
     result += size_gc_header
     #llop.debug_print(lltype.Void, 'malloc typeid', typeid16,
     #                 '->', llmemory.cast_adr_to_int(result))
     self.write_malloc_statistics(typeid16, tot_size, result, False)
     return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)
Example #6
 def markcompactcollect(self, needed=0):
     start_time = self.debug_collect_start()
     self.debug_check_consistency()
     self.to_see = self.AddressStack()
     self.mark_roots_recursively()
     if (self.objects_with_finalizers.non_empty() or
         self.run_finalizers.non_empty()):
         self.mark_objects_with_finalizers()
         self._trace_and_mark()
     self.to_see.delete()
     num_of_alive_objs = self.compute_alive_objects()
     size_of_alive_objs = self.totalsize_of_objs
     totalsize = self.new_space_size(size_of_alive_objs, needed +
                                     num_of_alive_objs * BYTES_PER_TID)
     tid_backup_size = (llmemory.sizeof(self.TID_BACKUP, 0) +
                        llmemory.sizeof(TID_TYPE) * num_of_alive_objs)
     used_space_now = self.next_collect_after + raw_malloc_usage(tid_backup_size)
     if totalsize >= self.space_size or used_space_now >= self.space_size:
         toaddr = self.double_space_size(totalsize)
         llarena.arena_reserve(toaddr + size_of_alive_objs, tid_backup_size)
         self.tid_backup = llmemory.cast_adr_to_ptr(
             toaddr + size_of_alive_objs,
             lltype.Ptr(self.TID_BACKUP))
         resizing = True
     else:
         toaddr = llarena.arena_new_view(self.space)
         llarena.arena_reserve(self.top_of_space, tid_backup_size)
         self.tid_backup = llmemory.cast_adr_to_ptr(
             self.top_of_space,
             lltype.Ptr(self.TID_BACKUP))
         resizing = False
     self.next_collect_after = totalsize
     weakref_offsets = self.collect_weakref_offsets()
     finaladdr = self.update_forward_pointers(toaddr, num_of_alive_objs)
     if (self.run_finalizers.non_empty() or
         self.objects_with_finalizers.non_empty()):
         self.update_run_finalizers()
     if self.objects_with_weakrefs.non_empty():
         self.invalidate_weakrefs(weakref_offsets)
     self.update_objects_with_id()
     self.compact(resizing)
     if not resizing:
         size = toaddr + self.space_size - finaladdr
         llarena.arena_reset(finaladdr, size, True)
     else:
         if we_are_translated():
             # because we free stuff already in raw_memmove, we
             # would get double free here. Let's free it anyway
             llarena.arena_free(self.space)
         llarena.arena_reset(toaddr + size_of_alive_objs, tid_backup_size,
                             True)
     self.space        = toaddr
     self.free         = finaladdr
     self.top_of_space = toaddr + self.next_collect_after
     self.debug_check_consistency()
     self.tid_backup = lltype.nullptr(self.TID_BACKUP)
     if self.run_finalizers.non_empty():
         self.execute_finalizers()
     self.debug_collect_finish(start_time)
Example #7
 def ll_dealloc(addr):
     # bump refcount to 1
     gcheader = llmemory.cast_adr_to_ptr(addr - gc_header_offset, HDRPTR)
     gcheader.refcount = 1
     v = llmemory.cast_adr_to_ptr(addr, QUERY_ARG_TYPE)
     rtti = queryptr(v)
     gcheader.refcount = 0
     llop.gc_call_rtti_destructor(lltype.Void, rtti, addr)
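In this transformer the GC header sits immediately before the object, so subtracting gc_header_offset from the object address exposes the refcount field. For context, a sketch of the matching increment helper, assuming the same HDRPTR and gc_header_offset definitions that ll_dealloc relies on:

def ll_incref(adr):
    if adr:
        # the header lives just before the object proper
        gcheader = llmemory.cast_adr_to_ptr(adr - gc_header_offset, HDRPTR)
        gcheader.refcount += 1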
Example #8
 def customtrace(obj, prev):
     # a simple but not JIT-ready version
     if not prev:
         next = llmemory.cast_adr_to_ptr(obj, SHADOWSTACKREFPTR).base
     else:
         next = prev + sizeofaddr
     if next == llmemory.cast_adr_to_ptr(obj, SHADOWSTACKREFPTR).top:
         next = llmemory.NULL
     return next
Example #9
 def cast_adr_to_int(self, adr):
     if not adr:
         return 0
     try:
         ptr = llmemory.cast_adr_to_ptr(adr, gc_ll_descr.WB_FUNCPTR)
         assert ptr._obj._callable == \
                llop1._write_barrier_failing_case
         return 42
     except lltype.InvalidCast:
         ptr = llmemory.cast_adr_to_ptr(
             adr, gc_ll_descr.WB_ARRAY_FUNCPTR)
         assert ptr._obj._callable == \
                llop1._write_barrier_from_array_failing_case
         return 43
Example #10
def test_look_inside_object():
    # this code is also used in translation tests below
    myarenasize = 50
    a = arena_malloc(myarenasize, False)
    b = a + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(b, precomputed_size)
    (b + llmemory.offsetof(SX, 'x')).signed[0] = 123
    assert llmemory.cast_adr_to_ptr(b, SPTR).x == 123
    llmemory.cast_adr_to_ptr(b, SPTR).x += 1
    assert (b + llmemory.offsetof(SX, 'x')).signed[0] == 124
    arena_reset(a, myarenasize, True)
    arena_reserve(b, round_up_for_allocation(llmemory.sizeof(SX)))
    assert llmemory.cast_adr_to_ptr(b, SPTR).x == 0
    arena_free(a)
    return 42
Example #11
 def _mark_root_and_clear_bit(self, root):
     gcobjectaddr = root.address[0]
     self._mark_stack.append(gcobjectaddr)
     size_gc_header = self.gcheaderbuilder.size_gc_header
     gc_info = gcobjectaddr - size_gc_header
     hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
     hdr.mark = False
Example #12
 def malloc_varsize_clear(self,
                          typeid,
                          length,
                          size,
                          itemsize,
                          offset_to_length,
                          can_collect,
                          has_finalizer=False):
     size_gc_header = self.gcheaderbuilder.size_gc_header
     nonvarsize = size_gc_header + size
     try:
         varsize = ovfcheck(itemsize * length)
         totalsize = ovfcheck(nonvarsize + varsize)
     except OverflowError:
         raise memoryError
     result = self.free
     if raw_malloc_usage(totalsize) > self.top_of_space - result:
         if not can_collect:
             raise memoryError
         result = self.obtain_free_space(totalsize)
     llarena.arena_reserve(result, totalsize)
     self.init_gc_object(result, typeid)
     (result + size_gc_header + offset_to_length).signed[0] = length
     self.free = result + llarena.round_up_for_allocation(totalsize)
     if has_finalizer:
         self.objects_with_finalizers.append(result + size_gc_header)
     return llmemory.cast_adr_to_ptr(result + size_gc_header,
                                     llmemory.GCREF)
Example #13
 def init_gc_object_immortal(self, addr, typeid, flags=0):
     # prebuilt gc structures always have the mark bit set
     # ignore flags
     hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
     hdr.typeid16 = typeid
     hdr.mark = True
     hdr.flags = '\x00'
Example #14
    def do_getsetarrayitem(self, op, oopspec):
        ffitypeval = self.getvalue(op.getarg(1))
        widthval = self.getvalue(op.getarg(2))
        offsetval = self.getvalue(op.getarg(5))
        if not ffitypeval.is_constant() or not widthval.is_constant() or not offsetval.is_constant():
            return [op]

        ffitypeaddr = ffitypeval.box.getaddr()
        ffitype = llmemory.cast_adr_to_ptr(ffitypeaddr, clibffi.FFI_TYPE_P)
        offset = offsetval.box.getint()
        width = widthval.box.getint()
        descr = self._get_interior_descr(ffitype, width, offset)

        arglist = [
            self.getvalue(op.getarg(3)).force_box(self.optimizer),
            self.getvalue(op.getarg(4)).force_box(self.optimizer),
        ]
        if oopspec == EffectInfo.OS_LIBFFI_GETARRAYITEM:
            opnum = rop.GETINTERIORFIELD_RAW
        elif oopspec == EffectInfo.OS_LIBFFI_SETARRAYITEM:
            opnum = rop.SETINTERIORFIELD_RAW
            arglist.append(self.getvalue(op.getarg(6)).force_box(self.optimizer))
        else:
            assert False
        return [
            ResOperation(opnum, arglist, op.result, descr=descr),
        ]
Example #15
 def malloc_resizable_buffer(self, TYPE, n):
     typeid = self.get_type_id(TYPE)
     addr = self.gc.malloc(typeid, n)
     result = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(TYPE))
     if not self.gc.malloc_zero_filled:
         gctypelayout.zero_gc_pointers(result)
     return result
Example #16
 def init_gc_object_immortal(self, addr, typeid16, flags=0):
     hdr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR))
     flags |= GCFLAG_HASHTAKEN | GCFLAG_MARKBIT
     # All prebuilt GC objects have the GCFLAG_MARKBIT always set.
     # That's convenient to make the GC always think that they
     # survive the current collection.
     hdr.tid = self.combine(typeid16, flags)
Example #17
 def get_forwarding_address(self, obj):
     tid = self.header(obj).tid
     if tid & GCFLAG_IMMORTAL:
         return obj      # prebuilt objects are "forwarded" to themselves
     else:
         stub = llmemory.cast_adr_to_ptr(obj, self.FORWARDSTUBPTR)
         return stub.forw
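A dead-and-moved object has been overwritten in place by a small forwarding stub whose single field holds the new address. A sketch of the stub layout this snippet assumes, modeled on the semispace collector (treat the exact struct definition as an assumption; only the 'forw' field matters here):

FORWARDSTUB = lltype.GcStruct('forwarding_stub',
                              ('forw', llmemory.Address))
FORWARDSTUBPTR = lltype.Ptr(FORWARDSTUB)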
Example #18
 def compute_alive_objects(self):
     fromaddr = self.space
     addraftercollect = self.space
     num = 1
     while fromaddr < self.free:
         size_gc_header = self.gcheaderbuilder.size_gc_header
         tid = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR)).tid
         obj = fromaddr + size_gc_header
         objsize = self.get_size(obj)
         objtotalsize = size_gc_header + objsize
         if self.marked(obj):
             copy_has_hash_field = ((tid & GCFLAG_HASHFIELD) != 0
                                    or ((tid & GCFLAG_HASHTAKEN) != 0
                                        and addraftercollect < fromaddr))
             addraftercollect += raw_malloc_usage(objtotalsize)
             if copy_has_hash_field:
                 addraftercollect += llmemory.sizeof(lltype.Signed)
         num += 1
         fromaddr += objtotalsize
         if tid & GCFLAG_HASHFIELD:
             fromaddr += llmemory.sizeof(lltype.Signed)
     ll_assert(addraftercollect <= fromaddr,
               "markcompactcollect() is trying to increase memory usage")
     self.totalsize_of_objs = addraftercollect - self.space
     return num
Example #19
 def link(pageaddr, size_class, size_block, nblocks, nusedblocks, step=1):
     assert step in (1, 2)
     llarena.arena_reserve(pageaddr, llmemory.sizeof(PAGE_HEADER))
     page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR)
     if step == 1:
         page.nfree = 0
         nuninitialized = nblocks - nusedblocks
     else:
         page.nfree = nusedblocks
         nuninitialized = nblocks - 2 * nusedblocks
     page.freeblock = pageaddr + hdrsize + nusedblocks * size_block
     if nusedblocks < nblocks:
         chainedlists = ac.page_for_size
     else:
         chainedlists = ac.full_page_for_size
     page.nextpage = chainedlists[size_class]
     page.arena = ac.current_arena
     chainedlists[size_class] = page
     if fill_with_objects:
         for i in range(0, nusedblocks * step, step):
             objaddr = pageaddr + hdrsize + i * size_block
             llarena.arena_reserve(objaddr, _dummy_size(size_block))
         if step == 2:
             prev = 'page.freeblock'
             for i in range(1, nusedblocks * step, step):
                 holeaddr = pageaddr + hdrsize + i * size_block
                 llarena.arena_reserve(holeaddr,
                                       llmemory.sizeof(llmemory.Address))
                 exec '%s = holeaddr' % prev in globals(), locals()
                 prevhole = holeaddr
                 prev = 'prevhole.address[0]'
             endaddr = pageaddr + hdrsize + 2 * nusedblocks * size_block
             exec '%s = endaddr' % prev in globals(), locals()
     assert ac._nuninitialized(page, size_class) == nuninitialized
Example #20
 def f():
     adr = llmemory.raw_malloc(sizeofs)
     s = llmemory.cast_adr_to_ptr(adr, STRUCTPTR)
     s.y = 5  # does not crash
     result = (adr + offsety).signed[0] * 10 + int(offsety < sizeofs)
     llmemory.raw_free(adr)
     return result
Example #21
 def op_gc_reload_possibly_moved(self, v_newaddr, v_ptr):
     assert v_newaddr.concretetype is llmemory.Address
     assert isinstance(v_ptr.concretetype, lltype.Ptr)
     assert v_ptr.concretetype.TO._gckind == 'gc'
     newaddr = self.getval(v_newaddr)
     p = llmemory.cast_adr_to_ptr(newaddr, v_ptr.concretetype)
     self.setvar(v_ptr, p)
Example #22
 def llf(n):
     a = malloc(A, 5)
     a[3].x = 42
     adr_s = llmemory.cast_ptr_to_adr(a[0])
     adr_s += size * n
     s = llmemory.cast_adr_to_ptr(adr_s, PS)
     return s.x
Example #23
 def malloc_varsize_slowpath(self,
                             typeid,
                             length,
                             force_nonmovable=False,
                             resizable=False):
     # For objects that are too large, or when the nursery is exhausted.
     # In order to keep malloc_varsize_clear() as compact as possible,
     # we recompute what we need in this slow path instead of passing
     # it all as function arguments.
     size_gc_header = self.gcheaderbuilder.size_gc_header
     nonvarsize = size_gc_header + self.fixed_size(typeid)
     itemsize = self.varsize_item_sizes(typeid)
     offset_to_length = self.varsize_offset_to_length(typeid)
     try:
         varsize = ovfcheck(itemsize * length)
         totalsize = ovfcheck(nonvarsize + varsize)
     except OverflowError:
         raise MemoryError()
     if self.has_gcptr_in_varsize(typeid):
         nonlarge_max = self.nonlarge_gcptrs_max
     else:
         nonlarge_max = self.nonlarge_max
     if force_nonmovable or raw_malloc_usage(totalsize) > nonlarge_max:
         result = self.malloc_varsize_marknsweep(totalsize, resizable)
         flags = self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS | GCFLAG_UNVISITED
     else:
         result = self.malloc_varsize_collecting_nursery(totalsize)
         flags = self.GCFLAGS_FOR_NEW_YOUNG_OBJECTS
     self.init_gc_object(result, typeid, flags)
     (result + size_gc_header + offset_to_length).signed[0] = length
     return llmemory.cast_adr_to_ptr(result + size_gc_header,
                                     llmemory.GCREF)
Example #24
 def realloc(self, ptr, newlength, fixedsize, itemsize, lengthofs, grow):
     size_gc_header = self.size_gc_header()
     addr = llmemory.cast_ptr_to_adr(ptr)
     ll_assert(
         self.header(addr).tid & GCFLAG_EXTERNAL,
         "realloc() on a non-external object")
     nonvarsize = size_gc_header + fixedsize
     try:
         varsize = ovfcheck(itemsize * newlength)
         tot_size = ovfcheck(nonvarsize + varsize)
     except OverflowError:
         raise MemoryError()
     oldlength = (addr + lengthofs).signed[0]
     old_tot_size = size_gc_header + fixedsize + oldlength * itemsize
     source_addr = addr - size_gc_header
     self.gen2_resizable_objects.remove(addr)
     if grow:
         result = llop.raw_realloc_grow(llmemory.Address, source_addr,
                                        old_tot_size, tot_size)
     else:
         result = llop.raw_realloc_shrink(llmemory.Address, source_addr,
                                          old_tot_size, tot_size)
     if not result:
         self.gen2_resizable_objects.append(addr)
         raise MemoryError()
     if grow:
         self.gen2_resizable_objects.append(result + size_gc_header)
     else:
         self.gen2_rawmalloced_objects.append(result + size_gc_header)
     self._check_rawsize_alloced(raw_malloc_usage(tot_size) -
                                 raw_malloc_usage(old_tot_size),
                                 can_collect=not grow)
     (result + size_gc_header + lengthofs).signed[0] = newlength
     return llmemory.cast_adr_to_ptr(result + size_gc_header,
                                     llmemory.GCREF)
Example #25
 def _setup_object(self, result, typeid16, has_finalizer):
     size_gc_header = self.gcheaderbuilder.size_gc_header
     self.init_gc_object(result, typeid16)
     if has_finalizer:
         self.objects_with_finalizers.append(result + size_gc_header)
     return llmemory.cast_adr_to_ptr(result + size_gc_header,
                                     llmemory.GCREF)
Example #26
 def f():
     adr = lladdress.raw_malloc(sizeofs)
     s = llmemory.cast_adr_to_ptr(adr, STRUCTPTR)
     s.y = 5 # does not crash
     result = (adr + offsety).signed[0] * 10 + int(offsety < sizeofs)
     lladdress.raw_free(adr)
     return result
Example #27
 def malloc_fixedsize_clear(self, typeid, size, can_collect,
                            has_finalizer=False, contains_weakptr=False):
     if (has_finalizer or not can_collect or
         (raw_malloc_usage(size) > self.lb_young_var_basesize and
          raw_malloc_usage(size) > self.largest_young_fixedsize)):
         # ^^^ we do two size comparisons; the first one appears redundant,
         #     but it can be constant-folded if 'size' is a constant; then
         #     it almost always folds down to False, which kills the
         #     second comparison as well.
         ll_assert(not contains_weakptr, "wrong case for mallocing weakref")
         # "non-simple" case or object too big: don't use the nursery
         return SemiSpaceGC.malloc_fixedsize_clear(self, typeid, size,
                                                   can_collect,
                                                   has_finalizer,
                                                   contains_weakptr)
     size_gc_header = self.gcheaderbuilder.size_gc_header
     totalsize = size_gc_header + size
     result = self.nursery_free
     if raw_malloc_usage(totalsize) > self.nursery_top - result:
         result = self.collect_nursery()
     llarena.arena_reserve(result, totalsize)
     # GCFLAG_NO_YOUNG_PTRS is never set on young objs
     self.init_gc_object(result, typeid, flags=0)
     self.nursery_free = result + totalsize
     if contains_weakptr:
         self.young_objects_with_weakrefs.append(result + size_gc_header)
     return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
Example #28
 def walk_marked_objects(self, callback):
     num = 0
     size_gc_header = self.gcheaderbuilder.size_gc_header
     fromaddr = self.space
     toaddr = self.base_forwarding_addr
     while fromaddr < self.free:
         hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR))
         obj = fromaddr + size_gc_header
         survives = self.marked(obj)
         if survives:
             typeid = self.get_typeid_from_backup(num)
             num += 1
         else:
             typeid = self.get_type_id(obj)
         baseobjsize = self._get_size_for_typeid(obj, typeid)
         basesize = size_gc_header + baseobjsize
         totalsrcsize = basesize
         #
         if survives:
             grow_hash_field = False
             if hdr.tid & GCFLAG_SAVED_HASHFIELD:
                 totalsrcsize += llmemory.sizeof(lltype.Signed)
             totaldstsize = totalsrcsize
             if (hdr.tid & (GCFLAG_SAVED_HASHTAKEN|GCFLAG_SAVED_HASHFIELD)
                         == GCFLAG_SAVED_HASHTAKEN):
                 if self.toaddr_smaller_than_fromaddr(toaddr, fromaddr):
                     grow_hash_field = True
                     totaldstsize += llmemory.sizeof(lltype.Signed)
             callback(self, obj, typeid, basesize, toaddr, grow_hash_field)
             toaddr += totaldstsize
         else:
             if hdr.tid & GCFLAG_HASHFIELD:
                 totalsrcsize += llmemory.sizeof(lltype.Signed)
         #
         fromaddr += totalsrcsize
Example #29
 def walk_roots(self, collect_stack_root, collect_static_in_prebuilt_nongc,
                collect_static_in_prebuilt_gc):
     gc = self.tester.gc
     layoutbuilder = self.tester.layoutbuilder
     if collect_static_in_prebuilt_gc:
         for addrofaddr in layoutbuilder.addresses_of_static_ptrs:
             if addrofaddr.address[0]:
                 collect_static_in_prebuilt_gc(gc, addrofaddr)
     if collect_static_in_prebuilt_nongc:
         for addrofaddr in layoutbuilder.addresses_of_static_ptrs_in_nongc:
             if addrofaddr.address[0]:
                 collect_static_in_prebuilt_nongc(gc, addrofaddr)
     if collect_stack_root:
         stackroots = self.tester.stackroots
         a = lltype.malloc(ADDR_ARRAY, len(stackroots), flavor='raw')
         for i in range(len(a)):
             a[i] = llmemory.cast_ptr_to_adr(stackroots[i])
         a_base = lltype.direct_arrayitems(a)
         for i in range(len(a)):
             ai = lltype.direct_ptradd(a_base, i)
             collect_stack_root(gc, llmemory.cast_ptr_to_adr(ai))
         for i in range(len(a)):
             PTRTYPE = lltype.typeOf(stackroots[i])
             stackroots[i] = llmemory.cast_adr_to_ptr(a[i], PTRTYPE)
         lltype.free(a, flavor='raw')
Example #30
 def ll_finalizer(addr):
     try:
         v = llmemory.cast_adr_to_ptr(addr, DESTR_ARG)
         self.llinterp.eval_graph(destrgraph, [v], recursive=True)
     except llinterp.LLException:
         raise RuntimeError(
             "a finalizer raised an exception, shouldn't happen")
Example #31
File: hybrid.py Project: ieure/pypy
 def malloc_varsize_slowpath(self, typeid, length, force_nonmovable=False):
     # For objects that are too large, or when the nursery is exhausted.
     # In order to keep malloc_varsize_clear() as compact as possible,
     # we recompute what we need in this slow path instead of passing
     # it all as function arguments.
     size_gc_header = self.gcheaderbuilder.size_gc_header
     nonvarsize = size_gc_header + self.fixed_size(typeid)
     itemsize = self.varsize_item_sizes(typeid)
     offset_to_length = self.varsize_offset_to_length(typeid)
     try:
         varsize = ovfcheck(itemsize * length)
         totalsize = ovfcheck(nonvarsize + varsize)
     except OverflowError:
         raise MemoryError()
     if self.has_gcptr_in_varsize(typeid):
         nonlarge_max = self.nonlarge_gcptrs_max
     else:
         nonlarge_max = self.nonlarge_max
     if force_nonmovable or raw_malloc_usage(totalsize) > nonlarge_max:
         result = self.malloc_varsize_marknsweep(totalsize)
         flags = self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS | GCFLAG_UNVISITED
     else:
         result = self.malloc_varsize_collecting_nursery(totalsize)
         flags = self.GCFLAGS_FOR_NEW_YOUNG_OBJECTS
     self.init_gc_object(result, typeid, flags)
     (result + size_gc_header + offset_to_length).signed[0] = length
     return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
Example #32
 def realloc(self, ptr, newlength, fixedsize, itemsize, lengthofs, grow):
     size_gc_header = self.size_gc_header()
     addr = llmemory.cast_ptr_to_adr(ptr)
     tid = self.get_type_id(addr)
     nonvarsize = size_gc_header + fixedsize
     try:
         varsize = ovfcheck(itemsize * newlength)
         tot_size = ovfcheck(nonvarsize + varsize)
     except OverflowError:
         raise MemoryError()
     oldlength = (addr + lengthofs).signed[0]
     old_tot_size = size_gc_header + fixedsize + oldlength * itemsize
     source_addr = addr - size_gc_header
     self.gen2_resizable_objects.remove(addr)
     if grow:
         result = llop.raw_realloc_grow(llmemory.Address, source_addr,
                                        old_tot_size, tot_size)
     else:
         result = llop.raw_realloc_shrink(llmemory.Address, source_addr,
                                          old_tot_size, tot_size)
     if not result:
         self.gen2_resizable_objects.append(addr)
         raise MemoryError()
     if grow:
         self.gen2_resizable_objects.append(result + size_gc_header)
     else:
         self.gen2_rawmalloced_objects.append(result + size_gc_header)
     self._check_rawsize_alloced(raw_malloc_usage(tot_size) -
                                 raw_malloc_usage(old_tot_size),
                                 can_collect = not grow)
     (result + size_gc_header + lengthofs).signed[0] = newlength
     return llmemory.cast_adr_to_ptr(result + size_gc_header, llmemory.GCREF)
Example #33
 def ll_decref(adr, dealloc):
     if adr:
         gcheader = llmemory.cast_adr_to_ptr(adr - gc_header_offset, HDRPTR)
         refcount = gcheader.refcount - 1
         gcheader.refcount = refcount
         if refcount == 0:
             dealloc(adr)
Example #34
 def malloc_fixedsize_clear(self,
                            typeid,
                            size,
                            has_finalizer=False,
                            is_finalizer_light=False,
                            contains_weakptr=False):
     if (has_finalizer or
         (raw_malloc_usage(size) > self.lb_young_fixedsize
          and raw_malloc_usage(size) > self.largest_young_fixedsize)):
         # ^^^ we do two size comparisons; the first one appears redundant,
         #     but it can be constant-folded if 'size' is a constant; then
         #     it almost always folds down to False, which kills the
         #     second comparison as well.
         ll_assert(not contains_weakptr, "wrong case for mallocing weakref")
         # "non-simple" case or object too big: don't use the nursery
         return SemiSpaceGC.malloc_fixedsize_clear(self, typeid, size,
                                                   has_finalizer,
                                                   is_finalizer_light,
                                                   contains_weakptr)
     size_gc_header = self.gcheaderbuilder.size_gc_header
     totalsize = size_gc_header + size
     result = self.nursery_free
     if raw_malloc_usage(totalsize) > self.nursery_top - result:
         result = self.collect_nursery()
     llarena.arena_reserve(result, totalsize)
     # GCFLAG_NO_YOUNG_PTRS is never set on young objs
     self.init_gc_object(result, typeid, flags=0)
     self.nursery_free = result + totalsize
     if contains_weakptr:
         self.young_objects_with_weakrefs.append(result + size_gc_header)
     return llmemory.cast_adr_to_ptr(result + size_gc_header,
                                     llmemory.GCREF)
Example #35
    def update_forward_pointers(self, toaddr, maxnum):
        self.base_forwarding_addr = base_forwarding_addr = toaddr
        fromaddr = self.space
        size_gc_header = self.gcheaderbuilder.size_gc_header
        num = 0
        while fromaddr < self.free:
            hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR))
            obj = fromaddr + size_gc_header
            # compute the original object size, including the
            # optional hash field
            basesize = size_gc_header + self.get_size(obj)
            totalsrcsize = basesize
            if hdr.tid & GCFLAG_HASHFIELD:  # already a hash field, copy it too
                totalsrcsize += llmemory.sizeof(lltype.Signed)
            #
            if self.marked(obj):
            # the object is marked as surviving.  Compute the new object
                # size
                totaldstsize = totalsrcsize
                if hdr.tid & (GCFLAG_HASHTAKEN | GCFLAG_HASHFIELD) == GCFLAG_HASHTAKEN:
                    # grow a new hash field -- with the exception: if
                    # the object actually doesn't move, don't
                    # (otherwise, we get a bogus toaddr > fromaddr)
                    if self.toaddr_smaller_than_fromaddr(toaddr, fromaddr):
                        totaldstsize += llmemory.sizeof(lltype.Signed)
                #
                if not translated_to_c():
                    llarena.arena_reserve(toaddr, basesize)
                    if raw_malloc_usage(totaldstsize) > raw_malloc_usage(basesize):
                        llarena.arena_reserve(toaddr + basesize, llmemory.sizeof(lltype.Signed))
                #
                # save the field hdr.tid in the array tid_backup
                ll_assert(num < maxnum, "overflow of the tid_backup table")
                self.tid_backup[num] = self.get_type_id(obj)
                num += 1
                # compute forward_offset, the offset to the future copy
                # of this object
                forward_offset = toaddr - base_forwarding_addr
                # copy the first two gc flags in forward_offset
                ll_assert(forward_offset & 3 == 0, "misalignment!")
                forward_offset |= (hdr.tid >> first_gcflag_bit) & 3
                hdr.tid = forward_offset | GCFLAG_MARKBIT
                ll_assert(self.marked(obj), "re-marking object failed!")
                # done
                toaddr += totaldstsize
            #
            fromaddr += totalsrcsize
            if not translated_to_c():
                assert toaddr - base_forwarding_addr <= fromaddr - self.space
        self.num_alive_objs = num
        self.finaladdr = toaddr

        # now update references
        self.root_walker.walk_roots(
            MarkCompactGC._update_ref,  # stack roots
            MarkCompactGC._update_ref,  # static in prebuilt non-gc structures
            MarkCompactGC._update_ref)  # static in prebuilt gc objects
        self.walk_marked_objects(MarkCompactGC.trace_and_update_ref)
Example #36
def ll_weakref_create(targetaddr):
    link = llop.boehm_malloc_atomic(llmemory.Address, sizeof_weakreflink)
    if not link:
        raise MemoryError
    plink = llmemory.cast_adr_to_ptr(link, lltype.Ptr(WEAKLINK))
    plink[0] = targetaddr
    llop.boehm_disappearing_link(lltype.Void, link, targetaddr)
    return llmemory.cast_ptr_to_weakrefptr(plink)
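Reading the weak reference back goes through the inverse cast. A sketch of the matching deref helper under the same WEAKLINK layout (llmemory.cast_weakrefptr_to_ptr undoes cast_ptr_to_weakrefptr, and Boehm nulls the link once the target is collected):

def ll_weakref_deref(wref):
    plink = llmemory.cast_weakrefptr_to_ptr(lltype.Ptr(WEAKLINK), wref)
    return plink[0]   # a NULL address once the target has been collected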
Example #37
 def ll_decref_simple(adr):
     if adr:
         gcheader = llmemory.cast_adr_to_ptr(adr - gc_header_offset, HDRPTR)
         refcount = gcheader.refcount - 1
         if refcount == 0:
             llop.gc_free(lltype.Void, adr)
         else:
             gcheader.refcount = refcount
Example #38
    def x_swap_pool(self, newpool):
        # Set newpool as the current pool (create one if newpool == NULL).
        # All malloc'ed objects are put into the current pool; this is a
        # way to separate objects depending on when they were allocated.
        size_gc_header = self.gcheaderbuilder.size_gc_header
        # invariant: each POOL GcStruct is at the _front_ of a linked list
        # of malloced objects.
        oldpool = self.curpool
        #llop.debug_print(lltype.Void, 'x_swap_pool',
        #                 lltype.cast_ptr_to_int(oldpool),
        #                 lltype.cast_ptr_to_int(newpool))
        if not oldpool:
            # make a fresh pool object, which is automatically inserted at the
            # front of the current list
            oldpool = lltype.malloc(self.POOL)
            addr = llmemory.cast_ptr_to_adr(oldpool)
            addr -= size_gc_header
            hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
            # put this new POOL object in the poolnodes list
            node = lltype.malloc(self.POOLNODE, flavor="raw")
            node.linkedlist = hdr
            node.nextnode = self.poolnodes
            self.poolnodes = node
        else:
            # manually insert oldpool at the front of the current list
            addr = llmemory.cast_ptr_to_adr(oldpool)
            addr -= size_gc_header
            hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
            hdr.next = self.malloced_objects

        newpool = lltype.cast_opaque_ptr(self.POOLPTR, newpool)
        if newpool:
            # newpool is at the front of the new linked list to install
            addr = llmemory.cast_ptr_to_adr(newpool)
            addr -= size_gc_header
            hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
            self.malloced_objects = hdr.next
            # invariant: now that objects in the hdr.next list are accessible
            # through self.malloced_objects, make sure they are not accessible
            # via poolnodes (which has a node pointing to newpool):
            hdr.next = lltype.nullptr(self.HDR)
        else:
            # start a fresh new linked list
            self.malloced_objects = lltype.nullptr(self.HDR)
        self.curpool = newpool
        return lltype.cast_opaque_ptr(X_POOL_PTR, oldpool)
Example #39
 def do_malloc_varsize_clear(self, RESTYPE, type_id, length, size, itemsize,
                             offset_to_length):
     p, tid = self._malloc(type_id, size + itemsize * length)
     (p + offset_to_length).signed[0] = length
     p = llmemory.cast_adr_to_ptr(p, RESTYPE)
     self.record.append(("varsize", tid, length, repr(size), repr(itemsize),
                         repr(offset_to_length), p))
     return p