Example #1
def get_array_token(T, translate_support_code):
    # T can be an array or a var-sized structure
    if translate_support_code:
        basesize = llmemory.sizeof(T, 0)
        if isinstance(T, lltype.Struct):
            SUBARRAY = getattr(T, T._arrayfld)
            itemsize = llmemory.sizeof(SUBARRAY.OF)
            ofs_length = (llmemory.offsetof(T, T._arrayfld) +
                          llmemory.ArrayLengthOffset(SUBARRAY))
        else:
            itemsize = llmemory.sizeof(T.OF)
            ofs_length = llmemory.ArrayLengthOffset(T)
    else:
        if isinstance(T, lltype.Struct):
            assert T._arrayfld is not None, "%r is not variable-sized" % (T,)
            cstruct = ll2ctypes.get_ctypes_type(T)
            cfield = getattr(cstruct, T._arrayfld)
            before_array_part = cfield.offset
            T = getattr(T, T._arrayfld)
        else:
            before_array_part = 0
        carray = ll2ctypes.get_ctypes_type(T)
        assert carray.length.size == WORD
        ofs_length = before_array_part + carray.length.offset
        basesize = before_array_part + carray.items.offset
        carrayitem = ll2ctypes.get_ctypes_type(T.OF)
        itemsize = ctypes.sizeof(carrayitem)
    return basesize, itemsize, ofs_length
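A hypothetical usage sketch (not part of the original snippet): the returned triple describes the array layout, so item i of an array at address a lives at a + basesize + i * itemsize, and the length word at a + ofs_length.

# Hypothetical usage sketch, untranslated (so the ctypes branch above runs).
from pypy.rpython.lltypesystem import lltype

A = lltype.GcArray(lltype.Signed)
basesize, itemsize, ofs_length = get_array_token(A, translate_support_code=False)
# item i of an array at address 'a' starts at  a + basesize + i * itemsize
# the length word of that array is stored at   a + ofs_length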
Example #2
def test_gc_offsets():
    STRUCT = lltype.GcStruct("S1", ("x", lltype.Signed), ("y", lltype.Char))
    ARRAY = lltype.GcArray(lltype.Signed)
    s1 = llarena.round_up_for_allocation(llmemory.sizeof(STRUCT))
    s2 = llmemory.offsetof(STRUCT, "x")
    s3 = llmemory.ArrayLengthOffset(ARRAY)
    s4 = llmemory.sizeof(ARRAY, 0)
    s5 = llmemory.ArrayItemsOffset(ARRAY)

    def fn():
        return s1 * 100000000 + s2 * 1000000 + s3 * 10000 + s4 * 100 + s5

    mod, f = compile_test(fn, [], gcpolicy="semispace")
    res = f()
    i1 = (res // 100000000) % 100
    i2 = (res // 1000000) % 100
    i3 = (res // 10000) % 100
    i4 = (res // 100) % 100
    i5 = (res // 1) % 100
    assert i1 % 4 == 0
    assert 12 <= i1 <= 24
    assert 4 <= i2 <= i1 - 8
    assert 4 <= i3 <= 12
    assert i4 == i5
    assert i3 + 4 <= i5
Example #3
 def walk_marked_objects(self, callback):
     num = 0
     size_gc_header = self.gcheaderbuilder.size_gc_header
     fromaddr = self.space
     toaddr = self.base_forwarding_addr
     while fromaddr < self.free:
         hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR))
         obj = fromaddr + size_gc_header
         survives = self.marked(obj)
         if survives:
             typeid = self.get_typeid_from_backup(num)
             num += 1
         else:
             typeid = self.get_type_id(obj)
         baseobjsize = self._get_size_for_typeid(obj, typeid)
         basesize = size_gc_header + baseobjsize
         totalsrcsize = basesize
         #
         if survives:
             grow_hash_field = False
             if hdr.tid & GCFLAG_SAVED_HASHFIELD:
                 totalsrcsize += llmemory.sizeof(lltype.Signed)
             totaldstsize = totalsrcsize
             if hdr.tid & (GCFLAG_SAVED_HASHTAKEN | GCFLAG_SAVED_HASHFIELD) == GCFLAG_SAVED_HASHTAKEN:
                 if self.toaddr_smaller_than_fromaddr(toaddr, fromaddr):
                     grow_hash_field = True
                     totaldstsize += llmemory.sizeof(lltype.Signed)
             callback(self, obj, typeid, basesize, toaddr, grow_hash_field)
             toaddr += totaldstsize
         else:
             if hdr.tid & GCFLAG_HASHFIELD:
                 totalsrcsize += llmemory.sizeof(lltype.Signed)
         #
         fromaddr += totalsrcsize
Example #4
def encode_type_shape(builder, info, TYPE):
    """Encode the shape of the TYPE into the TYPE_INFO structure 'info'."""
    offsets = offsets_to_gc_pointers(TYPE)
    info.ofstoptrs = builder.offsets2table(offsets, TYPE)
    info.finalizer = builder.make_finalizer_funcptr_for_type(TYPE)
    info.weakptrofs = weakpointer_offset(TYPE)
    if not TYPE._is_varsize():
        #info.isvarsize = False
        #info.gcptrinvarsize = False
        info.fixedsize = llarena.round_up_for_allocation(
            llmemory.sizeof(TYPE))
        info.ofstolength = -1
        # note about round_up_for_allocation(): in the 'info' table
        # we put a rounded-up size only for fixed-size objects.  For
        # varsize ones, the GC must anyway compute the size at run-time
        # and round up that result.
    else:
        #info.isvarsize = True
        info.fixedsize = llmemory.sizeof(TYPE, 0)
        if isinstance(TYPE, lltype.Struct):
            ARRAY = TYPE._flds[TYPE._arrayfld]
            ofs1 = llmemory.offsetof(TYPE, TYPE._arrayfld)
            info.ofstolength = ofs1 + llmemory.ArrayLengthOffset(ARRAY)
            info.ofstovar = ofs1 + llmemory.itemoffsetof(ARRAY, 0)
        else:
            ARRAY = TYPE
            info.ofstolength = llmemory.ArrayLengthOffset(ARRAY)
            info.ofstovar = llmemory.itemoffsetof(TYPE, 0)
        assert isinstance(ARRAY, lltype.Array)
        if ARRAY.OF != lltype.Void:
            offsets = offsets_to_gc_pointers(ARRAY.OF)
        else:
            offsets = ()
        info.varofstoptrs = builder.offsets2table(offsets, ARRAY.OF)
        info.varitemsize = llmemory.sizeof(ARRAY.OF)
Example #5
    def gct_malloc_varsize(self, hop):
        def intconst(c): return rmodel.inputconst(lltype.Signed, c)

        op = hop.spaceop
        TYPE = op.result.concretetype.TO
        assert TYPE._is_varsize()

        if isinstance(TYPE, lltype.Struct):
            ARRAY = TYPE._flds[TYPE._arrayfld]
        else:
            ARRAY = TYPE
        assert isinstance(ARRAY, lltype.Array)
        if ARRAY._hints.get('isrpystring', False):
            c_const_size = intconst(llmemory.sizeof(TYPE, 1))
        else:
            c_const_size = intconst(llmemory.sizeof(TYPE, 0))
        c_item_size = intconst(llmemory.sizeof(ARRAY.OF))

        if ARRAY._hints.get("nolength", False):
            v_raw = hop.genop("direct_call",
                               [self.malloc_varsize_no_length_ptr, op.args[-1],
                                c_const_size, c_item_size],
                               resulttype=llmemory.Address)
        else:
            if isinstance(TYPE, lltype.Struct):
                offset_to_length = llmemory.FieldOffset(TYPE, TYPE._arrayfld) + \
                                   llmemory.ArrayLengthOffset(ARRAY)
            else:
                offset_to_length = llmemory.ArrayLengthOffset(ARRAY)
            v_raw = hop.genop("direct_call",
                               [self.malloc_varsize_ptr, op.args[-1],
                                c_const_size, c_item_size, intconst(offset_to_length)],
                               resulttype=llmemory.Address)
        hop.cast_result(v_raw)
Example #6
 def identityhash(self, gcobj):
     # The following code should run at most twice.
     while 1:
         obj = llmemory.cast_ptr_to_adr(gcobj)
         hdr = self.header(obj)
         #
         if hdr.tid & GCFLAG_HASHFIELD:  # the hash is in a field at the end
             obj += self.get_size(obj)
             return obj.signed[0]
         #
         if not (hdr.tid & GCFLAG_HASHTAKEN):
             # It's the first time we ask for a hash, and it's not an
             # external object.  Shrink the top of space by the extra
             # hash word that will be needed after a collect.
             shrunk_top = self.top_of_space - llmemory.sizeof(lltype.Signed)
             if shrunk_top < self.free:
                 # Cannot shrink!  Do a collection, asking for at least
                 # one word of free space, and try again.  May raise
                 # MemoryError.  Obscure: not called directly, but
                 # across an llop, to make sure that there is the
                 # correct push_roots/pop_roots around the call...
                 llop.gc_obtain_free_space(llmemory.Address,
                                           llmemory.sizeof(lltype.Signed))
                 continue
             # Now we can have side-effects: set GCFLAG_HASHTAKEN
             # and lower the top of space.
             self.top_of_space = shrunk_top
             hdr.tid |= GCFLAG_HASHTAKEN
         #
         return llmemory.cast_adr_to_int(obj)  # direct case
Example #7
File: rgc.py Project: ieure/pypy
def ll_arraycopy(source, dest, source_start, dest_start, length):
    from pypy.rpython.lltypesystem.lloperation import llop
    from pypy.rlib.objectmodel import keepalive_until_here

    # supports non-overlapping copies only
    if not we_are_translated():
        if source == dest:
            assert (source_start + length <= dest_start or
                    dest_start + length <= source_start)

    TP = lltype.typeOf(source).TO
    assert TP == lltype.typeOf(dest).TO
    if isinstance(TP.OF, lltype.Ptr) and TP.OF.TO._gckind == 'gc':
        # perform a write barrier that copies necessary flags from
        # source to dest
        if not llop.gc_writebarrier_before_copy(lltype.Bool, source, dest):
            # if the write barrier is not supported, copy by hand
            for i in range(length):
                dest[i + dest_start] = source[i + source_start]
            return
    source_addr = llmemory.cast_ptr_to_adr(source)
    dest_addr   = llmemory.cast_ptr_to_adr(dest)
    cp_source_addr = (source_addr + llmemory.itemoffsetof(TP, 0) +
                      llmemory.sizeof(TP.OF) * source_start)
    cp_dest_addr = (dest_addr + llmemory.itemoffsetof(TP, 0) +
                    llmemory.sizeof(TP.OF) * dest_start)
    
    llmemory.raw_memcopy(cp_source_addr, cp_dest_addr,
                         llmemory.sizeof(TP.OF) * length)
    keepalive_until_here(source)
    keepalive_until_here(dest)
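A rough usage sketch (hypothetical, not from the original file): run untranslated, ll_arraycopy can be exercised directly on two GC arrays of the same type.

# Hypothetical usage sketch for ll_arraycopy(), untranslated.
from pypy.rpython.lltypesystem import lltype

TP = lltype.GcArray(lltype.Signed)
src = lltype.malloc(TP, 5)
dst = lltype.malloc(TP, 5)
for i in range(5):
    src[i] = i * 10
ll_arraycopy(src, dst, 0, 0, 5)        # non-overlapping copy of all five items
assert [dst[i] for i in range(5)] == [0, 10, 20, 30, 40]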
Example #8
 def test_simple_access(self):
     AddressStack = get_address_stack()
     addr0 = raw_malloc(llmemory.sizeof(lltype.Signed))
     addr1 = raw_malloc(llmemory.sizeof(lltype.Signed))
     addr2 = raw_malloc(llmemory.sizeof(lltype.Signed))
     ll = AddressStack()
     ll.append(addr0)
     ll.append(addr1)
     ll.append(addr2)
     assert ll.non_empty()
     a = ll.pop()
     assert a == addr2
     assert ll.non_empty()
     a = ll.pop()
     assert a == addr1
     assert ll.non_empty()
     a = ll.pop()
     assert a == addr0
     assert not ll.non_empty()
     ll.append(addr0)
     ll.delete()
     ll = AddressStack()
     ll.append(addr0)
     ll.append(addr1)
     ll.append(addr2)
     ll.append(NULL)
     a = ll.pop()
     assert a == NULL
     ll.delete()
     raw_free(addr2)
     raw_free(addr1)
     raw_free(addr0)
Example #9
 def identityhash(self, gcobj):
     # The following loop should run at most twice.
     while 1:
         obj = llmemory.cast_ptr_to_adr(gcobj)
         hdr = self.header(obj)
         if hdr.tid & GCFLAG_HASHMASK:
             break
         # It's the first time we ask for a hash, and it's not an
         # external object.  Shrink the top of space by the extra
         # hash word that will be needed after a collect.
         shrunk_top = self.top_of_space - llmemory.sizeof(lltype.Signed)
         if shrunk_top < self.free:
             # Cannot shrink!  Do a collection, asking for at least
             # one word of free space, and try again.  May raise
             # MemoryError.  Obscure: not called directly, but
             # across an llop, to make sure that there is the
             # correct push_roots/pop_roots around the call...
             llop.gc_obtain_free_space(llmemory.Address,
                                       llmemory.sizeof(lltype.Signed))
             continue
         else:
             # Now we can have side-effects: lower the top of space
             # and set one of the GC_HASH_TAKEN_xxx flags.
             self.top_of_space = shrunk_top
             if self.is_in_nursery(obj):
                 hdr.tid |= GC_HASH_TAKEN_NURS
             else:
                 hdr.tid |= GC_HASH_TAKEN_ADDR
             break
     # Now we can return the result
     objsize = self.get_size(obj)
     return self._get_object_hash(obj, objsize, hdr.tid)
Example #10
    def test_primitive(self):
        assert lltype2ctypes(5) == 5
        assert lltype2ctypes('?') == ord('?')
        assert lltype2ctypes('\xE0') == 0xE0
        assert lltype2ctypes(unichr(1234)) == 1234
        assert ctypes2lltype(lltype.Signed, 5) == 5
        assert ctypes2lltype(lltype.Char, ord('a')) == 'a'
        assert ctypes2lltype(lltype.UniChar, ord(u'x')) == u'x'
        assert ctypes2lltype(lltype.Char, 0xFF) == '\xFF'
        assert lltype2ctypes(5.25) == 5.25
        assert ctypes2lltype(lltype.Float, 5.25) == 5.25
        assert lltype2ctypes(u'x') == ord(u'x')
        res = lltype2ctypes(rffi.r_singlefloat(-3.5))
        assert isinstance(res, ctypes.c_float)
        assert res.value == -3.5
        res = ctypes2lltype(lltype.SingleFloat, ctypes.c_float(-3.5))
        assert isinstance(res, rffi.r_singlefloat)
        assert float(res) == -3.5
        assert lltype2ctypes(rffi.r_ulong(-1)) == sys.maxint * 2 + 1
        res = ctypes2lltype(lltype.Unsigned, sys.maxint * 2 + 1)
        assert (res, type(res)) == (rffi.r_ulong(-1), rffi.r_ulong)

        res = lltype2ctypes(llmemory.sizeof(lltype.Signed))
        assert res == struct.calcsize("l")
        S = lltype.Struct('S', ('x', lltype.Signed), ('y', lltype.Signed))
        res = lltype2ctypes(llmemory.sizeof(S))
        assert res == struct.calcsize("ll")

        p = lltype.nullptr(S)
        cptr = lltype2ctypes(p)
        assert not cptr
        py.test.raises(ValueError, 'cptr.contents')   # NULL pointer access
        res = ctypes2lltype(lltype.Ptr(S), cptr)
        assert res == p
        assert not ALLOCATED     # detects memory leaks in the test
Example #11
    def varsize_malloc_helper(self, hop, flags, meth, extraargs):
        def intconst(c): return rmodel.inputconst(lltype.Signed, c)
        op = hop.spaceop
        TYPE = op.result.concretetype.TO
        assert TYPE._is_varsize()
        if isinstance(TYPE, lltype.Struct):
            ARRAY = TYPE._flds[TYPE._arrayfld]
        else:
            ARRAY = TYPE
        assert isinstance(ARRAY, lltype.Array)
        c_const_size = intconst(llmemory.sizeof(TYPE, 0))
        c_item_size = intconst(llmemory.sizeof(ARRAY.OF))

        if ARRAY._hints.get("nolength", False):
            c_offset_to_length = None
        else:
            if isinstance(TYPE, lltype.Struct):
                offset_to_length = llmemory.FieldOffset(TYPE, TYPE._arrayfld) + \
                                   llmemory.ArrayLengthOffset(ARRAY)
            else:
                offset_to_length = llmemory.ArrayLengthOffset(ARRAY)
            c_offset_to_length = intconst(offset_to_length)

        args = [hop] + extraargs + [flags, TYPE,
                op.args[-1], c_const_size, c_item_size, c_offset_to_length]
        v_raw = meth(*args)

        hop.cast_result(v_raw)
Example #12
 def compute_alive_objects(self):
     fromaddr = self.space
     addraftercollect = self.space
     num = 1
     while fromaddr < self.free:
         size_gc_header = self.gcheaderbuilder.size_gc_header
         tid = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR)).tid
         obj = fromaddr + size_gc_header
         objsize = self.get_size(obj)
         objtotalsize = size_gc_header + objsize
         if self.marked(obj):
             copy_has_hash_field = ((tid & GCFLAG_HASHFIELD) != 0 or
                                    ((tid & GCFLAG_HASHTAKEN) != 0 and
                                     addraftercollect < fromaddr))
             addraftercollect += raw_malloc_usage(objtotalsize)
             if copy_has_hash_field:
                 addraftercollect += llmemory.sizeof(lltype.Signed)
         num += 1
         fromaddr += objtotalsize
         if tid & GCFLAG_HASHFIELD:
             fromaddr += llmemory.sizeof(lltype.Signed)
     ll_assert(addraftercollect <= fromaddr,
               "markcompactcollect() is trying to increase memory usage")
     self.totalsize_of_objs = addraftercollect - self.space
     return num
Example #13
def get_size(TYPE, translate_support_code):
    if translate_support_code:
        if TYPE._is_varsize():
            return llmemory.sizeof(TYPE, 0)
        return llmemory.sizeof(TYPE)
    ctype = ll2ctypes.get_ctypes_type(TYPE)
    return ctypes.sizeof(ctype)
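As an illustration (an assumption, mirroring the assertions in Example #10 above): untranslated, get_size() falls back to ctypes, so a struct of two Signed fields reports the platform size of two longs.

# Hypothetical illustration, untranslated: the ctypes fallback is taken.
import struct
from pypy.rpython.lltypesystem import lltype

S = lltype.Struct('S', ('x', lltype.Signed), ('y', lltype.Signed))
assert get_size(S, translate_support_code=False) == struct.calcsize("ll")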
Example #14
 def link(pageaddr, size_class, size_block, nblocks, nusedblocks, step=1):
     assert step in (1, 2)
     llarena.arena_reserve(pageaddr, llmemory.sizeof(PAGE_HEADER))
     page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR)
     if step == 1:
         page.nfree = 0
         nuninitialized = nblocks - nusedblocks
     else:
         page.nfree = nusedblocks
         nuninitialized = nblocks - 2*nusedblocks
     page.freeblock = pageaddr + hdrsize + nusedblocks * size_block
     if nusedblocks < nblocks:
         chainedlists = ac.page_for_size
     else:
         chainedlists = ac.full_page_for_size
     page.nextpage = chainedlists[size_class]
     page.arena = ac.current_arena
     chainedlists[size_class] = page
     if fill_with_objects:
         for i in range(0, nusedblocks*step, step):
             objaddr = pageaddr + hdrsize + i * size_block
             llarena.arena_reserve(objaddr, _dummy_size(size_block))
         if step == 2:
             prev = 'page.freeblock'
             for i in range(1, nusedblocks*step, step):
                 holeaddr = pageaddr + hdrsize + i * size_block
                 llarena.arena_reserve(holeaddr,
                                       llmemory.sizeof(llmemory.Address))
                 exec '%s = holeaddr' % prev in globals(), locals()
                 prevhole = holeaddr
                 prev = 'prevhole.address[0]'
             endaddr = pageaddr + hdrsize + 2*nusedblocks * size_block
             exec '%s = endaddr' % prev in globals(), locals()
     assert ac._nuninitialized(page, size_class) == nuninitialized
Example #15
    def update_forward_pointers(self, toaddr, maxnum):
        self.base_forwarding_addr = base_forwarding_addr = toaddr
        fromaddr = self.space
        size_gc_header = self.gcheaderbuilder.size_gc_header
        num = 0
        while fromaddr < self.free:
            hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR))
            obj = fromaddr + size_gc_header
            # compute the original object size, including the
            # optional hash field
            basesize = size_gc_header + self.get_size(obj)
            totalsrcsize = basesize
            if hdr.tid & GCFLAG_HASHFIELD:  # already a hash field, copy it too
                totalsrcsize += llmemory.sizeof(lltype.Signed)
            #
            if self.marked(obj):
                # the object is marked as surviving.  Compute the new object
                # size
                totaldstsize = totalsrcsize
                if hdr.tid & (GCFLAG_HASHTAKEN | GCFLAG_HASHFIELD) == GCFLAG_HASHTAKEN:
                    # grow a new hash field -- with the exception: if
                    # the object actually doesn't move, don't
                    # (otherwise, we get a bogus toaddr > fromaddr)
                    if self.toaddr_smaller_than_fromaddr(toaddr, fromaddr):
                        totaldstsize += llmemory.sizeof(lltype.Signed)
                #
                if not translated_to_c():
                    llarena.arena_reserve(toaddr, basesize)
                    if raw_malloc_usage(totaldstsize) > raw_malloc_usage(basesize):
                        llarena.arena_reserve(toaddr + basesize, llmemory.sizeof(lltype.Signed))
                #
                # save the field hdr.tid in the array tid_backup
                ll_assert(num < maxnum, "overflow of the tid_backup table")
                self.tid_backup[num] = self.get_type_id(obj)
                num += 1
                # compute forward_offset, the offset to the future copy
                # of this object
                forward_offset = toaddr - base_forwarding_addr
                # copy the first two gc flags in forward_offset
                ll_assert(forward_offset & 3 == 0, "misalignment!")
                forward_offset |= (hdr.tid >> first_gcflag_bit) & 3
                hdr.tid = forward_offset | GCFLAG_MARKBIT
                ll_assert(self.marked(obj), "re-marking object failed!")
                # done
                toaddr += totaldstsize
            #
            fromaddr += totalsrcsize
            if not translated_to_c():
                assert toaddr - base_forwarding_addr <= fromaddr - self.space
        self.num_alive_objs = num
        self.finaladdr = toaddr

        # now update references
        self.root_walker.walk_roots(
            MarkCompactGC._update_ref,  # stack roots
            MarkCompactGC._update_ref,  # static in prebuilt non-gc structures
            MarkCompactGC._update_ref,
        )  # static in prebuilt gc objects
        self.walk_marked_objects(MarkCompactGC.trace_and_update_ref)
Example #16
 def markcompactcollect(self, needed=0):
     start_time = self.debug_collect_start()
     self.debug_check_consistency()
     self.to_see = self.AddressStack()
     self.mark_roots_recursively()
     if (self.objects_with_finalizers.non_empty() or
         self.run_finalizers.non_empty()):
         self.mark_objects_with_finalizers()
         self._trace_and_mark()
     self.to_see.delete()
     num_of_alive_objs = self.compute_alive_objects()
     size_of_alive_objs = self.totalsize_of_objs
     totalsize = self.new_space_size(size_of_alive_objs, needed +
                                     num_of_alive_objs * BYTES_PER_TID)
     tid_backup_size = (llmemory.sizeof(self.TID_BACKUP, 0) +
                        llmemory.sizeof(TID_TYPE) * num_of_alive_objs)
     used_space_now = self.next_collect_after + raw_malloc_usage(tid_backup_size)
     if totalsize >= self.space_size or used_space_now >= self.space_size:
         toaddr = self.double_space_size(totalsize)
         llarena.arena_reserve(toaddr + size_of_alive_objs, tid_backup_size)
         self.tid_backup = llmemory.cast_adr_to_ptr(
             toaddr + size_of_alive_objs,
             lltype.Ptr(self.TID_BACKUP))
         resizing = True
     else:
         toaddr = llarena.arena_new_view(self.space)
         llarena.arena_reserve(self.top_of_space, tid_backup_size)
         self.tid_backup = llmemory.cast_adr_to_ptr(
             self.top_of_space,
             lltype.Ptr(self.TID_BACKUP))
         resizing = False
     self.next_collect_after = totalsize
     weakref_offsets = self.collect_weakref_offsets()
     finaladdr = self.update_forward_pointers(toaddr, num_of_alive_objs)
     if (self.run_finalizers.non_empty() or
         self.objects_with_finalizers.non_empty()):
         self.update_run_finalizers()
     if self.objects_with_weakrefs.non_empty():
         self.invalidate_weakrefs(weakref_offsets)
     self.update_objects_with_id()
     self.compact(resizing)
     if not resizing:
         size = toaddr + self.space_size - finaladdr
         llarena.arena_reset(finaladdr, size, True)
     else:
         if we_are_translated():
             # because we free stuff already in raw_memmove, we
             # would get double free here. Let's free it anyway
             llarena.arena_free(self.space)
         llarena.arena_reset(toaddr + size_of_alive_objs, tid_backup_size,
                             True)
     self.space        = toaddr
     self.free         = finaladdr
     self.top_of_space = toaddr + self.next_collect_after
     self.debug_check_consistency()
     self.tid_backup = lltype.nullptr(self.TID_BACKUP)
     if self.run_finalizers.non_empty():
         self.execute_finalizers()
     self.debug_collect_finish(start_time)
Example #17
def test_sizeof_array_with_no_length():
    A = lltype.Array(lltype.Signed, hints={'nolength': True})
    arraysize = llmemory.sizeof(A, 10)
    signedsize = llmemory.sizeof(lltype.Signed)
    def f():
        return arraysize-signedsize*10
    fn, t = getcompiled(f, [])
    res = fn()
    assert res == 0
Example #18
def encode_type_shape(builder, info, TYPE, index):
    """Encode the shape of the TYPE into the TYPE_INFO structure 'info'."""
    offsets = offsets_to_gc_pointers(TYPE)
    infobits = index
    info.ofstoptrs = builder.offsets2table(offsets, TYPE)
    #
    kind_and_fptr = builder.special_funcptr_for_type(TYPE)
    if kind_and_fptr is not None:
        kind, fptr = kind_and_fptr
        info.finalizer_or_customtrace = fptr
        if kind == "finalizer":
            infobits |= T_HAS_FINALIZER
        elif kind == "light_finalizer":
            infobits |= T_HAS_FINALIZER | T_HAS_LIGHTWEIGHT_FINALIZER
        elif kind == "custom_trace":
            infobits |= T_HAS_CUSTOM_TRACE
        else:
            assert 0, kind
    #
    if not TYPE._is_varsize():
        info.fixedsize = llarena.round_up_for_allocation(llmemory.sizeof(TYPE), builder.GCClass.object_minimal_size)
        # note about round_up_for_allocation(): in the 'info' table
        # we put a rounded-up size only for fixed-size objects.  For
        # varsize ones, the GC must anyway compute the size at run-time
        # and round up that result.
    else:
        infobits |= T_IS_VARSIZE
        varinfo = lltype.cast_pointer(GCData.VARSIZE_TYPE_INFO_PTR, info)
        info.fixedsize = llmemory.sizeof(TYPE, 0)
        if isinstance(TYPE, lltype.Struct):
            ARRAY = TYPE._flds[TYPE._arrayfld]
            ofs1 = llmemory.offsetof(TYPE, TYPE._arrayfld)
            varinfo.ofstolength = ofs1 + llmemory.ArrayLengthOffset(ARRAY)
            varinfo.ofstovar = ofs1 + llmemory.itemoffsetof(ARRAY, 0)
        else:
            assert isinstance(TYPE, lltype.GcArray)
            ARRAY = TYPE
            if isinstance(ARRAY.OF, lltype.Ptr) and ARRAY.OF.TO._gckind == "gc":
                infobits |= T_IS_GCARRAY_OF_GCPTR
            varinfo.ofstolength = llmemory.ArrayLengthOffset(ARRAY)
            varinfo.ofstovar = llmemory.itemoffsetof(TYPE, 0)
        assert isinstance(ARRAY, lltype.Array)
        if ARRAY.OF != lltype.Void:
            offsets = offsets_to_gc_pointers(ARRAY.OF)
        else:
            offsets = ()
        if len(offsets) > 0:
            infobits |= T_HAS_GCPTR_IN_VARSIZE
        varinfo.varofstoptrs = builder.offsets2table(offsets, ARRAY.OF)
        varinfo.varitemsize = llmemory.sizeof(ARRAY.OF)
    if builder.is_weakref_type(TYPE):
        infobits |= T_IS_WEAKREF
    if is_subclass_of_object(TYPE):
        infobits |= T_IS_RPYTHON_INSTANCE
    info.infobits = infobits | T_KEY_VALUE
Example #19
def test_itemoffset_void():
    py.test.skip("not supported")
    A = lltype.GcArray(lltype.Void)
    s = llmemory.sizeof(A, 1)
    s += llmemory.sizeof(lltype.Signed)
    print s
    def f():
        return s
    fn = compile_function(f, [])
    res = fn()
    assert res > 0
Example #20
def test_shrink_obj():
    from pypy.rpython.memory.gcheader import GCHeaderBuilder
    HDR = lltype.Struct('HDR', ('h', lltype.Signed))
    gcheaderbuilder = GCHeaderBuilder(HDR)
    size_gc_header = gcheaderbuilder.size_gc_header
    S = lltype.GcStruct('S', ('x', lltype.Signed),
                             ('a', lltype.Array(lltype.Signed)))
    myarenasize = 200
    a = arena_malloc(myarenasize, False)
    arena_reserve(a, size_gc_header + llmemory.sizeof(S, 10))
    arena_shrink_obj(a, size_gc_header + llmemory.sizeof(S, 5))
    arena_reset(a, size_gc_header + llmemory.sizeof(S, 5), False)
Example #21
 def allocate_new_page(self, size_class):
     """Allocate and return a new page for the given size_class."""
     #
     # Allocate a new arena if needed.
     if self.current_arena == ARENA_NULL:
         self.allocate_new_arena()
     #
     # The result is simply 'current_arena.freepages'.
     arena = self.current_arena
     result = arena.freepages
     if arena.nfreepages > 0:
         #
         # The 'result' was part of the chained list; read the next.
         arena.nfreepages -= 1
         freepages = result.address[0]
         llarena.arena_reset(result,
                             llmemory.sizeof(llmemory.Address),
                             0)
         #
     else:
         # The 'result' is part of the uninitialized pages.
         ll_assert(self.num_uninitialized_pages > 0,
                   "fully allocated arena found in self.current_arena")
         self.num_uninitialized_pages -= 1
         if self.num_uninitialized_pages > 0:
             freepages = result + self.page_size
         else:
             freepages = NULL
     #
     arena.freepages = freepages
     if freepages == NULL:
         # This was the last page, so put the arena away into
         # arenas_lists[0].
         ll_assert(arena.nfreepages == 0, 
                   "freepages == NULL but nfreepages > 0")
         arena.nextarena = self.arenas_lists[0]
         self.arenas_lists[0] = arena
         self.current_arena = ARENA_NULL
     #
     # Initialize the fields of the resulting page
     llarena.arena_reserve(result, llmemory.sizeof(PAGE_HEADER))
     page = llmemory.cast_adr_to_ptr(result, PAGE_PTR)
     page.arena = arena
     page.nfree = 0
     page.freeblock = result + self.hdrsize
     page.nextpage = PAGE_NULL
     ll_assert(self.page_for_size[size_class] == PAGE_NULL,
               "allocate_new_page() called but a page is already waiting")
     self.page_for_size[size_class] = page
     return page
Example #22
def test_sizeof_void_array():
    from pypy.rpython.lltypesystem import llmemory
    A = Array(Void)
    size1 = llmemory.sizeof(A, 1)
    size2 = llmemory.sizeof(A, 14)
    def f(x):
        if x:
            return size1
        else:
            return size2
    fn = compile_function(f, [int])
    res1 = fn(1)
    res2 = fn(0)
    assert res1 == res2
Example #23
def test_look_inside_object():
    # this code is also used in translation tests below
    myarenasize = 50
    a = arena_malloc(myarenasize, False)
    b = a + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(b, precomputed_size)
    (b + llmemory.offsetof(SX, 'x')).signed[0] = 123
    assert llmemory.cast_adr_to_ptr(b, SPTR).x == 123
    llmemory.cast_adr_to_ptr(b, SPTR).x += 1
    assert (b + llmemory.offsetof(SX, 'x')).signed[0] == 124
    arena_reset(a, myarenasize, True)
    arena_reserve(b, round_up_for_allocation(llmemory.sizeof(SX)))
    assert llmemory.cast_adr_to_ptr(b, SPTR).x == 0
    arena_free(a)
    return 42
Example #24
    def test_keep_all_keepalives(self):
        SIZE = llmemory.sizeof(lltype.Signed)
        PARRAY = lltype.Ptr(lltype.FixedSizeArray(lltype.Signed, 1))
        class A:
            def __init__(self):
                self.addr = llmemory.raw_malloc(SIZE)
            def __del__(self):
                llmemory.raw_free(self.addr)
        class B:
            pass
        def myfunc():
            b = B()
            b.keep = A()
            b.data = llmemory.cast_adr_to_ptr(b.keep.addr, PARRAY)
            b.data[0] = 42
            ptr = b.data
            # normally 'b' could go away as early as here, which would free
            # the memory held by the instance of A in b.keep...
            res = ptr[0]
            # ...so we explicitly keep 'b' alive until here
            objectmodel.keepalive_until_here(b)
            return res
        graph = self.check(myfunc, [], [], 42,
                           must_be_removed=False)    # 'A' instance left

        # there is a getarrayitem near the end of the graph of myfunc.
        # However, the memory it accesses must still be protected by the
        # following keepalive, even after malloc removal
        entrymap = mkentrymap(graph)
        [link] = entrymap[graph.returnblock]
        assert link.prevblock.operations[-1].opname == 'keepalive'
Example #25
def sizeof(tp):
    """Similar to llmemory.sizeof() but tries hard to return a integer
    instead of a symbolic value.
    """
    if isinstance(tp, lltype.Typedef):
        tp = tp.OF
    if isinstance(tp, lltype.FixedSizeArray):
        return sizeof(tp.OF) * tp.length
    if isinstance(tp, lltype.Struct):
        # the hint is present in structures probed by rffi_platform.
        size = tp._hints.get('size')
        if size is None:
            size = llmemory.sizeof(tp)    # a symbolic result in this case
        return size
    if isinstance(tp, lltype.Ptr) or tp is llmemory.Address:
        tp = ULONG     # XXX!
    if tp is lltype.Char or tp is lltype.Bool:
        return 1
    if tp is lltype.UniChar:
        return r_wchar_t.BITS/8
    if tp is lltype.Float:
        return 8
    if tp is lltype.SingleFloat:
        return 4
    assert isinstance(tp, lltype.Number)
    if tp is lltype.Signed:
        return ULONG._type.BITS/8
    return tp._type.BITS/8
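A few concrete results read off the branches above (a sketch only; it assumes the surrounding module's usual lltype imports).

# Illustrative expectations derived directly from the branches above.
assert sizeof(lltype.Char) == 1
assert sizeof(lltype.Bool) == 1
assert sizeof(lltype.Float) == 8
assert sizeof(lltype.SingleFloat) == 4
assert sizeof(lltype.FixedSizeArray(lltype.Char, 10)) == 10   # 10 * sizeof(Char)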
Example #26
File: hybrid.py Project: ieure/pypy
    def make_a_nonmoving_copy(self, obj, objsize):
        # NB. the object can have a finalizer or be a weakref, but
        # it's not an issue.
        totalsize = self.size_gc_header() + objsize
        tid = self.header(obj).tid
        if tid & GCFLAG_HASHMASK:
            totalsize_incl_hash = totalsize + llmemory.sizeof(lltype.Signed)
        else:
            totalsize_incl_hash = totalsize
        newaddr = self.allocate_external_object(totalsize_incl_hash)
        if not newaddr:
            return llmemory.NULL   # can't raise MemoryError during a collect()
        self._nonmoving_copy_count += 1
        self._nonmoving_copy_size += raw_malloc_usage(totalsize)

        llmemory.raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
        if tid & GCFLAG_HASHMASK:
            hash = self._get_object_hash(obj, objsize, tid)
            (newaddr + totalsize).signed[0] = hash
            tid |= GC_HASH_HASFIELD
        #
        # GCFLAG_UNVISITED is not set
        # GCFLAG_NO_HEAP_PTRS is not set either, conservatively.  It may be
        # set by the next collection's collect_last_generation_roots().
        # This old object is immediately put at generation 3.
        newobj = newaddr + self.size_gc_header()
        hdr = self.header(newobj)
        hdr.tid = tid | self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS
        ll_assert(self.is_last_generation(newobj),
                  "make_a_nonmoving_copy: object too young")
        self.gen3_rawmalloced_objects.append(newobj)
        self.last_generation_root_objects.append(newobj)
        self.rawmalloced_objects_to_trace.append(newobj)   # visit me
        return newobj
Example #27
    def test_qsort(self):
        CMPFUNC = lltype.FuncType([rffi.VOIDP, rffi.VOIDP], rffi.INT)
        qsort = rffi.llexternal('qsort', [rffi.VOIDP,
                                          rffi.SIZE_T,
                                          rffi.SIZE_T,
                                          lltype.Ptr(CMPFUNC)],
                                lltype.Void)

        lst = [23, 43, 24, 324, 242, 34, 78, 5, 3, 10]
        A = lltype.Array(lltype.Signed, hints={'nolength': True})
        a = lltype.malloc(A, 10, flavor='raw')
        for i in range(10):
            a[i] = lst[i]

        SIGNEDPTR = lltype.Ptr(lltype.FixedSizeArray(lltype.Signed, 1))

        def my_compar(p1, p2):
            p1 = rffi.cast(SIGNEDPTR, p1)
            p2 = rffi.cast(SIGNEDPTR, p2)
            print 'my_compar:', p1[0], p2[0]
            return rffi.cast(rffi.INT, cmp(p1[0], p2[0]))

        qsort(rffi.cast(rffi.VOIDP, a),
              rffi.cast(rffi.SIZE_T, 10),
              rffi.cast(rffi.SIZE_T, llmemory.sizeof(lltype.Signed)),
              llhelper(lltype.Ptr(CMPFUNC), my_compar))

        for i in range(10):
            print a[i],
        print
        lst.sort()
        for i in range(10):
            assert a[i] == lst[i]
        lltype.free(a, flavor='raw')
        assert not ALLOCATED     # detects memory leaks in the test
Example #28
def test_address_order():
    a = arena_malloc(20, False)
    assert eq(a, a)
    assert lt(a, a+1)
    assert lt(a+5, a+20)

    b = arena_malloc(20, False)
    if a > b:
        a, b = b, a
    assert lt(a, b)
    assert lt(a+19, b)
    assert lt(a, b+19)

    c = b + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(c, precomputed_size)
    assert lt(b, c)
    assert lt(a, c)
    assert lt(llmemory.NULL, c)
    d = c + llmemory.offsetof(SX, 'x')
    assert lt(c, d)
    assert lt(b, d)
    assert lt(a, d)
    assert lt(llmemory.NULL, d)
    e = c + precomputed_size
    assert lt(d, e)
    assert lt(c, e)
    assert lt(b, e)
    assert lt(a, e)
    assert lt(llmemory.NULL, e)
Example #29
 def _gct_resize_buffer_no_realloc(self, hop, v_lgt):
     op = hop.spaceop
     meth = self.gct_fv_gc_malloc_varsize
     flags = {'flavor':'gc', 'varsize': True, 'keep_current_args': True}
     self.varsize_malloc_helper(hop, flags, meth, [])
     # fish resvar
     v_newbuf = hop.llops[-1].result
     v_src = op.args[0]
     TYPE = v_src.concretetype.TO
     c_fldname = rmodel.inputconst(lltype.Void, TYPE._arrayfld)
     v_adrsrc = hop.genop('cast_ptr_to_adr', [v_src],
                          resulttype=llmemory.Address)
     v_adrnewbuf = hop.genop('cast_ptr_to_adr', [v_newbuf],
                             resulttype=llmemory.Address)
     ofs = (llmemory.offsetof(TYPE, TYPE._arrayfld) +
            llmemory.itemoffsetof(getattr(TYPE, TYPE._arrayfld), 0))
     v_ofs = rmodel.inputconst(lltype.Signed, ofs)
     v_adrsrc = hop.genop('adr_add', [v_adrsrc, v_ofs],
                          resulttype=llmemory.Address)
     v_adrnewbuf = hop.genop('adr_add', [v_adrnewbuf, v_ofs],
                             resulttype=llmemory.Address)
     size = llmemory.sizeof(getattr(TYPE, TYPE._arrayfld).OF)
     c_size = rmodel.inputconst(lltype.Signed, size)
     v_lgtsym = hop.genop('int_mul', [c_size, v_lgt],
                          resulttype=lltype.Signed) 
     vlist = [v_adrsrc, v_adrnewbuf, v_lgtsym]
     hop.genop('raw_memcopy', vlist)
Example #30
File: rstr.py Project: alkorzt/pypy
 def copy_string_contents(src, dst, srcstart, dststart, length):
     assert srcstart >= 0
     assert dststart >= 0
     assert length >= 0
     src = llmemory.cast_ptr_to_adr(src) + _str_ofs(srcstart)
     dst = llmemory.cast_ptr_to_adr(dst) + _str_ofs(dststart)
     llmemory.raw_memcopy(src, dst, llmemory.sizeof(CHAR_TP) * length)
Example #31
 def allocToken(T):
     return llmemory.sizeof(T)
Example #32
 def nextleft(iself, gc, range_lowest, prev):
     # Return the next valid GC object's address, in right-to-left
     # order from the shadowstack array.  This usually means just
     # returning "prev - sizeofaddr", until we reach "range_lowest",
     # except that we are skipping NULLs.  If "prev - sizeofaddr"
     # contains a MARKER_FRAME instead, then we go into
     # JIT-frame-lookup mode.
     #
     while True:
         #
         # If we are not iterating right now in a JIT frame
         if iself.frame_addr == 0:
             #
             # Look for the next shadowstack address that
             # contains a valid pointer
             while prev != range_lowest:
                 prev -= llmemory.sizeof(llmemory.Address)
                 if prev.signed[0] == self.MARKER_FRAME:
                     break
                 if gc.points_to_valid_gc_object(prev):
                     return prev
             else:
                 return llmemory.NULL  # done
             #
             # It's a JIT frame.  Save away 'prev' for later, and
             # go into JIT-frame-exploring mode.
             prev -= llmemory.sizeof(llmemory.Address)
             frame_addr = prev.signed[0]
             iself.saved_prev = prev
             iself.frame_addr = frame_addr
             addr = llmemory.cast_int_to_adr(frame_addr +
                                             self.force_index_ofs)
             addr = iself.translateptr(iself.context, addr)
             force_index = addr.signed[0]
             if force_index < 0:
                 force_index = ~force_index
             # NB: the next line reads a still-alive _callshapes,
             # because we ensure that just before we called this
             # piece of assembler, we put on the (same) stack a
             # pointer to a loop_token that keeps the force_index
             # alive.
             callshape = self._callshapes[force_index]
         else:
             # Continuing to explore this JIT frame
             callshape = iself.callshape
         #
         # 'callshape' points to the next INT of the callshape.
         # If it's zero we are done with the JIT frame.
         while rffi.cast(lltype.Signed, callshape[0]) != 0:
             #
             # Non-zero: it's an offset inside the JIT frame.
             # Read it and increment 'callshape'.
             offset = rffi.cast(lltype.Signed, callshape[0])
             callshape = lltype.direct_ptradd(callshape, 1)
             addr = llmemory.cast_int_to_adr(iself.frame_addr +
                                             offset)
             addr = iself.translateptr(iself.context, addr)
             if gc.points_to_valid_gc_object(addr):
                 #
                 # The JIT frame contains a valid GC pointer at
                 # this address (as opposed to NULL).  Save
                 # 'callshape' for the next call, and return the
                 # address.
                 iself.callshape = callshape
                 return addr
         #
         # Restore 'prev' and loop back to the start.
         iself.frame_addr = 0
         prev = iself.saved_prev
Example #33
 def _str_ofs(item):
     return (llmemory.offsetof(TP, 'chars') +
             llmemory.itemoffsetof(TP.chars, 0) +
             llmemory.sizeof(CHAR_TP) * item)
Example #34
File: boehm.py Project: njues/Sypy
                  resultvar=hop.spaceop.result)

    def gct_gc_id(self, hop):
        # this is the logic from the HIDE_POINTER macro in <gc/gc.h>
        v_int = hop.genop('cast_ptr_to_int', [hop.spaceop.args[0]],
                          resulttype=lltype.Signed)
        hop.genop('int_invert', [v_int], resultvar=hop.spaceop.result)


########## weakrefs ##########
# Boehm: weakref objects are small structures containing only a Boehm
# disappearing link.  We don't have to hide the link's value with
# HIDE_POINTER(), because we explicitly use GC_MALLOC_ATOMIC().

WEAKLINK = lltype.FixedSizeArray(llmemory.Address, 1)
sizeof_weakreflink = llmemory.sizeof(WEAKLINK)
empty_weaklink = lltype.malloc(WEAKLINK, immortal=True)
empty_weaklink[0] = llmemory.NULL


def ll_weakref_create(targetaddr):
    link = llop.boehm_malloc_atomic(llmemory.Address, sizeof_weakreflink)
    if not link:
        raise MemoryError
    plink = llmemory.cast_adr_to_ptr(link, lltype.Ptr(WEAKLINK))
    plink[0] = targetaddr
    llop.boehm_disappearing_link(lltype.Void, link, targetaddr)
    return llmemory.cast_ptr_to_weakrefptr(plink)


def ll_weakref_deref(wref):
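    # -- The listing is cut off here.  A plausible body (an assumption, based
    # -- on the symmetric ll_weakref_create() above): cast the weakref back to
    # -- a WEAKLINK pointer and read back the stored address.
    plink = llmemory.cast_weakrefptr_to_ptr(lltype.Ptr(WEAKLINK), wref)
    return plink[0]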
Example #35
class TypeLayoutBuilder(object):
    can_add_new_types = True
    can_encode_type_shape = True  # set to False initially by the JIT

    size_of_fixed_type_info = llmemory.sizeof(GCData.TYPE_INFO)

    def __init__(self, GCClass, lltype2vtable):
        self.GCClass = GCClass
        self.lltype2vtable = lltype2vtable
        self.make_type_info_group()
        self.id_of_type = {}  # {LLTYPE: type_id}
        self.seen_roots = {}
        # the following are lists of addresses of gc pointers living inside the
        # prebuilt structures.  They should list all the locations that could
        # possibly point to a GC heap object.
        # this list contains pointers in GcStructs and GcArrays
        self.addresses_of_static_ptrs = []
        # this list contains pointers in raw Structs and Arrays
        self.addresses_of_static_ptrs_in_nongc = []
        # for debugging, the following list collects all the prebuilt
        # GcStructs and GcArrays
        self.all_prebuilt_gc = []
        self.finalizer_funcptrs = {}
        self.offsettable_cache = {}

    def make_type_info_group(self):
        self.type_info_group = llgroup.group("typeinfo")
        # don't use typeid 0, may help debugging
        DUMMY = lltype.Struct("dummy", ('x', lltype.Signed))
        dummy = lltype.malloc(DUMMY, immortal=True, zero=True)
        self.type_info_group.add_member(dummy)

    def get_type_id(self, TYPE):
        try:
            return self.id_of_type[TYPE]
        except KeyError:
            assert self.can_add_new_types
            assert isinstance(TYPE, (lltype.GcStruct, lltype.GcArray))
            # Record the new type_id description as a TYPE_INFO structure.
            # build the TYPE_INFO structure
            if not TYPE._is_varsize():
                fullinfo = lltype.malloc(GCData.TYPE_INFO,
                                         immortal=True,
                                         zero=True)
                info = fullinfo
            else:
                fullinfo = lltype.malloc(GCData.VARSIZE_TYPE_INFO,
                                         immortal=True,
                                         zero=True)
                info = fullinfo.header
            if self.can_encode_type_shape:
                encode_type_shape(self, info, TYPE)
            else:
                self._pending_type_shapes.append((info, TYPE))
            # store it
            type_id = self.type_info_group.add_member(fullinfo)
            self.id_of_type[TYPE] = type_id
            # store the vtable of the type (if any) immediately thereafter
            # (note that if gcconfig.removetypeptr is False, lltype2vtable
            # is empty)
            vtable = self.lltype2vtable.get(TYPE, None)
            if vtable is not None:
                # check that if we have a vtable, we are not varsize
                assert lltype.typeOf(fullinfo) == GCData.TYPE_INFO_PTR
                vtable = lltype.normalizeptr(vtable)
                self.type_info_group.add_member(vtable)
            return type_id

    def get_info(self, type_id):
        return llop.get_group_member(GCData.TYPE_INFO_PTR,
                                     self.type_info_group._as_ptr(), type_id)

    def get_info_varsize(self, type_id):
        return llop.get_group_member(GCData.VARSIZE_TYPE_INFO_PTR,
                                     self.type_info_group._as_ptr(), type_id)

    def is_weakref(self, type_id):
        return self.get_info(type_id).infobits & T_IS_WEAKREF

    def encode_type_shapes_now(self):
        if not self.can_encode_type_shape:
            self.can_encode_type_shape = True
            for info, TYPE in self._pending_type_shapes:
                encode_type_shape(self, info, TYPE)
            del self._pending_type_shapes

    def delay_encoding(self):
        # used by the JIT
        self._pending_type_shapes = []
        self.can_encode_type_shape = False

    def offsets2table(self, offsets, TYPE):
        if len(offsets) == 0:
            TYPE = lltype.Void  # we can share all zero-length arrays
        try:
            return self.offsettable_cache[TYPE]
        except KeyError:
            cachedarray = lltype.malloc(GCData.OFFSETS_TO_GC_PTR,
                                        len(offsets),
                                        immortal=True)
            for i, value in enumerate(offsets):
                cachedarray[i] = value
            self.offsettable_cache[TYPE] = cachedarray
            return cachedarray

    def close_table(self):
        # make sure we no longer add members to the type_info_group.
        self.can_add_new_types = False
        self.offsettable_cache = None
        return self.type_info_group

    def finalizer_funcptr_for_type(self, TYPE):
        if TYPE in self.finalizer_funcptrs:
            return self.finalizer_funcptrs[TYPE]
        fptr = self.make_finalizer_funcptr_for_type(TYPE)
        self.finalizer_funcptrs[TYPE] = fptr
        return fptr

    def make_finalizer_funcptr_for_type(self, TYPE):
        # must be overridden for proper finalizer support
        return lltype.nullptr(GCData.ADDRESS_VOID_FUNC)

    def initialize_gc_query_function(self, gc):
        return GCData(self.type_info_group).set_query_functions(gc)

    def consider_constant(self, TYPE, value, gc):
        if value is not lltype.top_container(value):
            return
        if id(value) in self.seen_roots:
            return
        self.seen_roots[id(value)] = True

        if isinstance(TYPE, (lltype.GcStruct, lltype.GcArray)):
            typeid = self.get_type_id(TYPE)
            hdr = gc.gcheaderbuilder.new_header(value)
            adr = llmemory.cast_ptr_to_adr(hdr)
            gc.init_gc_object_immortal(adr, typeid)
            self.all_prebuilt_gc.append(value)

        # The following collects the addresses of all the fields that have
        # a GC Pointer type, inside the current prebuilt object.  All such
        # fields are potential roots: unless the structure is immutable,
        # they could be changed later to point to GC heap objects.
        adr = llmemory.cast_ptr_to_adr(value._as_ptr())
        if TYPE._gckind == "gc":
            if gc.prebuilt_gc_objects_are_static_roots or gc.DEBUG:
                appendto = self.addresses_of_static_ptrs
            else:
                return
        else:
            appendto = self.addresses_of_static_ptrs_in_nongc
        for a in gc_pointers_inside(value, adr, mutable_only=True):
            appendto.append(a)
Example #36
    def update_forward_pointers(self, toaddr, maxnum):
        self.base_forwarding_addr = base_forwarding_addr = toaddr
        fromaddr = self.space
        size_gc_header = self.gcheaderbuilder.size_gc_header
        num = 0
        while fromaddr < self.free:
            hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR))
            obj = fromaddr + size_gc_header
            # compute the original object size, including the
            # optional hash field
            basesize = size_gc_header + self.get_size(obj)
            totalsrcsize = basesize
            if hdr.tid & GCFLAG_HASHFIELD:  # already a hash field, copy it too
                totalsrcsize += llmemory.sizeof(lltype.Signed)
            #
            if self.marked(obj):
                # the object is marked as surviving.  Compute the new object
                # size
                totaldstsize = totalsrcsize
                if (hdr.tid & (GCFLAG_HASHTAKEN|GCFLAG_HASHFIELD) ==
                               GCFLAG_HASHTAKEN):
                    # grow a new hash field -- with the exception: if
                    # the object actually doesn't move, don't
                    # (otherwise, we get a bogus toaddr > fromaddr)
                    if self.toaddr_smaller_than_fromaddr(toaddr, fromaddr):
                        totaldstsize += llmemory.sizeof(lltype.Signed)
                #
                if not translated_to_c():
                    llarena.arena_reserve(toaddr, basesize)
                    if (raw_malloc_usage(totaldstsize) >
                        raw_malloc_usage(basesize)):
                        llarena.arena_reserve(toaddr + basesize,
                                              llmemory.sizeof(lltype.Signed))
                #
                # save the field hdr.tid in the array tid_backup
                ll_assert(num < maxnum, "overflow of the tid_backup table")
                self.tid_backup[num] = self.get_type_id(obj)
                num += 1
                # compute forward_offset, the offset to the future copy
                # of this object
                forward_offset = toaddr - base_forwarding_addr
                # copy the first two gc flags in forward_offset
                ll_assert(forward_offset & 3 == 0, "misalignment!")
                forward_offset |= (hdr.tid >> first_gcflag_bit) & 3
                hdr.tid = forward_offset | GCFLAG_MARKBIT
                ll_assert(self.marked(obj), "re-marking object failed!")
                # done
                toaddr += totaldstsize
            #
            fromaddr += totalsrcsize
            if not translated_to_c():
                assert toaddr - base_forwarding_addr <= fromaddr - self.space
        self.num_alive_objs = num
        self.finaladdr = toaddr

        # now update references
        self.root_walker.walk_roots(
            MarkCompactGC._update_ref,  # stack roots
            MarkCompactGC._update_ref,  # static in prebuilt non-gc structures
            MarkCompactGC._update_ref)  # static in prebuilt gc objects
        self.walk_marked_objects(MarkCompactGC.trace_and_update_ref)
Example #37
    elif isinstance(TYPE, lltype.Array):
        ITEM = TYPE.OF
        if isinstance(ITEM, lltype.Ptr) and ITEM.TO._gckind == 'gc':
            null = lltype.nullptr(ITEM.TO)
            for i in range(p._obj.getlength()):
                p[i] = null
        elif isinstance(ITEM, lltype.ContainerType):
            for i in range(p._obj.getlength()):
                zero_gc_pointers_inside(p[i], ITEM)

########## weakrefs ##########
# framework: weakref objects are small structures containing only an address

WEAKREF = lltype.GcStruct("weakref", ("weakptr", llmemory.Address))
WEAKREFPTR = lltype.Ptr(WEAKREF)
sizeof_weakref= llmemory.sizeof(WEAKREF)
empty_weakref = lltype.malloc(WEAKREF, immortal=True)
empty_weakref.weakptr = llmemory.NULL

def ll_weakref_deref(wref):
    wref = llmemory.cast_weakrefptr_to_ptr(WEAKREFPTR, wref)
    return wref.weakptr

def convert_weakref_to(targetptr):
    # Prebuilt weakrefs don't really need to be weak at all,
    # but we need to emulate the structure expected by ll_weakref_deref().
    if not targetptr:
        return empty_weakref
    else:
        link = lltype.malloc(WEAKREF, immortal=True)
        link.weakptr = llmemory.cast_ptr_to_adr(targetptr)
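        # -- cut off in the listing; presumably the freshly filled link is
        # -- then returned, mirroring the prebuilt empty_weakref case above
        return link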
Example #38
import py
from pypy.rpython.memory.gc.minimarkpage import ArenaCollection
from pypy.rpython.memory.gc.minimarkpage import PAGE_HEADER, PAGE_PTR
from pypy.rpython.memory.gc.minimarkpage import PAGE_NULL, WORD
from pypy.rpython.memory.gc.minimarkpage import _dummy_size
from pypy.rpython.lltypesystem import lltype, llmemory, llarena
from pypy.rpython.lltypesystem.llmemory import cast_ptr_to_adr

NULL = llmemory.NULL
SHIFT = WORD
hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER))


def test_allocate_arena():
    ac = ArenaCollection(SHIFT + 64 * 20, 64, 1)
    ac.allocate_new_arena()
    assert ac.num_uninitialized_pages == 20
    upages = ac.current_arena.freepages
    upages + 64 * 20  # does not raise
    py.test.raises(llarena.ArenaError, "upages + 64*20 + 1")
    #
    ac = ArenaCollection(SHIFT + 64 * 20 + 7, 64, 1)
    ac.allocate_new_arena()
    assert ac.num_uninitialized_pages == 20
    upages = ac.current_arena.freepages
    upages + 64 * 20 + 7  # does not raise
    py.test.raises(llarena.ArenaError, "upages + 64*20 + 64")


def test_allocate_new_page():
    pagesize = hdrsize + 16
Example #39
def arena_collection_for_test(pagesize, pagelayout, fill_with_objects=False):
    assert " " not in pagelayout.rstrip(" ")
    nb_pages = len(pagelayout)
    arenasize = pagesize * (nb_pages + 1) - 1
    ac = ArenaCollection(arenasize, pagesize, 9 * WORD)

    #
    def link(pageaddr, size_class, size_block, nblocks, nusedblocks, step=1):
        assert step in (1, 2)
        llarena.arena_reserve(pageaddr, llmemory.sizeof(PAGE_HEADER))
        page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR)
        if step == 1:
            page.nfree = 0
            nuninitialized = nblocks - nusedblocks
        else:
            page.nfree = nusedblocks
            nuninitialized = nblocks - 2 * nusedblocks
        page.freeblock = pageaddr + hdrsize + nusedblocks * size_block
        if nusedblocks < nblocks:
            chainedlists = ac.page_for_size
        else:
            chainedlists = ac.full_page_for_size
        page.nextpage = chainedlists[size_class]
        page.arena = ac.current_arena
        chainedlists[size_class] = page
        if fill_with_objects:
            for i in range(0, nusedblocks * step, step):
                objaddr = pageaddr + hdrsize + i * size_block
                llarena.arena_reserve(objaddr, _dummy_size(size_block))
            if step == 2:
                prev = 'page.freeblock'
                for i in range(1, nusedblocks * step, step):
                    holeaddr = pageaddr + hdrsize + i * size_block
                    llarena.arena_reserve(holeaddr,
                                          llmemory.sizeof(llmemory.Address))
                    exec '%s = holeaddr' % prev in globals(), locals()
                    prevhole = holeaddr
                    prev = 'prevhole.address[0]'
                endaddr = pageaddr + hdrsize + 2 * nusedblocks * size_block
                exec '%s = endaddr' % prev in globals(), locals()
        assert ac._nuninitialized(page, size_class) == nuninitialized

    #
    ac.allocate_new_arena()
    num_initialized_pages = len(pagelayout.rstrip(" "))
    ac._startpageaddr = ac.current_arena.freepages
    if pagelayout.endswith(" "):
        ac.current_arena.freepages += pagesize * num_initialized_pages
    else:
        ac.current_arena.freepages = NULL
    ac.num_uninitialized_pages -= num_initialized_pages
    #
    for i in reversed(range(num_initialized_pages)):
        pageaddr = pagenum(ac, i)
        c = pagelayout[i]
        if '1' <= c <= '9':  # a partially used page (1 block free)
            size_class = int(c)
            size_block = WORD * size_class
            nblocks = (pagesize - hdrsize) // size_block
            link(pageaddr, size_class, size_block, nblocks, nblocks - 1)
        elif c == '.':  # a free, but initialized, page
            llarena.arena_reserve(pageaddr, llmemory.sizeof(llmemory.Address))
            pageaddr.address[0] = ac.current_arena.freepages
            ac.current_arena.freepages = pageaddr
            ac.current_arena.nfreepages += 1
        elif c == '#':  # a random full page, in the list 'full_pages'
            size_class = fill_with_objects or 1
            size_block = WORD * size_class
            nblocks = (pagesize - hdrsize) // size_block
            link(pageaddr, size_class, size_block, nblocks, nblocks)
        elif c == '/':  # a page 1/3 allocated, 1/3 freed, 1/3 uninit objs
            size_class = fill_with_objects or 1
            size_block = WORD * size_class
            nblocks = (pagesize - hdrsize) // size_block
            link(pageaddr,
                 size_class,
                 size_block,
                 nblocks,
                 nblocks // 3,
                 step=2)
    #
    ac.allocate_new_arena = lambda: should_not_allocate_new_arenas
    return ac
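
# Illustration of the 'pagelayout' mini-language accepted above (hypothetical
# call; any pagesize comfortably larger than hdrsize works):
#
#     ac = arena_collection_for_test(hdrsize + 16 * WORD, "2.#/ ")
#
#     '1'-'9' : page of that size class with exactly one block still free
#     '.'     : an initialized but entirely free page
#     '#'     : a full page, chained into 'full_page_for_size'
#     '/'     : page one third used, one third freed, one third uninitialized
#     ' '     : trailing pages left uninitialized in the arena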
Example #40
0
 def check((ssize, msize, smsize, mssize)):
     assert ssize == llmemory.sizeof(Signed)
     assert msize == llmemory.sizeof(Signed) * 2
     assert smsize == msize
     assert mssize == msize
Example #41
0
                # retaddr not found!
                llop.debug_fatalerror(lltype.Void, "cannot find gc roots!")
                return False

            def next_gcroot_from_current_frame(self):
                i = self.remaining_roots_in_current_frame - 1
                self.remaining_roots_in_current_frame = i
                ll_assert(i >= 0, "bad call to next_gcroot_from_current_frame")
                liveoffset = self.liveoffsets.signed[i]
                return self.frame_data_base + liveoffset


        return StackRootIterator


sizeofaddr = llmemory.sizeof(llmemory.Address)
arrayitemsize = 2 * sizeofaddr


def binary_search(start, end, addr1):
    """Search for an element in a sorted array.

    The interval from the start address (included) to the end address
    (excluded) is assumed to be a sorted array of pairs (addr1, addr2).
    This searches for the item with a given addr1 and returns its
    address.
    """
    count = (end - start) // arrayitemsize
    while count > 1:
        middleindex = count // 2
        middle = start + middleindex * arrayitemsize
        if addr1 < middle.address[0]:
            count = middleindex
        else:
            start = middle
            count -= middleindex
    return start
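
# Pure-Python analogue of the search above (illustration only: it operates on
# a plain list of (addr1, addr2) pairs instead of raw memory words):
def _binary_search_pairs(pairs, key):
    start, count = 0, len(pairs)
    while count > 1:
        middleindex = count // 2
        if key < pairs[start + middleindex][0]:
            count = middleindex
        else:
            start += middleindex
            count -= middleindex
    return pairs[start]

assert _binary_search_pairs([(1, 'a'), (5, 'b'), (9, 'c')], 5) == (5, 'b')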
Example #42
0
    d = c + llmemory.offsetof(SX, 'x')
    assert lt(c, d)
    assert lt(b, d)
    assert lt(a, d)
    assert lt(llmemory.NULL, d)
    e = c + precomputed_size
    assert lt(d, e)
    assert lt(c, e)
    assert lt(b, e)
    assert lt(a, e)
    assert lt(llmemory.NULL, e)


SX = lltype.Struct('S', ('foo', lltype.Signed), ('x', lltype.Signed))
SPTR = lltype.Ptr(SX)
precomputed_size = round_up_for_allocation(llmemory.sizeof(SX))


def test_look_inside_object():
    # this code is also used in translation tests below
    myarenasize = 50
    a = arena_malloc(myarenasize, False)
    b = a + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(b, precomputed_size)
    (b + llmemory.offsetof(SX, 'x')).signed[0] = 123
    assert llmemory.cast_adr_to_ptr(b, SPTR).x == 123
    llmemory.cast_adr_to_ptr(b, SPTR).x += 1
    assert (b + llmemory.offsetof(SX, 'x')).signed[0] == 124
    arena_reset(a, myarenasize, True)
    arena_reserve(b, round_up_for_allocation(llmemory.sizeof(SX)))
    assert llmemory.cast_adr_to_ptr(b, SPTR).x == 0
Example #43
0
 def get_size_incl_hash(self, obj):
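     # Size of 'obj', plus one extra Signed word when the object's identity
     # hash has been stored in the object itself (GCFLAG_HASHFIELD is set).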
     size = self.get_size(obj)
     hdr = self.header(obj)
     if hdr.tid & GCFLAG_HASHFIELD:
         size += llmemory.sizeof(lltype.Signed)
     return size
Example #44
0
def test_arena():
    S = lltype.Struct('S', ('x', lltype.Signed))
    SPTR = lltype.Ptr(S)
    ssize = llmemory.raw_malloc_usage(llmemory.sizeof(S))
    myarenasize = 2 * ssize + 1
    a = arena_malloc(myarenasize, False)
    assert a != llmemory.NULL
    assert a + 3 != llmemory.NULL

    arena_reserve(a, llmemory.sizeof(S))
    s1_ptr1 = cast_adr_to_ptr(a, SPTR)
    s1_ptr1.x = 1
    s1_ptr2 = cast_adr_to_ptr(a, SPTR)
    assert s1_ptr2.x == 1
    assert s1_ptr1 == s1_ptr2

    py.test.raises(
        ArenaError,
        arena_reserve,
        a + ssize + 1,  # misaligned
        llmemory.sizeof(S))
    arena_reserve(a + ssize + 1, llmemory.sizeof(S), check_alignment=False)
    s2_ptr1 = cast_adr_to_ptr(a + ssize + 1, SPTR)
    py.test.raises(lltype.UninitializedMemoryAccess, 's2_ptr1.x')
    s2_ptr1.x = 2
    s2_ptr2 = cast_adr_to_ptr(a + ssize + 1, SPTR)
    assert s2_ptr2.x == 2
    assert s2_ptr1 == s2_ptr2
    assert s1_ptr1 != s2_ptr1
    assert not (s2_ptr2 == s1_ptr2)
    assert s1_ptr1 == cast_adr_to_ptr(a, SPTR)

    S2 = lltype.Struct('S2', ('y', lltype.Char))
    S2PTR = lltype.Ptr(S2)
    py.test.raises(lltype.InvalidCast, cast_adr_to_ptr, a, S2PTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a + 1, SPTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a + ssize, SPTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a + 2 * ssize, SPTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a + 2 * ssize + 1, SPTR)
    py.test.raises(ArenaError, arena_reserve, a + 1, llmemory.sizeof(S), False)
    py.test.raises(ArenaError, arena_reserve, a + ssize, llmemory.sizeof(S),
                   False)
    py.test.raises(ArenaError, arena_reserve, a + 2 * ssize,
                   llmemory.sizeof(S), False)
    py.test.raises(ArenaError, arena_reserve, a + 2 * ssize + 1,
                   llmemory.sizeof(S), False)

    arena_reset(a, myarenasize, True)
    py.test.raises(ArenaError, cast_adr_to_ptr, a, SPTR)
    arena_reserve(a, llmemory.sizeof(S))
    s1_ptr1 = cast_adr_to_ptr(a, SPTR)
    assert s1_ptr1.x == 0
    s1_ptr1.x = 5

    arena_reserve(a + ssize, llmemory.sizeof(S2), check_alignment=False)
    s2_ptr1 = cast_adr_to_ptr(a + ssize, S2PTR)
    assert s2_ptr1.y == '\x00'
    s2_ptr1.y = 'X'

    assert cast_adr_to_ptr(a + 0, SPTR).x == 5
    assert cast_adr_to_ptr((a + ssize + 1) - 1, S2PTR).y == 'X'

    assert (a + 4) - (a + 1) == 3
Example #45
0
class TypeLayoutBuilder(object):
    can_add_new_types = True
    can_encode_type_shape = True  # set to False initially by the JIT

    size_of_fixed_type_info = llmemory.sizeof(GCData.TYPE_INFO)

    def __init__(self, GCClass, lltype2vtable=None):
        self.GCClass = GCClass
        self.lltype2vtable = lltype2vtable
        self.make_type_info_group()
        self.id_of_type = {}  # {LLTYPE: type_id}
        self.iseen_roots = identity_dict()
        # The following are lists of addresses of gc pointers living inside
        # the prebuilt structures.  They should list all the locations that
        # could possibly point to a GC heap object.
        # This list contains pointers in GcStructs and GcArrays:
        self.addresses_of_static_ptrs = []
        # This list contains pointers in raw Structs and Arrays:
        self.addresses_of_static_ptrs_in_nongc = []
        # for debugging, the following list collects all the prebuilt
        # GcStructs and GcArrays
        self.all_prebuilt_gc = []
        self._special_funcptrs = {}
        self.offsettable_cache = {}

    def make_type_info_group(self):
        self.type_info_group = llgroup.group("typeinfo")
        # don't use typeid 0, may help debugging
        DUMMY = lltype.Struct("dummy", ('x', lltype.Signed))
        dummy = lltype.malloc(DUMMY, immortal=True, zero=True)
        self.type_info_group.add_member(dummy)

    def get_type_id(self, TYPE):
        try:
            return self.id_of_type[TYPE]
        except KeyError:
            assert self.can_add_new_types
            assert isinstance(TYPE, (lltype.GcStruct, lltype.GcArray))
            # Record the new type_id description as a TYPE_INFO structure
            # (a VARSIZE_TYPE_INFO structure for variable-sized types).
            if not TYPE._is_varsize():
                fullinfo = lltype.malloc(GCData.TYPE_INFO,
                                         immortal=True,
                                         zero=True)
                info = fullinfo
            else:
                fullinfo = lltype.malloc(GCData.VARSIZE_TYPE_INFO,
                                         immortal=True,
                                         zero=True)
                info = fullinfo.header
            type_id = self.type_info_group.add_member(fullinfo)
            if self.can_encode_type_shape:
                encode_type_shape(self, info, TYPE, type_id.index)
            else:
                self._pending_type_shapes.append((info, TYPE, type_id.index))
            # store it
            self.id_of_type[TYPE] = type_id
            self.add_vtable_after_typeinfo(TYPE)
            return type_id
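
    # Hypothetical usage sketch (not part of the original class): the builder
    # hands out one type_id per GC type and exposes its TYPE_INFO record:
    #
    #     builder = TypeLayoutBuilder(SomeGCClass)   # SomeGCClass: any GC class
    #     S = lltype.GcStruct('S', ('x', lltype.Signed))
    #     tid = builder.get_type_id(S)        # registers S on first use
    #     info = builder.get_info(tid)        # its TYPE_INFO group member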

    def add_vtable_after_typeinfo(self, TYPE):
        # If gcremovetypeptr is False, then lltype2vtable is None, which
        # means that we don't have to store the vtables in type_info_group.
        if self.lltype2vtable is None:
            return
        # does the type have a vtable?
        vtable = self.lltype2vtable.get(TYPE, None)
        if vtable is not None:
            # yes.  check that in this case, we are not varsize
            assert not TYPE._is_varsize()
            vtable = lltype.normalizeptr(vtable)
            self.type_info_group.add_member(vtable)
        else:
            # no vtable from lltype2vtable -- double-check to be sure
            # that it's not a subclass of OBJECT.
            assert not is_subclass_of_object(TYPE)

    def get_info(self, type_id):
        res = llop.get_group_member(GCData.TYPE_INFO_PTR,
                                    self.type_info_group._as_ptr(), type_id)
        _check_valid_type_info(res)
        return res

    def get_info_varsize(self, type_id):
        res = llop.get_group_member(GCData.VARSIZE_TYPE_INFO_PTR,
                                    self.type_info_group._as_ptr(), type_id)
        _check_valid_type_info_varsize(res)
        return res

    def is_weakref_type(self, TYPE):
        return TYPE == WEAKREF

    def encode_type_shapes_now(self):
        if not self.can_encode_type_shape:
            self.can_encode_type_shape = True
            for info, TYPE, index in self._pending_type_shapes:
                encode_type_shape(self, info, TYPE, index)
            del self._pending_type_shapes

    def delay_encoding(self):
        # used by the JIT
        self._pending_type_shapes = []
        self.can_encode_type_shape = False

    def offsets2table(self, offsets, TYPE):
        if len(offsets) == 0:
            TYPE = lltype.Void  # we can share all zero-length arrays
        try:
            return self.offsettable_cache[TYPE]
        except KeyError:
            cachedarray = lltype.malloc(GCData.OFFSETS_TO_GC_PTR,
                                        len(offsets),
                                        immortal=True)
            for i, value in enumerate(offsets):
                cachedarray[i] = value
            self.offsettable_cache[TYPE] = cachedarray
            return cachedarray

    def close_table(self):
        # make sure we no longer add members to the type_info_group.
        self.can_add_new_types = False
        self.offsettable_cache = None
        return self.type_info_group

    def special_funcptr_for_type(self, TYPE):
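        # Returns None when TYPE needs neither a finalizer nor a custom
        # tracer, otherwise a pair ("light_finalizer" | "finalizer" |
        # "custom_trace", funcptr); the result is cached per TYPE.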
        if TYPE in self._special_funcptrs:
            return self._special_funcptrs[TYPE]
        fptr1, is_lightweight = self.make_finalizer_funcptr_for_type(TYPE)
        fptr2 = self.make_custom_trace_funcptr_for_type(TYPE)
        assert not (fptr1 and fptr2), (
            "type %r needs both a finalizer and a custom tracer" % (TYPE, ))
        if fptr1:
            if is_lightweight:
                kind_and_fptr = "light_finalizer", fptr1
            else:
                kind_and_fptr = "finalizer", fptr1
        elif fptr2:
            kind_and_fptr = "custom_trace", fptr2
        else:
            kind_and_fptr = None
        self._special_funcptrs[TYPE] = kind_and_fptr
        return kind_and_fptr

    def make_finalizer_funcptr_for_type(self, TYPE):
        # must be overridden for proper finalizer support
        return None, False

    def make_custom_trace_funcptr_for_type(self, TYPE):
        # must be overridden for proper custom tracer support
        return None

    def initialize_gc_query_function(self, gc):
        return GCData(self.type_info_group).set_query_functions(gc)

    def consider_constant(self, TYPE, value, gc):
        if value is not lltype.top_container(value):
            return
        if value in self.iseen_roots:
            return
        self.iseen_roots[value] = True

        if isinstance(TYPE, (lltype.GcStruct, lltype.GcArray)):
            typeid = self.get_type_id(TYPE)
            hdr = gc.gcheaderbuilder.new_header(value)
            adr = llmemory.cast_ptr_to_adr(hdr)
            gc.init_gc_object_immortal(adr, typeid)
            self.all_prebuilt_gc.append(value)

        # The following collects the addresses of all the fields that have
        # a GC Pointer type, inside the current prebuilt object.  All such
        # fields are potential roots: unless the structure is immutable,
        # they could be changed later to point to GC heap objects.
        adr = llmemory.cast_ptr_to_adr(value._as_ptr())
        if TYPE._gckind == "gc":
            if gc.prebuilt_gc_objects_are_static_roots or gc.DEBUG:
                appendto = self.addresses_of_static_ptrs
            else:
                return
        else:
            appendto = self.addresses_of_static_ptrs_in_nongc
        for a in gc_pointers_inside(value, adr, mutable_only=True):
            appendto.append(a)