Esempio n. 1
0
def get_array_token(T, translate_support_code):
    """Return (basesize, itemsize, ofs_length) describing the layout of T.

    T is an Array, or a var-sized Struct whose last field is an array.
    basesize   -- offset from the start of the object to item 0
    itemsize   -- size of one array item
    ofs_length -- offset of the stored length field, or -1 when T was
                  declared with the 'nolength' hint
    When translate_support_code is true the results are symbolic llmemory
    offsets (resolved at translation time); otherwise they are concrete
    integers read off the ctypes layout of T.
    """
    # T can be an array or a var-sized structure
    if translate_support_code:
        basesize = llmemory.sizeof(T, 0)     # this includes +1 for STR
        if isinstance(T, lltype.Struct):
            SUBARRAY = getattr(T, T._arrayfld)
            itemsize = llmemory.sizeof(SUBARRAY.OF)
            ofs_length = (llmemory.offsetof(T, T._arrayfld) +
                          llmemory.ArrayLengthOffset(SUBARRAY))
        else:
            if T._hints.get('nolength', None):
                ofs_length = -1
            else:
                ofs_length = llmemory.ArrayLengthOffset(T)
            itemsize = llmemory.sizeof(T.OF)
    else:
        if isinstance(T, lltype.Struct):
            assert T._arrayfld is not None, "%r is not variable-sized" % (T,)
            cstruct = ll2ctypes.get_ctypes_type(T)
            cfield = getattr(cstruct, T._arrayfld)
            before_array_part = cfield.offset
            # From here on, T is rebound to the embedded array part only.
            T = getattr(T, T._arrayfld)
        else:
            before_array_part = 0
        carray = ll2ctypes.get_ctypes_type(T)
        if T._hints.get('nolength', None):
            ofs_length = -1
        else:
            assert carray.length.size == WORD
            ofs_length = before_array_part + carray.length.offset
        basesize = before_array_part + carray.items.offset
        basesize += T._hints.get('extra_item_after_alloc', 0)  # +1 for STR
        carrayitem = ll2ctypes.get_ctypes_type(T.OF)
        itemsize = ctypes.sizeof(carrayitem)
    return basesize, itemsize, ofs_length
Esempio n. 2
0
 def link(pageaddr, size_class, size_block, nblocks, nusedblocks, step=1):
     """Set up a PAGE_HEADER at 'pageaddr' and chain it into the allocator.

     step == 1: the first 'nusedblocks' blocks are used contiguously.
     step == 2: every other block is used and the holes in between are
     linked into the page's free list (built below via the 'exec' trick).
     NOTE(review): 'ac', 'hdrsize' and 'fill_with_objects' come from an
     enclosing scope that is not visible here -- confirm at the call site.
     """
     assert step in (1, 2)
     llarena.arena_reserve(pageaddr, llmemory.sizeof(PAGE_HEADER))
     page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR)
     if step == 1:
         page.nfree = 0
         nuninitialized = nblocks - nusedblocks
     else:
         page.nfree = nusedblocks
         nuninitialized = nblocks - 2*nusedblocks
     page.freeblock = pageaddr + hdrsize + nusedblocks * size_block
     # A partially-used page goes into page_for_size; a full one into
     # full_page_for_size.
     if nusedblocks < nblocks:
         chainedlists = ac.page_for_size
     else:
         chainedlists = ac.full_page_for_size
     page.nextpage = chainedlists[size_class]
     page.arena = ac.current_arena
     chainedlists[size_class] = page
     if fill_with_objects:
         for i in range(0, nusedblocks*step, step):
             objaddr = pageaddr + hdrsize + i * size_block
             llarena.arena_reserve(objaddr, _dummy_size(size_block))
         if step == 2:
             # Chain the holes together: each exec stores the next hole's
             # address into the previous link -- first page.freeblock,
             # then prevhole.address[0].  (Python 2 exec-statement syntax.)
             prev = 'page.freeblock'
             for i in range(1, nusedblocks*step, step):
                 holeaddr = pageaddr + hdrsize + i * size_block
                 llarena.arena_reserve(holeaddr,
                                       llmemory.sizeof(llmemory.Address))
                 exec '%s = holeaddr' % prev in globals(), locals()
                 prevhole = holeaddr
                 prev = 'prevhole.address[0]'
             endaddr = pageaddr + hdrsize + 2*nusedblocks * size_block
             exec '%s = endaddr' % prev in globals(), locals()
     assert ac._nuninitialized(page, size_class) == nuninitialized
Esempio n. 3
0
 def link(pageaddr, size_class, size_block, nblocks, nusedblocks, step=1):
     """Set up a PAGE_HEADER at 'pageaddr' and chain it into the allocator.

     step == 1: the first 'nusedblocks' blocks are used contiguously.
     step == 2: every other block is used and the holes in between are
     linked into the page's free list (built below via the exec() trick).
     NOTE(review): 'ac', 'hdrsize' and 'fill_with_objects' come from an
     enclosing scope that is not visible here -- confirm at the call site.
     """
     assert step in (1, 2)
     llarena.arena_reserve(pageaddr, llmemory.sizeof(PAGE_HEADER))
     page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR)
     if step == 1:
         page.nfree = 0
         nuninitialized = nblocks - nusedblocks
     else:
         page.nfree = nusedblocks
         nuninitialized = nblocks - 2 * nusedblocks
     page.freeblock = pageaddr + hdrsize + nusedblocks * size_block
     # A partially-used page goes into page_for_size; a full one into
     # full_page_for_size.
     if nusedblocks < nblocks:
         chainedlists = ac.page_for_size
     else:
         chainedlists = ac.full_page_for_size
     page.nextpage = chainedlists[size_class]
     page.arena = ac.current_arena
     chainedlists[size_class] = page
     if fill_with_objects:
         for i in range(0, nusedblocks * step, step):
             objaddr = pageaddr + hdrsize + i * size_block
             llarena.arena_reserve(objaddr, _dummy_size(size_block))
         if step == 2:
             # Chain the holes together: each exec() stores the next
             # hole's address into the previous link -- first
             # page.freeblock, then prevhole.address[0].
             prev = 'page.freeblock'
             for i in range(1, nusedblocks * step, step):
                 holeaddr = pageaddr + hdrsize + i * size_block
                 llarena.arena_reserve(holeaddr,
                                       llmemory.sizeof(llmemory.Address))
                 exec('%s = holeaddr' % prev, globals(), locals())
                 prevhole = holeaddr
                 prev = 'prevhole.address[0]'
             endaddr = pageaddr + hdrsize + 2 * nusedblocks * size_block
             exec('%s = endaddr' % prev, globals(), locals())
     assert ac._nuninitialized(page, size_class) == nuninitialized
Esempio n. 4
0
 def test_simple_access(self):
     """AddressStack behaves as a LIFO stack and accepts NULL entries."""
     AddressStack = get_address_stack()
     word = llmemory.sizeof(lltype.Signed)
     addrs = [raw_malloc(word) for _ in range(3)]
     stack = AddressStack()
     for addr in addrs:
         stack.append(addr)
     # Popping must return the addresses in reverse (LIFO) order.
     for addr in reversed(addrs):
         assert stack.non_empty()
         assert stack.pop() == addr
     assert not stack.non_empty()
     # delete() must also work on a non-empty stack.
     stack.append(addrs[0])
     stack.delete()
     stack = AddressStack()
     for addr in addrs:
         stack.append(addr)
     stack.append(NULL)
     # NULL is stored and retrieved like any other address.
     assert stack.pop() == NULL
     stack.delete()
     for addr in reversed(addrs):
         raw_free(addr)
Esempio n. 5
0
    def varsize_malloc_helper(self, hop, flags, meth, extraargs):
        """Rewrite a varsize-malloc operation by delegating to 'meth'.

        Computes the constant layout of the result type -- fixed size,
        item size, and offset-to-length (None for 'nolength' arrays) --
        then calls meth(hop, *extraargs, flags, TYPE, v_numitems,
        c_const_size, c_item_size, c_offset_to_length) and casts the raw
        result back to the operation's result type.
        """
        def intconst(c): return rmodel.inputconst(lltype.Signed, c)
        op = hop.spaceop
        TYPE = op.result.concretetype.TO
        assert TYPE._is_varsize()
        if isinstance(TYPE, lltype.Struct):
            ARRAY = TYPE._flds[TYPE._arrayfld]
        else:
            ARRAY = TYPE
        assert isinstance(ARRAY, lltype.Array)
        # sizeof(TYPE, 0): size of the fixed part, with zero var-items.
        c_const_size = intconst(llmemory.sizeof(TYPE, 0))
        c_item_size = intconst(llmemory.sizeof(ARRAY.OF))

        if ARRAY._hints.get("nolength", False):
            c_offset_to_length = None
        else:
            if isinstance(TYPE, lltype.Struct):
                offset_to_length = llmemory.FieldOffset(TYPE, TYPE._arrayfld) + \
                                   llmemory.ArrayLengthOffset(ARRAY)
            else:
                offset_to_length = llmemory.ArrayLengthOffset(ARRAY)
            c_offset_to_length = intconst(offset_to_length)

        # op.args[-1] is the requested number of var-sized items.
        args = [hop] + extraargs + [flags, TYPE,
                op.args[-1], c_const_size, c_item_size, c_offset_to_length]
        v_raw = meth(*args)
        hop.cast_result(v_raw)
Esempio n. 6
0
 def test_simple_access(self):
     """AddressStack pops in LIFO order; NULL round-trips like any address."""
     AddressStack = get_address_stack()
     wordsize = llmemory.sizeof(lltype.Signed)
     addr0 = raw_malloc(wordsize)
     addr1 = raw_malloc(wordsize)
     addr2 = raw_malloc(wordsize)
     stack = AddressStack()
     stack.append(addr0)
     stack.append(addr1)
     stack.append(addr2)
     # Last in, first out.
     for expected in (addr2, addr1, addr0):
         assert stack.non_empty()
         assert stack.pop() == expected
     assert not stack.non_empty()
     # delete() must work on a non-empty stack too.
     stack.append(addr0)
     stack.delete()
     stack = AddressStack()
     stack.append(addr0)
     stack.append(addr1)
     stack.append(addr2)
     stack.append(NULL)
     assert stack.pop() == NULL
     stack.delete()
     raw_free(addr2)
     raw_free(addr1)
     raw_free(addr0)
Esempio n. 7
0
    def varsize_malloc_helper(self, hop, flags, meth, extraargs):
        """Rewrite a varsize-malloc operation by delegating to 'meth'.

        Computes the constant layout of the result type -- fixed size,
        item size, and offset-to-length (None for 'nolength' arrays) --
        then calls meth(hop, *extraargs, flags, TYPE, v_numitems,
        c_const_size, c_item_size, c_offset_to_length) and casts the raw
        result back to the operation's result type.
        """
        def intconst(c): return rmodel.inputconst(lltype.Signed, c)
        op = hop.spaceop
        TYPE = op.result.concretetype.TO
        assert TYPE._is_varsize()
        if isinstance(TYPE, lltype.Struct):
            ARRAY = TYPE._flds[TYPE._arrayfld]
        else:
            ARRAY = TYPE
        assert isinstance(ARRAY, lltype.Array)
        # sizeof(TYPE, 0): size of the fixed part, with zero var-items.
        c_const_size = intconst(llmemory.sizeof(TYPE, 0))
        c_item_size = intconst(llmemory.sizeof(ARRAY.OF))

        if ARRAY._hints.get("nolength", False):
            c_offset_to_length = None
        else:
            if isinstance(TYPE, lltype.Struct):
                offset_to_length = llmemory.FieldOffset(TYPE, TYPE._arrayfld) + \
                                   llmemory.ArrayLengthOffset(ARRAY)
            else:
                offset_to_length = llmemory.ArrayLengthOffset(ARRAY)
            c_offset_to_length = intconst(offset_to_length)

        # op.args[-1] is the requested number of var-sized items.
        args = [hop] + extraargs + [flags, TYPE,
                op.args[-1], c_const_size, c_item_size, c_offset_to_length]
        v_raw = meth(*args)
        hop.cast_result(v_raw)
Esempio n. 8
0
 def identityhash(self, gcobj):
     """Return the identity hash of the GC-managed object 'gcobj'.

     On the first request for an object's hash, one word is reserved at
     the top of the space (lowering self.top_of_space) so that a later
     copying collection can append the hash word, and a GC_HASH_TAKEN_*
     flag is recorded in the header.  May trigger a collection, hence
     the retry loop; may raise MemoryError.
     """
     # The following loop should run at most twice.
     while 1:
         obj = llmemory.cast_ptr_to_adr(gcobj)
         hdr = self.header(obj)
         if hdr.tid & GCFLAG_HASHMASK:
             break
         # It's the first time we ask for a hash, and it's not an
         # external object.  Shrink the top of space by the extra
         # hash word that will be needed after a collect.
         shrunk_top = self.top_of_space - llmemory.sizeof(lltype.Signed)
         if shrunk_top < self.free:
             # Cannot shrink!  Do a collection, asking for at least
             # one word of free space, and try again.  May raise
             # MemoryError.  Obscure: not called directly, but
             # across an llop, to make sure that there is the
             # correct push_roots/pop_roots around the call...
             llop.gc_obtain_free_space(llmemory.Address,
                                       llmemory.sizeof(lltype.Signed))
             continue
         else:
             # Now we can have side-effects: lower the top of space
             # and set one of the GC_HASH_TAKEN_xxx flags.
             self.top_of_space = shrunk_top
             if self.is_in_nursery(obj):
                 hdr.tid |= GC_HASH_TAKEN_NURS
             else:
                 hdr.tid |= GC_HASH_TAKEN_ADDR
             break
     # Now we can return the result
     objsize = self.get_size(obj)
     return self._get_object_hash(obj, objsize, hdr.tid)
Esempio n. 9
0
def get_size(TYPE, translate_support_code):
    """Return the size of TYPE (fixed part only for var-sized types).

    Symbolic llmemory size when translating support code; otherwise the
    concrete size of the ctypes layout.
    """
    if not translate_support_code:
        # Untranslated: ask ctypes for the concrete layout size.
        return ctypes.sizeof(ll2ctypes.get_ctypes_type(TYPE))
    if TYPE._is_varsize():
        # Var-sized types: size of the fixed header with zero items.
        return llmemory.sizeof(TYPE, 0)
    return llmemory.sizeof(TYPE)
Esempio n. 10
0
def get_array_token(T, translate_support_code):
    """Return (basesize, itemsize, ofs_length) describing the layout of T.

    T is an Array, or a var-sized Struct whose last field is an array.
    basesize is the offset of item 0, itemsize the size of one item, and
    ofs_length the offset of the stored length (-1 under the 'nolength'
    hint).  Translated: symbolic llmemory offsets; untranslated: concrete
    integers from the ctypes layout.
    NOTE(review): unlike the other version of this helper in this file,
    the untranslated path here does not add the 'extra_item_after_alloc'
    hint to basesize -- confirm which project snapshot this belongs to.
    """
    # T can be an array or a var-sized structure
    if translate_support_code:
        basesize = llmemory.sizeof(T, 0)
        if isinstance(T, lltype.Struct):
            SUBARRAY = getattr(T, T._arrayfld)
            itemsize = llmemory.sizeof(SUBARRAY.OF)
            ofs_length = (llmemory.offsetof(T, T._arrayfld) +
                          llmemory.ArrayLengthOffset(SUBARRAY))
        else:
            if T._hints.get('nolength', None):
                ofs_length = -1
            else:
                ofs_length = llmemory.ArrayLengthOffset(T)
            itemsize = llmemory.sizeof(T.OF)
    else:
        if isinstance(T, lltype.Struct):
            assert T._arrayfld is not None, "%r is not variable-sized" % (T, )
            cstruct = ll2ctypes.get_ctypes_type(T)
            cfield = getattr(cstruct, T._arrayfld)
            before_array_part = cfield.offset
            # From here on, T is rebound to the embedded array part only.
            T = getattr(T, T._arrayfld)
        else:
            before_array_part = 0
        carray = ll2ctypes.get_ctypes_type(T)
        if T._hints.get('nolength', None):
            ofs_length = -1
        else:
            assert carray.length.size == WORD
            ofs_length = before_array_part + carray.length.offset
        basesize = before_array_part + carray.items.offset
        carrayitem = ll2ctypes.get_ctypes_type(T.OF)
        itemsize = ctypes.sizeof(carrayitem)
    return basesize, itemsize, ofs_length
Esempio n. 11
0
def get_size(TYPE, translate_support_code):
    """Size of TYPE: symbolic when translated, concrete via ctypes otherwise.

    For var-sized types the symbolic size is that of the fixed part
    (zero variable items).
    """
    if translate_support_code:
        if TYPE._is_varsize():
            return llmemory.sizeof(TYPE, 0)
        else:
            return llmemory.sizeof(TYPE)
    # Untranslated path: concrete size of the ctypes layout.
    return ctypes.sizeof(ll2ctypes.get_ctypes_type(TYPE))
Esempio n. 12
0
 def identityhash(self, gcobj):
     """Return the identity hash of the GC-managed object 'gcobj'.

     On the first request for an object's hash, one word is reserved at
     the top of the space (lowering self.top_of_space) so that a later
     copying collection can append the hash word, and a GC_HASH_TAKEN_*
     flag is recorded in the header.  May trigger a collection, hence
     the retry loop; may raise MemoryError.
     """
     # The following loop should run at most twice.
     while 1:
         obj = llmemory.cast_ptr_to_adr(gcobj)
         hdr = self.header(obj)
         if hdr.tid & GCFLAG_HASHMASK:
             break
         # It's the first time we ask for a hash, and it's not an
         # external object.  Shrink the top of space by the extra
         # hash word that will be needed after a collect.
         shrunk_top = self.top_of_space - llmemory.sizeof(lltype.Signed)
         if shrunk_top < self.free:
             # Cannot shrink!  Do a collection, asking for at least
             # one word of free space, and try again.  May raise
             # MemoryError.  Obscure: not called directly, but
             # across an llop, to make sure that there is the
             # correct push_roots/pop_roots around the call...
             llop.gc_obtain_free_space(llmemory.Address,
                                       llmemory.sizeof(lltype.Signed))
             continue
         else:
             # Now we can have side-effects: lower the top of space
             # and set one of the GC_HASH_TAKEN_xxx flags.
             self.top_of_space = shrunk_top
             if self.is_in_nursery(obj):
                 hdr.tid |= GC_HASH_TAKEN_NURS
             else:
                 hdr.tid |= GC_HASH_TAKEN_ADDR
             break
     # Now we can return the result
     objsize = self.get_size(obj)
     return self._get_object_hash(obj, objsize, hdr.tid)
Esempio n. 13
0
def test_sizeof_array_with_no_length():
    """A 'nolength' array stores no length word: 10 items == 10 * item size."""
    A = lltype.Array(lltype.Signed, hints={'nolength': True})
    total = llmemory.sizeof(A, 10)
    per_item = llmemory.sizeof(lltype.Signed)

    def f():
        # Must compile down to zero: there is no hidden header.
        return total - per_item * 10

    fn = compile(f, [])
    assert fn() == 0
Esempio n. 14
0
def test_sizeof_array_with_no_length():
    """sizeof(A, n) for a 'nolength' array is exactly n items, no header."""
    A = lltype.Array(lltype.Signed, hints={'nolength': True})
    ten_items = llmemory.sizeof(A, 10)
    one_item = llmemory.sizeof(lltype.Signed)

    def f():
        return ten_items - one_item * 10

    fn = compile(f, [])
    res = fn()
    # The difference resolves to 0 after translation.
    assert res == 0
Esempio n. 15
0
def encode_type_shape(builder, info, TYPE, index):
    """Encode the shape of the TYPE into the TYPE_INFO structure 'info'."""
    offsets = offsets_to_gc_pointers(TYPE)
    infobits = index
    info.ofstoptrs = builder.offsets2table(offsets, TYPE)
    if len(offsets) > 0:
        infobits |= T_HAS_GCPTR
    #
    # Finalizer support: if both kinds are present, the light finalizer
    # overwrites the plain one (second assignment wins).
    fptrs = builder.special_funcptr_for_type(TYPE)
    if fptrs:
        if "finalizer" in fptrs:
            info.finalizer = fptrs["finalizer"]
        if "light_finalizer" in fptrs:
            info.finalizer = fptrs["light_finalizer"]
            infobits |= T_HAS_LIGHTWEIGHT_FINALIZER
    #
    if not TYPE._is_varsize():
        info.fixedsize = llarena.round_up_for_allocation(
            llmemory.sizeof(TYPE), builder.GCClass.object_minimal_size)
        # note about round_up_for_allocation(): in the 'info' table
        # we put a rounded-up size only for fixed-size objects.  For
        # varsize ones, the GC must anyway compute the size at run-time
        # and round up that result.
    else:
        infobits |= T_IS_VARSIZE
        varinfo = lltype.cast_pointer(GCData.VARSIZE_TYPE_INFO_PTR, info)
        # Fixed part only; the variable part's size is computed at run-time.
        info.fixedsize = llmemory.sizeof(TYPE, 0)
        if isinstance(TYPE, lltype.Struct):
            ARRAY = TYPE._flds[TYPE._arrayfld]
            ofs1 = llmemory.offsetof(TYPE, TYPE._arrayfld)
            varinfo.ofstolength = ofs1 + llmemory.ArrayLengthOffset(ARRAY)
            varinfo.ofstovar = ofs1 + llmemory.itemoffsetof(ARRAY, 0)
        else:
            assert isinstance(TYPE, lltype.GcArray)
            ARRAY = TYPE
            if (isinstance(ARRAY.OF, lltype.Ptr)
                and ARRAY.OF.TO._gckind == 'gc'):
                infobits |= T_IS_GCARRAY_OF_GCPTR
            varinfo.ofstolength = llmemory.ArrayLengthOffset(ARRAY)
            varinfo.ofstovar = llmemory.itemoffsetof(TYPE, 0)
        assert isinstance(ARRAY, lltype.Array)
        if ARRAY.OF != lltype.Void:
            offsets = offsets_to_gc_pointers(ARRAY.OF)
        else:
            offsets = ()
        if len(offsets) > 0:
            infobits |= T_HAS_GCPTR_IN_VARSIZE | T_HAS_GCPTR
        varinfo.varofstoptrs = builder.offsets2table(offsets, ARRAY.OF)
        varinfo.varitemsize = llmemory.sizeof(ARRAY.OF)
    if builder.is_weakref_type(TYPE):
        infobits |= T_IS_WEAKREF
    if is_subclass_of_object(TYPE):
        infobits |= T_IS_RPYTHON_INSTANCE
    info.infobits = infobits | T_KEY_VALUE
Esempio n. 16
0
 def check((ssize, msize, smsize, mssize)):
     """Check the four rounded-up sizes (Python 2 tuple-argument syntax).

     ssize is compared against sizeof(Signed) (doubled on ARM due to
     alignment); msize must be two words, and smsize/mssize must both
     equal msize.  NOTE(review): what each size was computed from is
     defined at the call site, not visible here.
     """
     if is_arm:
         # ARM has stronger rules about aligned memory access
         # so according to the rules for round_up_for_allocation
         # we get two words here
         assert ssize == llmemory.sizeof(Signed) * 2
     else:
         assert ssize == llmemory.sizeof(Signed)
     assert msize == llmemory.sizeof(Signed) * 2
     assert smsize == msize
     assert mssize == msize
Esempio n. 17
0
def encode_type_shape(builder, info, TYPE, index):
    """Encode the shape of the TYPE into the TYPE_INFO structure 'info'."""
    offsets = offsets_to_gc_pointers(TYPE)
    infobits = index
    info.ofstoptrs = builder.offsets2table(offsets, TYPE)
    if len(offsets) > 0:
        infobits |= T_HAS_GCPTR
    #
    # Destructor support: if both kinds are present, the old-style
    # finalizer overwrites the plain destructor (second assignment wins).
    fptrs = builder.special_funcptr_for_type(TYPE)
    if fptrs:
        if "destructor" in fptrs:
            info.customfunc = fptrs["destructor"]
        if "old_style_finalizer" in fptrs:
            info.customfunc = fptrs["old_style_finalizer"]
            infobits |= T_HAS_OLDSTYLE_FINALIZER
    #
    if not TYPE._is_varsize():
        info.fixedsize = llarena.round_up_for_allocation(
            llmemory.sizeof(TYPE), builder.GCClass.object_minimal_size)
        # note about round_up_for_allocation(): in the 'info' table
        # we put a rounded-up size only for fixed-size objects.  For
        # varsize ones, the GC must anyway compute the size at run-time
        # and round up that result.
    else:
        infobits |= T_IS_VARSIZE
        varinfo = lltype.cast_pointer(GCData.VARSIZE_TYPE_INFO_PTR, info)
        # Fixed part only; the variable part's size is computed at run-time.
        info.fixedsize = llmemory.sizeof(TYPE, 0)
        if isinstance(TYPE, lltype.Struct):
            ARRAY = TYPE._flds[TYPE._arrayfld]
            ofs1 = llmemory.offsetof(TYPE, TYPE._arrayfld)
            varinfo.ofstolength = ofs1 + llmemory.ArrayLengthOffset(ARRAY)
            varinfo.ofstovar = ofs1 + llmemory.itemoffsetof(ARRAY, 0)
        else:
            assert isinstance(TYPE, lltype.GcArray)
            ARRAY = TYPE
            if (isinstance(ARRAY.OF, lltype.Ptr)
                    and ARRAY.OF.TO._gckind == 'gc'):
                infobits |= T_IS_GCARRAY_OF_GCPTR
            varinfo.ofstolength = llmemory.ArrayLengthOffset(ARRAY)
            varinfo.ofstovar = llmemory.itemoffsetof(TYPE, 0)
        assert isinstance(ARRAY, lltype.Array)
        if ARRAY.OF != lltype.Void:
            offsets = offsets_to_gc_pointers(ARRAY.OF)
        else:
            offsets = ()
        if len(offsets) > 0:
            infobits |= T_HAS_GCPTR_IN_VARSIZE | T_HAS_GCPTR
        varinfo.varofstoptrs = builder.offsets2table(offsets, ARRAY.OF)
        varinfo.varitemsize = llmemory.sizeof(ARRAY.OF)
    if builder.is_weakref_type(TYPE):
        infobits |= T_IS_WEAKREF
    if is_subclass_of_object(TYPE):
        infobits |= T_IS_RPYTHON_INSTANCE
    info.infobits = infobits | T_KEY_VALUE
Esempio n. 18
0
def test_shrink_obj():
    """A reserved var-sized object can be truncated with arena_shrink_obj."""
    from rpython.memory.gcheader import GCHeaderBuilder
    HDR = lltype.Struct('HDR', ('h', lltype.Signed))
    size_gc_header = GCHeaderBuilder(HDR).size_gc_header
    S = lltype.GcStruct('S', ('x', lltype.Signed),
                        ('a', lltype.Array(lltype.Signed)))
    a = arena_malloc(200, False)
    # Reserve room for 10 items, shrink the object down to 5, then reset
    # everything up to the shrunk size.
    arena_reserve(a, size_gc_header + llmemory.sizeof(S, 10))
    arena_shrink_obj(a, size_gc_header + llmemory.sizeof(S, 5))
    arena_reset(a, size_gc_header + llmemory.sizeof(S, 5), False)
Esempio n. 19
0
def test_shrink_obj():
    """arena_shrink_obj truncates a previously reserved var-sized object."""
    from rpython.memory.gcheader import GCHeaderBuilder
    HDR = lltype.Struct('HDR', ('h', lltype.Signed))
    gcheaderbuilder = GCHeaderBuilder(HDR)
    hdr_size = gcheaderbuilder.size_gc_header
    S = lltype.GcStruct('S', ('x', lltype.Signed),
                        ('a', lltype.Array(lltype.Signed)))
    arena = arena_malloc(200, False)
    big = hdr_size + llmemory.sizeof(S, 10)
    small = hdr_size + llmemory.sizeof(S, 5)
    arena_reserve(arena, big)
    # Shrink from 10 array items down to 5, then reset the shrunk range.
    arena_shrink_obj(arena, small)
    arena_reset(arena, small, False)
Esempio n. 20
0
 def test_sizeof_void_array(self):
     """sizeof(Array(Void), n) is independent of n: Void items take no room."""
     from rpython.rtyper.lltypesystem import llmemory
     A = Array(Void)
     small = llmemory.sizeof(A, 1)
     large = llmemory.sizeof(A, 14)

     def f(x):
         if x:
             return small
         return large

     fn = self.getcompiled(f, [int])
     # Both lengths must collapse to the same concrete size.
     assert fn(1) == fn(0)
Esempio n. 21
0
 def allocate_new_page(self, size_class):
     """Allocate and return a new page for the given size_class."""
     #
     # Allocate a new arena if needed.
     if self.current_arena == ARENA_NULL:
         self.allocate_new_arena()
     #
     # The result is simply 'current_arena.freepages'.
     arena = self.current_arena
     result = arena.freepages
     if arena.nfreepages > 0:
         #
         # The 'result' was part of the chained list; read the next.
         arena.nfreepages -= 1
         freepages = result.address[0]
         # Un-reserve the link word so the page memory can be re-reserved.
         llarena.arena_reset(result,
                             llmemory.sizeof(llmemory.Address),
                             0)
         #
     else:
         # The 'result' is part of the uninitialized pages.
         ll_assert(self.num_uninitialized_pages > 0,
                   "fully allocated arena found in self.current_arena")
         self.num_uninitialized_pages -= 1
         if self.num_uninitialized_pages > 0:
             freepages = result + self.page_size
         else:
             freepages = NULL
     #
     arena.freepages = freepages
     if freepages == NULL:
         # This was the last page, so put the arena away into
         # arenas_lists[0].
         ll_assert(arena.nfreepages == 0, 
                   "freepages == NULL but nfreepages > 0")
         arena.nextarena = self.arenas_lists[0]
         self.arenas_lists[0] = arena
         self.current_arena = ARENA_NULL
     #
     # Initialize the fields of the resulting page
     llarena.arena_reserve(result, llmemory.sizeof(PAGE_HEADER))
     page = llmemory.cast_adr_to_ptr(result, PAGE_PTR)
     page.arena = arena
     page.nfree = 0
     # First free block starts right after the page header.
     page.freeblock = result + self.hdrsize
     page.nextpage = PAGE_NULL
     ll_assert(self.page_for_size[size_class] == PAGE_NULL,
               "allocate_new_page() called but a page is already waiting")
     self.page_for_size[size_class] = page
     return page
Esempio n. 22
0
File: rgc.py Progetto: sota/pypy
def ll_arraycopy(source, dest, source_start, dest_start, length):
    """Copy 'length' items between two arrays of the same lltype.

    Non-overlapping copies only (checked when untranslated).  The fast
    path is a single raw_memcopy over the item memory; an item-by-item
    loop is used instead when the GC write barrier cannot handle the
    bulk copy or when translating with 'split_gc_address_space'.
    """
    from rpython.rtyper.lltypesystem.lloperation import llop
    from rpython.rlib.objectmodel import keepalive_until_here

    # XXX: Hack to ensure that we get a proper effectinfo.write_descrs_arrays
    # and also, maybe, speed up very small cases
    if length <= 1:
        if length == 1:
            copy_item(source, dest, source_start, dest_start)
        return

    # supports non-overlapping copies only
    if not we_are_translated():
        if source == dest:
            assert (source_start + length <= dest_start or
                    dest_start + length <= source_start)

    TP = lltype.typeOf(source).TO
    assert TP == lltype.typeOf(dest).TO

    slowpath = False
    if must_split_gc_address_space():
        slowpath = True
    elif _contains_gcptr(TP.OF):
        # perform a write barrier that copies necessary flags from
        # source to dest
        if not llop.gc_writebarrier_before_copy(lltype.Bool, source, dest,
                                                source_start, dest_start,
                                                length):
            slowpath = True
    if slowpath:
        # if the write barrier is not supported, or if we translate with
        # the option 'split_gc_address_space', then copy by hand
        i = 0
        while i < length:
            copy_item(source, dest, i + source_start, i + dest_start)
            i += 1
        return
    # From here on, no GC operation may happen between taking the raw
    # addresses and the keepalives below.
    source_addr = llmemory.cast_ptr_to_adr(source)
    dest_addr   = llmemory.cast_ptr_to_adr(dest)
    cp_source_addr = (source_addr + llmemory.itemoffsetof(TP, 0) +
                      llmemory.sizeof(TP.OF) * source_start)
    cp_dest_addr = (dest_addr + llmemory.itemoffsetof(TP, 0) +
                    llmemory.sizeof(TP.OF) * dest_start)

    llmemory.raw_memcopy(cp_source_addr, cp_dest_addr,
                         llmemory.sizeof(TP.OF) * length)
    keepalive_until_here(source)
    keepalive_until_here(dest)
Esempio n. 23
0
def test_arena_protect():
    """arena_protect() toggles all access to an arena and back."""
    S = lltype.Struct('S', ('x', lltype.Signed))
    ssize = llmemory.sizeof(S)
    a = arena_malloc(100, False)
    arena_reserve(a, ssize)
    p = llmemory.cast_adr_to_ptr(a, lltype.Ptr(S))
    p.x = 123
    assert p.x == 123
    # While protected: reserving and any access through 'p' must fail.
    arena_protect(a, 100, True)
    py.test.raises(ArenaError, arena_reserve, a + 48, ssize)
    py.test.raises(RuntimeError, "p.x")
    py.test.raises(RuntimeError, "p.x = 124")
    # Unprotecting restores the old contents and writability.
    arena_protect(a, 100, False)
    assert p.x == 123
    p.x = 125
    assert p.x == 125
Esempio n. 24
0
def test_arena_protect():
    """Round-trip of arena_protect(True) / arena_protect(False)."""
    arena = arena_malloc(100, False)
    S = lltype.Struct('S', ('x', lltype.Signed))
    arena_reserve(arena, llmemory.sizeof(S))
    p = llmemory.cast_adr_to_ptr(arena, lltype.Ptr(S))
    p.x = 123
    assert p.x == 123
    arena_protect(arena, 100, True)
    # Any reservation or access inside the protected range must raise.
    py.test.raises(ArenaError, arena_reserve, arena + 48, llmemory.sizeof(S))
    py.test.raises(RuntimeError, "p.x")
    py.test.raises(RuntimeError, "p.x = 124")
    arena_protect(arena, 100, False)
    # Contents survive protection; the arena is writable again.
    assert p.x == 123
    p.x = 125
    assert p.x == 125
Esempio n. 25
0
def test_look_inside_object():
    """Raw stores and typed reads of an object inside an arena agree."""
    # this code is also used in translation tests below
    arena_size = 50
    base = arena_malloc(arena_size, False)
    obj = base + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(obj, precomputed_size)
    x_addr = obj + llmemory.offsetof(SX, 'x')
    # A raw store must be visible through the typed pointer, and back.
    x_addr.signed[0] = 123
    assert llmemory.cast_adr_to_ptr(obj, SPTR).x == 123
    llmemory.cast_adr_to_ptr(obj, SPTR).x += 1
    assert x_addr.signed[0] == 124
    # Zero-filling reset: a re-reserved object reads back as 0.
    arena_reset(base, arena_size, True)
    arena_reserve(obj, round_up_for_allocation(llmemory.sizeof(SX)))
    assert llmemory.cast_adr_to_ptr(obj, SPTR).x == 0
    arena_free(base)
    return 42
Esempio n. 26
0
def test_look_inside_object():
    """Exercise raw reads/writes of an object allocated inside an arena."""
    # this code is also used in translation tests below
    size = 50
    a = arena_malloc(size, False)
    b = a + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(b, precomputed_size)
    ofs_x = llmemory.offsetof(SX, 'x')
    (b + ofs_x).signed[0] = 123
    sx = llmemory.cast_adr_to_ptr(b, SPTR)
    assert sx.x == 123
    sx.x += 1
    assert (b + ofs_x).signed[0] == 124
    # Reset with zero-fill, then check a fresh reservation reads 0.
    arena_reset(a, size, True)
    arena_reserve(b, round_up_for_allocation(llmemory.sizeof(SX)))
    assert llmemory.cast_adr_to_ptr(b, SPTR).x == 0
    arena_free(a)
    return 42
Esempio n. 27
0
 def allocate_new_page(self, size_class):
     """Allocate and return a new page for the given size_class."""
     #
     # Allocate a new arena if needed.
     if self.current_arena == ARENA_NULL:
         self.allocate_new_arena()
     #
     # The result is simply 'current_arena.freepages'.
     arena = self.current_arena
     result = arena.freepages
     if arena.nfreepages > 0:
         #
         # The 'result' was part of the chained list; read the next.
         arena.nfreepages -= 1
         freepages = result.address[0]
         # Un-reserve the link word so the page memory can be re-reserved.
         llarena.arena_reset(result, llmemory.sizeof(llmemory.Address), 0)
         #
     else:
         # The 'result' is part of the uninitialized pages.
         ll_assert(self.num_uninitialized_pages > 0,
                   "fully allocated arena found in self.current_arena")
         self.num_uninitialized_pages -= 1
         if self.num_uninitialized_pages > 0:
             freepages = result + self.page_size
         else:
             freepages = NULL
     #
     arena.freepages = freepages
     if freepages == NULL:
         # This was the last page, so put the arena away into
         # arenas_lists[0].
         ll_assert(arena.nfreepages == 0,
                   "freepages == NULL but nfreepages > 0")
         arena.nextarena = self.arenas_lists[0]
         self.arenas_lists[0] = arena
         self.current_arena = ARENA_NULL
     #
     # Initialize the fields of the resulting page
     llarena.arena_reserve(result, llmemory.sizeof(PAGE_HEADER))
     page = llmemory.cast_adr_to_ptr(result, PAGE_PTR)
     page.arena = arena
     page.nfree = 0
     # First free block starts right after the page header.
     page.freeblock = result + self.hdrsize
     page.nextpage = PAGE_NULL
     ll_assert(self.page_for_size[size_class] == PAGE_NULL,
               "allocate_new_page() called but a page is already waiting")
     self.page_for_size[size_class] = page
     return page
Esempio n. 28
0
File: rstr.py Progetto: soIu/rpython
 def copy_string_contents(src, dst, srcstart, dststart, length):
     """Copies 'length' characters from the 'src' string to the 'dst'
     string, starting at position 'srcstart' and 'dststart'."""
     # xxx Warning: don't try to do this at home.  It relies on a lot
     # of details to be sure that it works correctly in all cases.
     # Notably: no GC operation at all from the first cast_ptr_to_adr()
     # because it might move the strings.  The keepalive_until_here()
     # are obscurely essential to make sure that the strings stay alive
     # longer than the raw_memcopy().
     assert length >= 0
     # Bounds checks: both ranges must lie inside their strings.
     ll_assert(srcstart >= 0, "copystrc: negative srcstart")
     ll_assert(srcstart + length <= len(src.chars), "copystrc: src ovf")
     ll_assert(dststart >= 0, "copystrc: negative dststart")
     ll_assert(dststart + length <= len(dst.chars), "copystrc: dst ovf")
     #
     # If the 'split_gc_address_space' option is set, we must copy
     # manually, character-by-character
     if rgc.must_split_gc_address_space():
         i = 0
         while i < length:
             dst.chars[dststart + i] = src.chars[srcstart + i]
             i += 1
         return
     #
     #
     # from here, no GC operations can happen
     asrc = _get_raw_buf(SRC_TP, src, srcstart)
     adst = _get_raw_buf(DST_TP, dst, dststart)
     llmemory.raw_memcopy(asrc, adst, llmemory.sizeof(CHAR_TP) * length)
     # end of "no GC" section
     keepalive_until_here(src)
     keepalive_until_here(dst)
Esempio n. 29
0
    def test_foreach(self):
        """foreach() visits entries in insertion order and honours 'step'."""
        AddressDeque = get_address_deque(10)
        deque = AddressDeque()
        for num_entries in range(30, -1, -1):
            addrs = []
            for _ in range(num_entries):
                addrs.append(raw_malloc(llmemory.sizeof(lltype.Signed)))
            for addr in addrs:
                deque.append(addr)

            seen = []

            def callback(addr, fortytwo):
                # The extra argument is passed through unchanged.
                assert fortytwo == 42
                seen.append(addr)

            deque.foreach(callback, 42)
            assert seen == addrs
            seen = []
            deque.foreach(callback, 42, step=2)
            assert seen == addrs[::2]

            # Draining the deque returns entries FIFO.
            for expected in addrs:
                assert deque.popleft() == expected
            assert not deque.non_empty()
Esempio n. 30
0
def test_address_order():
    # ordering of fake addresses within a single arena
    first = arena_malloc(24, False)
    assert eq(first, first)
    assert lt(first, first + 1)
    assert lt(first + 5, first + 20)

    # ordering across two distinct arenas: normalize so first < second
    second = arena_malloc(24, False)
    if first > second:
        first, second = second, first
    assert lt(first, second)
    assert lt(first + 19, second)
    assert lt(first, second + 19)

    inner = second + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(inner, precomputed_size)
    for smaller in (second, first, llmemory.NULL):
        assert lt(smaller, inner)
    field = inner + llmemory.offsetof(SX, 'x')
    for smaller in (inner, second, first, llmemory.NULL):
        assert lt(smaller, field)
    past = inner + precomputed_size
    for smaller in (field, inner, second, first, llmemory.NULL):
        assert lt(smaller, past)
Esempio n. 31
0
File: rstr.py Progetto: soIu/rpython
 def ll_string2list(RESLIST, src):
     """Return a new list (of list type RESLIST) containing all the
     characters of the string-like object 'src'.

     Uses a single raw_memcopy for the common case, and a manual
     character-by-character loop when the GC address space is split.
     """
     length = len(src.chars)
     lst = RESLIST.ll_newlist(length)
     dst = lst.ll_items()
     SRC = typeOf(src).TO  # STR or UNICODE
     DST = typeOf(dst).TO  # GcArray
     assert DST.OF is SRC.chars.OF
     #
     # If the 'split_gc_address_space' option is set, we must copy
     # manually, character-by-character
     if rgc.must_split_gc_address_space():
         i = 0
         while i < length:
             dst[i] = src.chars[i]
             i += 1
         return lst
     #
     # from here, no GC operations can happen: a collection could move
     # 'src' or 'dst' and invalidate the raw addresses computed below
     asrc = llmemory.cast_ptr_to_adr(src) + (llmemory.offsetof(
         SRC, 'chars') + llmemory.itemoffsetof(SRC.chars, 0))
     adst = llmemory.cast_ptr_to_adr(dst) + llmemory.itemoffsetof(DST, 0)
     llmemory.raw_memcopy(asrc, adst, llmemory.sizeof(DST.OF) * length)
     # end of "no GC" section
     keepalive_until_here(src)
     keepalive_until_here(dst)
     return lst
Esempio n. 32
0
    def decorate(targetcls):
        """
        Create and attach specialized versions of typed_{read,write}. We need
        to do this because the JIT codewriters mandate that base_ofs is an
        RPython constant.

        Raises ValueError if 'targetcls' is not a direct GCBuffer subclass.
        """
        if targetcls.__bases__ != (GCBuffer,):
            raise ValueError("@GCBuffer.decorate should be used only on "
                             "GCBuffer subclasses")

        # captured by the closures below, so they are green constants
        # for the JIT
        base_ofs = targetcls._get_gc_data_offset()
        scale_factor = llmemory.sizeof(lltype.Char)  # byte-addressed access

        @specialize.ll_and_arg(1)
        def typed_read(self, TP, byte_offset):
            # Read one value of type TP at the given byte offset.
            if not is_alignment_correct(TP, byte_offset):
                raise CannotRead
            lldata = self._get_gc_data()
            byte_offset += self._get_gc_data_extra_offset()
            return llop.gc_load_indexed(TP, lldata, byte_offset,
                                        scale_factor, base_ofs)

        @specialize.ll_and_arg(1)
        def typed_write(self, TP, byte_offset, value):
            # Write 'value' (cast to TP) at the given byte offset.
            if self.readonly or not is_alignment_correct(TP, byte_offset):
                raise CannotWrite
            lldata = self._get_gc_data()
            byte_offset += self._get_gc_data_extra_offset()
            value = lltype.cast_primitive(TP, value)
            return llop.gc_store_indexed(lltype.Void, lldata, byte_offset, value,
                                         scale_factor, base_ofs)

        targetcls.typed_read = typed_read
        targetcls.typed_write = typed_write
        return targetcls
Esempio n. 33
0
File: rstr.py Progetto: soIu/rpython
 def copy_string_to_raw(src, ptrdst, srcstart, length):
     """
     Copies 'length' characters from the 'src' string to the 'ptrdst'
     buffer, starting at position 'srcstart'.
     'ptrdst' must be a non-gc Array of Char.
     """
     # xxx Warning: same note as above apply: don't do this at home
     assert length >= 0
     #
     # If the 'split_gc_address_space' option is set, we must copy
     # manually, character-by-character
     if rgc.must_split_gc_address_space():
         i = 0
         while i < length:
             ptrdst[i] = src.chars[srcstart + i]
             i += 1
         return
     #
     # from here, no GC operations can happen: a collection could move
     # 'src' and invalidate the raw address computed by _get_raw_buf
     asrc = _get_raw_buf(SRC_TP, src, srcstart)
     adst = llmemory.cast_ptr_to_adr(ptrdst)
     adst = adst + llmemory.itemoffsetof(typeOf(ptrdst).TO, 0)
     llmemory.raw_memcopy(asrc, adst, llmemory.sizeof(CHAR_TP) * length)
     # end of "no GC" section
     keepalive_until_here(src)
Esempio n. 34
0
    def test_keep_all_keepalives(self):
        """Malloc removal must not drop a keepalive that protects raw
        memory owned by a removed instance."""
        SIZE = llmemory.sizeof(lltype.Signed)
        PARRAY = lltype.Ptr(lltype.FixedSizeArray(lltype.Signed, 1))

        class Holder:
            # owns a raw buffer, freed as soon as the instance dies
            def __init__(self):
                self.addr = llmemory.raw_malloc(SIZE)
            def __del__(self):
                llmemory.raw_free(self.addr)

        class Container:
            pass

        def fn():
            box = Container()
            box.keep = Holder()
            box.data = llmemory.cast_adr_to_ptr(box.keep.addr, PARRAY)
            box.data[0] = 42
            ptr = box.data
            # 'box' could normally die as early as here, which would free
            # the raw memory held by the Holder in box.keep...
            result = ptr[0]
            # ...so we explicitly keep 'box' alive until after the read
            objectmodel.keepalive_until_here(box)
            return result

        graph = self.check(fn, [], [], 42,
                           must_be_removed=False)    # 'Holder' instance left

        # there is a getarrayitem near the end of fn's graph; the memory
        # it reads must still be protected by the keepalive, even after
        # malloc removal
        entrymap = mkentrymap(graph)
        [link] = entrymap[graph.returnblock]
        assert link.prevblock.operations[-1].opname == 'keepalive'
Esempio n. 35
0
    def make_a_nonmoving_copy(self, obj, objsize):
        """Copy 'obj' into freshly allocated external (non-moving) memory
        and register the copy with the oldest generation.  Returns the
        new object, or NULL if the external allocation failed."""
        # NB. the object can have a finalizer or be a weakref, but
        # it's not an issue.
        totalsize = self.size_gc_header() + objsize
        tid = self.header(obj).tid
        if tid & GCFLAG_HASHMASK:
            # reserve one extra word to store the precomputed hash
            totalsize_incl_hash = totalsize + llmemory.sizeof(lltype.Signed)
        else:
            totalsize_incl_hash = totalsize
        newaddr = self.allocate_external_object(totalsize_incl_hash)
        if not newaddr:
            return llmemory.NULL  # can't raise MemoryError during a collect()
        self._nonmoving_copy_count += 1
        self._nonmoving_copy_size += raw_malloc_usage(totalsize)

        llmemory.raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
        if tid & GCFLAG_HASHMASK:
            # store the old object's identity hash just past the copy
            hash = self._get_object_hash(obj, objsize, tid)
            (newaddr + totalsize).signed[0] = hash
            tid |= GC_HASH_HASFIELD
        #
        # GCFLAG_UNVISITED is not set
        # GCFLAG_NO_HEAP_PTRS is not set either, conservatively.  It may be
        # set by the next collection's collect_last_generation_roots().
        # This old object is immediately put at generation 3.
        newobj = newaddr + self.size_gc_header()
        hdr = self.header(newobj)
        hdr.tid = tid | self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS
        ll_assert(self.is_last_generation(newobj),
                  "make_a_nonmoving_copy: object too young")
        self.gen3_rawmalloced_objects.append(newobj)
        self.last_generation_root_objects.append(newobj)
        self.rawmalloced_objects_to_trace.append(newobj)  # visit me
        return newobj
Esempio n. 36
0
def sizeof(tp):
    """Similar to llmemory.sizeof() but tries hard to return an integer
    instead of a symbolic value.
    """
    # look through typedefs first
    if isinstance(tp, lltype.Typedef):
        tp = tp.OF
    if isinstance(tp, lltype.FixedSizeArray):
        # fixed arrays: item size times the constant length
        return sizeof(tp.OF) * tp.length
    if isinstance(tp, lltype.Struct):
        # the hint is present in structures probed by rffi_platform.
        size = tp._hints.get('size')
        if size is None:
            size = llmemory.sizeof(tp)    # a symbolic result in this case
        return size
    if isinstance(tp, lltype.Ptr) or tp is llmemory.Address:
        # pointers and raw addresses are one machine word
        return globals()['r_void*'].BITS/8
    if tp is lltype.Char or tp is lltype.Bool:
        return 1
    if tp is lltype.UniChar:
        return r_wchar_t.BITS/8
    if tp is lltype.Float:
        return 8
    if tp is lltype.SingleFloat:
        return 4
    if tp is lltype.LongFloat:
        # :-/
        return sizeof_c_type("long double")
    # anything else must be a primitive number type
    assert isinstance(tp, lltype.Number)
    if tp is lltype.Signed:
        return LONG_BIT/8
    return tp._type.BITS/8
Esempio n. 37
0
    def make_a_nonmoving_copy(self, obj, objsize):
        """Copy 'obj' into freshly allocated external (non-moving) memory
        and register the copy with the oldest generation.  Returns the
        new object, or NULL if the external allocation failed."""
        # NB. the object can have a finalizer or be a weakref, but
        # it's not an issue.
        totalsize = self.size_gc_header() + objsize
        tid = self.header(obj).tid
        if tid & GCFLAG_HASHMASK:
            # reserve one extra word to store the precomputed hash
            totalsize_incl_hash = totalsize + llmemory.sizeof(lltype.Signed)
        else:
            totalsize_incl_hash = totalsize
        newaddr = self.allocate_external_object(totalsize_incl_hash)
        if not newaddr:
            return llmemory.NULL   # can't raise MemoryError during a collect()
        self._nonmoving_copy_count += 1
        self._nonmoving_copy_size += raw_malloc_usage(totalsize)

        llmemory.raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
        if tid & GCFLAG_HASHMASK:
            # store the old object's identity hash just past the copy
            hash = self._get_object_hash(obj, objsize, tid)
            (newaddr + totalsize).signed[0] = hash
            tid |= GC_HASH_HASFIELD
        #
        # GCFLAG_UNVISITED is not set
        # GCFLAG_NO_HEAP_PTRS is not set either, conservatively.  It may be
        # set by the next collection's collect_last_generation_roots().
        # This old object is immediately put at generation 3.
        newobj = newaddr + self.size_gc_header()
        hdr = self.header(newobj)
        hdr.tid = tid | self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS
        ll_assert(self.is_last_generation(newobj),
                  "make_a_nonmoving_copy: object too young")
        self.gen3_rawmalloced_objects.append(newobj)
        self.last_generation_root_objects.append(newobj)
        self.rawmalloced_objects_to_trace.append(newobj)   # visit me
        return newobj
Esempio n. 38
0
    def test_keep_all_keepalives(self):
        """Malloc removal must not drop a keepalive that protects raw
        memory owned by a removed instance."""
        SIZE = llmemory.sizeof(lltype.Signed)
        PARRAY = lltype.Ptr(lltype.FixedSizeArray(lltype.Signed, 1))

        class RawOwner:
            # owns a raw buffer, freed as soon as the instance dies
            def __init__(self):
                self.addr = llmemory.raw_malloc(SIZE)
            def __del__(self):
                llmemory.raw_free(self.addr)

        class Wrapper:
            pass

        def fn():
            holder = Wrapper()
            holder.keep = RawOwner()
            holder.data = llmemory.cast_adr_to_ptr(holder.keep.addr, PARRAY)
            holder.data[0] = 42
            ptr = holder.data
            # 'holder' could normally die as early as here, which would
            # free the raw memory held by the RawOwner in holder.keep...
            result = ptr[0]
            # ...so we explicitly keep 'holder' alive until after the read
            objectmodel.keepalive_until_here(holder)
            return result

        graph = self.check(fn, [], [], 42,
                           must_be_removed=False)    # 'RawOwner' instance left

        # there is a getarrayitem near the end of fn's graph; the memory
        # it reads must still be protected by the keepalive, even after
        # malloc removal
        entrymap = mkentrymap(graph)
        [link] = entrymap[graph.returnblock]
        assert link.prevblock.operations[-1].opname == 'keepalive'
Esempio n. 39
0
def test_address_order():
    # fake addresses inside one arena compare by their offset
    base = arena_malloc(24, False)
    assert eq(base, base)
    assert lt(base, base + 1)
    assert lt(base + 5, base + 20)

    # two distinct arenas; normalize so that base < other
    other = arena_malloc(24, False)
    if base > other:
        base, other = other, base
    assert lt(base, other)
    assert lt(base + 19, other)
    assert lt(base, other + 19)

    reserved = other + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(reserved, precomputed_size)
    field = reserved + llmemory.offsetof(SX, 'x')
    end = reserved + precomputed_size
    # each later address is greater than all earlier ones, and NULL is
    # smaller than everything
    assert lt(other, reserved) and lt(base, reserved)
    assert lt(llmemory.NULL, reserved)
    assert lt(reserved, field) and lt(other, field) and lt(base, field)
    assert lt(llmemory.NULL, field)
    assert lt(field, end) and lt(reserved, end)
    assert lt(other, end) and lt(base, end)
    assert lt(llmemory.NULL, end)
Esempio n. 40
0
File: ffi.py Progetto: kidaa/pixie
    def fb_build(self):
        # Build a CIF_DESCRIPTION.  Actually this computes the size and
        # allocates a larger amount of data.  It starts with a
        # CIF_DESCRIPTION and continues with data needed for the CIF:
        #
        #  - the argument types, as an array of 'ffi_type *'.
        #
        #  - optionally, the result's and the arguments' ffi type data
        #    (this is used only for 'struct' ffi types; in other cases the
        #    'ffi_type *' just points to static data like 'ffi_type_sint32').
        #
        # NOTE: the order of the fb_alloc() calls below defines the memory
        # layout; do not reorder them.
        nargs = len(self.fargs)

        # start with a cif_description (cif and exchange_* fields)
        self.fb_alloc(llmemory.sizeof(CIF_DESCRIPTION, nargs))

        # next comes an array of 'ffi_type*', one per argument
        atypes = self.fb_alloc(rffi.sizeof(FFI_TYPE_P) * nargs)
        self.atypes = rffi.cast(FFI_TYPE_PP, atypes)

        # next comes the result type data
        self.rtype = self.fb_fill_type(self.fresult, True)

        # next comes each argument's type data
        for i, farg in enumerate(self.fargs):
            atype = self.fb_fill_type(farg, False)
            if self.atypes:
                # NOTE(review): self.atypes looks like it can be null during
                # a first size-computing pass of fb_alloc -- confirm
                self.atypes[i] = atype
Esempio n. 41
0
def guess_size_obj(obj):
    """Best-effort guess of the memory size of the lltype object 'obj',
    as an integer.  Returns 0 when the length of a 'nolength' array
    cannot be determined.  Group objects are the sum of their members."""
    TYPE = typeOf(obj)
    ptr = _ptr(Ptr(TYPE), obj)
    if TYPE._is_varsize():
        # need the dynamic length to compute the size
        arrayfld = getattr(TYPE, '_arrayfld', None)
        if arrayfld:
            length = len(getattr(ptr, arrayfld))
        else:
            try:
                length = len(ptr)
            except TypeError:
                if TYPE._hints.get("nolength", False) and hasattr(
                        obj, "items"):
                    length = len(obj.items)
                else:
                    print "couldn't find size of", ptr
                    return 0
    else:
        length = None
    if type(TYPE) is llgroup.GroupType:
        return sum(guess_size_obj(m) for m in obj.members)
    #print obj, ', length =', length
    r = convert_offset_to_int(llmemory.sizeof(TYPE, length))
    #print '\tr =', r
    return r
Esempio n. 42
0
def sizeof(tp):
    """Similar to llmemory.sizeof() but tries hard to return an integer
    instead of a symbolic value.
    """
    # look through typedefs first
    if isinstance(tp, lltype.Typedef):
        tp = tp.OF
    if isinstance(tp, lltype.FixedSizeArray):
        # fixed arrays: item size times the constant length
        return sizeof(tp.OF) * tp.length
    if isinstance(tp, lltype.Struct):
        # the hint is present in structures probed by rffi_platform.
        size = tp._hints.get('size')
        if size is None:
            size = llmemory.sizeof(tp)    # a symbolic result in this case
        return size
    if isinstance(tp, lltype.Ptr) or tp is llmemory.Address:
        # pointers and raw addresses are one machine word
        return globals()['r_void*'].BITS/8
    if tp is lltype.Char or tp is lltype.Bool:
        return 1
    if tp is lltype.UniChar:
        return r_wchar_t.BITS/8
    if tp is lltype.Float:
        return 8
    if tp is lltype.SingleFloat:
        return 4
    if tp is lltype.LongFloat:
        # :-/
        return sizeof_c_type("long double")
    # anything else must be a primitive number type
    assert isinstance(tp, lltype.Number)
    if tp is lltype.Signed:
        return LONG_BIT/8
    return tp._type.BITS/8
Esempio n. 43
0
    def fb_build(self):
        # Build a CIF_DESCRIPTION.  Actually this computes the size and
        # allocates a larger amount of data.  It starts with a
        # CIF_DESCRIPTION and continues with data needed for the CIF:
        #
        #  - the argument types, as an array of 'ffi_type *'.
        #
        #  - optionally, the result's and the arguments' ffi type data
        #    (this is used only for 'struct' ffi types; in other cases the
        #    'ffi_type *' just points to static data like 'ffi_type_sint32').
        #
        # NOTE: the order of the fb_alloc() calls below defines the memory
        # layout; do not reorder them.
        nargs = len(self.fargs)

        # start with a cif_description (cif and exchange_* fields)
        self.fb_alloc(llmemory.sizeof(CIF_DESCRIPTION, nargs))

        # next comes an array of 'ffi_type*', one per argument
        atypes = self.fb_alloc(rffi.sizeof(FFI_TYPE_P) * nargs)
        self.atypes = rffi.cast(FFI_TYPE_PP, atypes)

        # next comes the result type data
        self.rtype = self.fb_fill_type(self.fresult, True)

        # next comes each argument's type data
        for i, farg in enumerate(self.fargs):
            atype = self.fb_fill_type(farg, False)
            if self.atypes:
                # NOTE(review): self.atypes looks like it can be null during
                # a first size-computing pass of fb_alloc -- confirm
                self.atypes[i] = atype
Esempio n. 44
0
def ll_shrink_array(p, smallerlength):
    """Return a GcStruct like 'p' but with its var-sized array part
    truncated to 'smallerlength' items.  May shrink in place (returns
    'p') if the GC supports the shrink_array operation; otherwise
    allocates a new object and copies the data over."""
    from rpython.rtyper.lltypesystem.lloperation import llop
    from rpython.rlib.objectmodel import keepalive_until_here

    if llop.shrink_array(lltype.Bool, p, smallerlength):
        return p  # done by the GC
    # XXX we assume for now that the type of p is GcStruct containing a
    # variable array, with no further pointers anywhere, and exactly one
    # field in the fixed part -- like STR and UNICODE.

    TP = lltype.typeOf(p).TO
    newp = lltype.malloc(TP, smallerlength)

    # copy the single fixed field (e.g. the 'hash' field of STR)
    assert len(TP._names) == 2
    field = getattr(p, TP._names[0])
    setattr(newp, TP._names[0], field)

    # bulk-copy the first 'smallerlength' array items as raw memory
    ARRAY = getattr(TP, TP._arrayfld)
    offset = llmemory.offsetof(TP, TP._arrayfld) + llmemory.itemoffsetof(ARRAY, 0)
    source_addr = llmemory.cast_ptr_to_adr(p) + offset
    dest_addr = llmemory.cast_ptr_to_adr(newp) + offset
    llmemory.raw_memcopy(source_addr, dest_addr, llmemory.sizeof(ARRAY.OF) * smallerlength)

    # the raw addresses above must not outlive the objects
    keepalive_until_here(p)
    keepalive_until_here(newp)
    return newp
Esempio n. 45
0
def ll_shrink_array(p, smallerlength):
    """Return a GcStruct like 'p' but with its var-sized array part
    truncated to 'smallerlength' items.  May shrink in place (returns
    'p') if the GC supports the shrink_array operation; otherwise
    allocates a new object and copies the data over."""
    from rpython.rtyper.lltypesystem.lloperation import llop
    from rpython.rlib.objectmodel import keepalive_until_here

    if llop.shrink_array(lltype.Bool, p, smallerlength):
        return p  # done by the GC
    # XXX we assume for now that the type of p is GcStruct containing a
    # variable array, with no further pointers anywhere, and exactly one
    # field in the fixed part -- like STR and UNICODE.

    TP = lltype.typeOf(p).TO
    newp = lltype.malloc(TP, smallerlength)

    # copy the single fixed field (e.g. the 'hash' field of STR)
    assert len(TP._names) == 2
    field = getattr(p, TP._names[0])
    setattr(newp, TP._names[0], field)

    # bulk-copy the first 'smallerlength' array items as raw memory
    ARRAY = getattr(TP, TP._arrayfld)
    offset = (llmemory.offsetof(TP, TP._arrayfld) +
              llmemory.itemoffsetof(ARRAY, 0))
    source_addr = llmemory.cast_ptr_to_adr(p) + offset
    dest_addr = llmemory.cast_ptr_to_adr(newp) + offset
    llmemory.raw_memcopy(source_addr, dest_addr,
                         llmemory.sizeof(ARRAY.OF) * smallerlength)

    # the raw addresses above must not outlive the objects
    keepalive_until_here(p)
    keepalive_until_here(newp)
    return newp
Esempio n. 46
0
 def __init__(self, arena_size, page_size, small_request_threshold):
     """Set up the arena-based small-object allocator.

     'arena_size' and 'page_size' are in bytes; 'page_size' must be
     larger than the page header."""
     # 'small_request_threshold' is the largest size that we
     # can ask with self.malloc().
     self.arena_size = arena_size
     self.page_size = page_size
     self.small_request_threshold = small_request_threshold
     self.arenas_count = 0
     #
     # 'pageaddr_for_size': for each size N between WORD and
     # small_request_threshold (included), contains either NULL or
     # a pointer to a page that has room for at least one more
     # allocation of the given size.
     length = small_request_threshold / WORD + 1
     self.page_for_size = self._new_page_ptr_list(length)
     self.full_page_for_size = self._new_page_ptr_list(length)
     self.old_page_for_size = self._new_page_ptr_list(length)
     self.old_full_page_for_size = self._new_page_ptr_list(length)
     # nblocks_for_size[i]: how many blocks of size i*WORD fit in a page
     self.nblocks_for_size = lltype.malloc(rffi.CArray(lltype.Signed),
                                           length,
                                           flavor='raw',
                                           immortal=True)
     # size of the PAGE_HEADER at the start of every page
     self.hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER))
     assert page_size > self.hdrsize
     self.nblocks_for_size[0] = 0  # unused
     for i in range(1, length):
         self.nblocks_for_size[i] = (page_size - self.hdrsize) // (WORD * i)
     #
     self.max_pages_per_arena = arena_size // page_size
     # arenas_lists[i]: chained list of arenas with i free pages
     self.arenas_lists = lltype.malloc(rffi.CArray(ARENA_PTR),
                                       self.max_pages_per_arena,
                                       flavor='raw',
                                       zero=True,
                                       immortal=True)
     # this is used in mass_free() only
     self.old_arenas_lists = lltype.malloc(rffi.CArray(ARENA_PTR),
                                           self.max_pages_per_arena,
                                           flavor='raw',
                                           zero=True,
                                           immortal=True)
     #
     # the arena currently consumed; it must have at least one page
     # available, or be NULL.  The arena object that we point to is
     # not in any 'arenas_lists'.  We will consume all its pages before
     # we choose a next arena, even if there is a major collection
     # in-between.
     self.current_arena = ARENA_NULL
     #
     # guarantee that 'arenas_lists[1:min_empty_nfreepages]' are all empty
     self.min_empty_nfreepages = self.max_pages_per_arena
     #
     # part of current_arena might still contain uninitialized pages
     self.num_uninitialized_pages = 0
     #
     # the total memory used, counting every block in use, without
     # the additional bookkeeping stuff.
     self.total_memory_used = r_uint(0)
     self.peak_memory_used = r_uint(0)
     self.total_memory_alloced = r_uint(0)
     self.peak_memory_alloced = r_uint(0)
Esempio n. 47
0
class __extend__(pairtype(TypedAddressAccessRepr, IntegerRepr)):
    # rtyping of 'typed_address_access[index]': emit a raw_load at
    # address + index * sizeof(type)
    def rtype_getitem((r_acc, r_int), hop):
        v_addr, v_offs = hop.inputargs(hop.args_r[0], lltype.Signed)
        # scale the index by the item size (an RPython-level constant)
        c_size = hop.inputconst(lltype.Signed, sizeof(r_acc.type))
        v_offs_mult = hop.genop('int_mul', [v_offs, c_size],
                                resulttype=lltype.Signed)
        return hop.genop('raw_load', [v_addr, v_offs_mult],
                         resulttype=r_acc.type)
Esempio n. 48
0
File: misc.py Progetto: kipras/pypy
def _raw_memcopy_opaque(source, dest, size):
    """Copy 'size' chars from the raw buffer 'source' to 'dest', going
    through the llmemory interface (the hacks involved are all removed
    after translation)."""
    first_item = llmemory.itemoffsetof(rffi.CCHARP.TO, 0)
    src_addr = llmemory.cast_ptr_to_adr(source) + first_item
    dst_addr = llmemory.cast_ptr_to_adr(dest) + first_item
    nbytes = size * llmemory.sizeof(lltype.Char)
    llmemory.raw_memcopy(src_addr, dst_addr, nbytes)
Esempio n. 49
0
def ll_populate_list_from_raw_array(ll_list, src_ptr, length):
    """Resize 'll_list' to 'length' items and fill it with a raw memory
    copy of the first 'length' items of the raw array 'src_ptr'."""
    ITEM = lltype.typeOf(src_ptr).TO.OF
    size = llmemory.sizeof(ITEM) * length
    # the resize must happen before taking raw addresses: it may trigger
    # a GC collection that moves the list items
    ll_list._ll_resize(length)
    # start of no-GC section
    src_adr = get_raw_buf(src_ptr)
    dst_adr = get_raw_buf(ll_list.ll_items())
    llmemory.raw_memcopy(src_adr, dst_adr, size)
Esempio n. 50
0
 def _make_a_copy_with_tid(self, obj, objsize, tid):
     """Copy 'obj' (of size 'objsize') to self.free, writing 'tid' into
     the new header.  If the object carries an identity hash, append it
     in an extra word after the copy.  Returns the new object."""
     totalsize = self.size_gc_header() + objsize
     newaddr = self.free
     llarena.arena_reserve(newaddr, totalsize)
     raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
     if tid & GCFLAG_HASHMASK:
         # preserve the identity hash in a trailing extra word
         hash = self._get_object_hash(obj, objsize, tid)
         llarena.arena_reserve(newaddr + totalsize,
                               llmemory.sizeof(lltype.Signed))
         (newaddr + totalsize).signed[0] = hash
         tid |= GC_HASH_HASFIELD
         totalsize += llmemory.sizeof(lltype.Signed)
     self.free += totalsize
     newhdr = llmemory.cast_adr_to_ptr(newaddr, lltype.Ptr(self.HDR))
     newhdr.tid = tid
     newobj = newaddr + self.size_gc_header()
     return newobj
Esempio n. 51
0
 def _make_a_copy_with_tid(self, obj, objsize, tid):
     """Copy 'obj' (of size 'objsize') to self.free, writing 'tid' into
     the new header.  If the object carries an identity hash, append it
     in an extra word after the copy.  Returns the new object."""
     totalsize = self.size_gc_header() + objsize
     newaddr = self.free
     llarena.arena_reserve(newaddr, totalsize)
     raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
     if tid & GCFLAG_HASHMASK:
         # preserve the identity hash in a trailing extra word
         hash = self._get_object_hash(obj, objsize, tid)
         llarena.arena_reserve(newaddr + totalsize,
                               llmemory.sizeof(lltype.Signed))
         (newaddr + totalsize).signed[0] = hash
         tid |= GC_HASH_HASFIELD
         totalsize += llmemory.sizeof(lltype.Signed)
     self.free += totalsize
     newhdr = llmemory.cast_adr_to_ptr(newaddr, lltype.Ptr(self.HDR))
     newhdr.tid = tid
     newobj = newaddr + self.size_gc_header()
     return newobj
Esempio n. 52
0
def ll_copy_list_to_raw_array(ll_list, dst_ptr):
    """Copy all the items of 'll_list' into the raw array 'dst_ptr'
    with a single raw memory copy."""
    # this code is delicate: we must ensure that there are no GC operations
    # around the call to raw_memcopy
    #
    ITEM = lltype.typeOf(dst_ptr).TO.OF
    size = llmemory.sizeof(ITEM) * ll_list.ll_length()
    # start of no-GC section
    src_adr = get_raw_buf(ll_list.ll_items())
    dst_adr = get_raw_buf(dst_ptr)
    llmemory.raw_memcopy(src_adr, dst_adr, size)
Esempio n. 53
0
def str_storage_getitem(TP, s, byte_offset):
    """Read a value of type TP out of the character storage of the
    string 's', at the given byte offset.

    WARNING: 'byte_offset' is, as its name says, measured in bytes and
    must be aligned for TP, otherwise on some platforms this crashes!
    """
    lls = llstr(s)
    # offset of the first character inside the STR structure
    data_start = llmemory.offsetof(STR, 'chars')
    data_start += llmemory.itemoffsetof(STR.chars, 0)
    one_byte = llmemory.sizeof(lltype.Char)
    return llop.gc_load_indexed(TP, lls, byte_offset, one_byte, data_start)
Esempio n. 54
0
def ll_arrayclear(p):
    """Zero out the GcArray 'p', like memset(array, 0, ...).
    Only for GcArray(primitive-type) for now."""
    from rpython.rlib.objectmodel import keepalive_until_here

    ARRAY = lltype.typeOf(p).TO
    nitems = len(p)
    first_item = llmemory.cast_ptr_to_adr(p) + llmemory.itemoffsetof(ARRAY, 0)
    llmemory.raw_memclear(first_item, llmemory.sizeof(ARRAY.OF) * nitems)
    # the raw address must not outlive the array itself
    keepalive_until_here(p)
Esempio n. 55
0
 def gct_malloc(self, hop, add_flags=None):
     """Rewrite a fixed-size 'malloc' operation, dispatching to the
     gct_fv_<flavor>_malloc method for the requested flavor."""
     TYPE = hop.spaceop.result.concretetype.TO
     assert not TYPE._is_varsize()
     flags = hop.spaceop.args[1].value
     flavor = flags['flavor']
     meth = getattr(self, 'gct_fv_%s_malloc' % flavor, None)
     assert meth, "%s has no support for malloc with flavor %r" % (self, flavor)
     # fixed-size, so the byte count is a compile-time constant
     c_size = rmodel.inputconst(lltype.Signed, llmemory.sizeof(TYPE))
     v_raw = meth(hop, flags, TYPE, c_size)
     hop.cast_result(v_raw)
Esempio n. 56
0
File: rgc.py Progetto: Qointum/pypy
def ll_arraycopy(source, dest, source_start, dest_start, length):
    """Copy 'length' items from 'source' to 'dest' (arrays of the same
    lltype), starting at the given indexes.  Supports non-overlapping
    copies only.  Performs a GC write barrier when the items contain
    GC pointers."""
    from rpython.rtyper.lltypesystem.lloperation import llop
    from rpython.rlib.objectmodel import keepalive_until_here

    # XXX: Hack to ensure that we get a proper effectinfo.write_descrs_arrays
    # and also, maybe, speed up very small cases
    if length <= 1:
        if length == 1:
            copy_item(source, dest, source_start, dest_start)
        return

    # supports non-overlapping copies only
    if not we_are_translated():
        if source == dest:
            assert (source_start + length <= dest_start or
                    dest_start + length <= source_start)

    TP = lltype.typeOf(source).TO
    assert TP == lltype.typeOf(dest).TO
    if _contains_gcptr(TP.OF):
        # perform a write barrier that copies necessary flags from
        # source to dest
        if not llop.gc_writebarrier_before_copy(lltype.Bool, source, dest,
                                                source_start, dest_start,
                                                length):
            # if the write barrier is not supported, copy by hand
            i = 0
            while i < length:
                copy_item(source, dest, i + source_start, i + dest_start)
                i += 1
            return
    # from here, no GC operations can happen: a collection could move
    # the arrays and invalidate the raw addresses computed below
    source_addr = llmemory.cast_ptr_to_adr(source)
    dest_addr   = llmemory.cast_ptr_to_adr(dest)
    cp_source_addr = (source_addr + llmemory.itemoffsetof(TP, 0) +
                      llmemory.sizeof(TP.OF) * source_start)
    cp_dest_addr = (dest_addr + llmemory.itemoffsetof(TP, 0) +
                    llmemory.sizeof(TP.OF) * dest_start)

    llmemory.raw_memcopy(cp_source_addr, cp_dest_addr,
                         llmemory.sizeof(TP.OF) * length)
    # end of "no GC" section
    keepalive_until_here(source)
    keepalive_until_here(dest)
Esempio n. 57
0
 def test_length(self):
     """length() must track appends and pops exactly."""
     AddressStack = get_address_stack(10)
     stack = AddressStack()
     addr = raw_malloc(llmemory.sizeof(lltype.Signed))
     for expected in range(42):
         assert stack.length() == expected
         stack.append(addr)
     for expected in range(41, -1, -1):
         popped = stack.pop()
         assert popped == addr
         assert stack.length() == expected
Esempio n. 58
0
    def copy_raw_to_string(ptrsrc, dst, dststart, length):
        # Copy 'length' characters from the raw buffer 'ptrsrc' into the
        # string 'dst', starting at position 'dststart'.
        # xxx Warning: same note as above apply: don't do this at home
        assert length >= 0
        # from here, no GC operations can happen: a collection could move
        # 'dst' and invalidate the raw address computed by _get_raw_buf
        adst = _get_raw_buf(SRC_TP, dst, dststart)
        asrc = llmemory.cast_ptr_to_adr(ptrsrc)

        asrc = asrc + llmemory.itemoffsetof(typeOf(ptrsrc).TO, 0)
        llmemory.raw_memcopy(asrc, adst, llmemory.sizeof(CHAR_TP) * length)
        # end of "no GC" section
        keepalive_until_here(dst)