Exemplo n.º 1
0
Arquivo: rstr.py Projeto: alkorzt/pypy
 def copy_string_contents(src, dst, srcstart, dststart, length):
     """Raw-copy `length` characters of `src` (from srcstart) into `dst`
     (at dststart) by computing the character-buffer addresses and doing
     a single raw memory copy."""
     assert srcstart >= 0
     assert dststart >= 0
     assert length >= 0
     src_addr = llmemory.cast_ptr_to_adr(src) + _str_ofs(srcstart)
     dst_addr = llmemory.cast_ptr_to_adr(dst) + _str_ofs(dststart)
     llmemory.raw_memcopy(src_addr, dst_addr,
                          llmemory.sizeof(CHAR_TP) * length)
Exemplo n.º 2
0
    def str_from_buffer(raw_buf, gc_buf, allocated_size, needed_size):
        """
        Converts from a pair returned by alloc_buffer to a high-level string.
        The returned string will be truncated to needed_size.
        """
        assert allocated_size >= needed_size

        # Fast path: the GC-managed buffer already has exactly the right
        # length, so it can be handed out directly without copying.
        if gc_buf and (allocated_size == needed_size):
            return hlstrtype(gc_buf)

        # Slow path: allocate a fresh string of the needed size and copy
        # the character data into it from whichever buffer was used.
        new_buf = lltype.malloc(STRTYPE, needed_size)
        try:
            # Offset from the start of a string object to its first char.
            str_chars_offset = (offsetof(STRTYPE, 'chars') + \
                                itemoffsetof(STRTYPE.chars, 0))
            if gc_buf:
                src = cast_ptr_to_adr(gc_buf) + str_chars_offset
            else:
                src = cast_ptr_to_adr(raw_buf) + itemoffsetof(TYPEP.TO, 0)
            dest = cast_ptr_to_adr(new_buf) + str_chars_offset
            ## FIXME: This is bad, because dest could potentially move
            ## if there are threads involved.
            raw_memcopy(src, dest, llmemory.sizeof(ll_char_type) * needed_size)
            return hlstrtype(new_buf)
        finally:
            # Keep the destination string alive until the raw copy is done;
            # only its raw address is referenced during the memcopy.
            keepalive_until_here(new_buf)
Exemplo n.º 3
0
def ll_arraycopy(source, dest, source_start, dest_start, length):
    """Copy `length` items from `source` (starting at source_start) into
    `dest` (starting at dest_start).  Both arrays must have the same item
    type; overlapping regions within the same array are not supported."""
    from pypy.rpython.lltypesystem.lloperation import llop
    from pypy.rpython.lltypesystem import lltype, llmemory
    from pypy.rlib.objectmodel import keepalive_until_here

    # supports non-overlapping copies only
    if not we_are_translated():
        if source == dest:
            assert (source_start + length <= dest_start
                    or dest_start + length <= source_start)

    TP = lltype.typeOf(source).TO
    assert TP == lltype.typeOf(dest).TO
    if isinstance(TP.OF, lltype.Ptr) and TP.OF.TO._gckind == 'gc':
        # perform a write barrier that copies necessary flags from
        # source to dest
        if not llop.gc_writebarrier_before_copy(lltype.Bool, source, dest):
            # if the write barrier is not supported, copy by hand
            for i in range(length):
                dest[i + dest_start] = source[i + source_start]
            return
    # Raw memory copy: compute the address of the first copied item in
    # each array and move all the bytes in one go.
    source_addr = llmemory.cast_ptr_to_adr(source)
    dest_addr = llmemory.cast_ptr_to_adr(dest)
    cp_source_addr = (source_addr + llmemory.itemoffsetof(TP, 0) +
                      llmemory.sizeof(TP.OF) * source_start)
    cp_dest_addr = (dest_addr + llmemory.itemoffsetof(TP, 0) +
                    llmemory.sizeof(TP.OF) * dest_start)

    llmemory.raw_memcopy(cp_source_addr, cp_dest_addr,
                         llmemory.sizeof(TP.OF) * length)
    # The memcopy above works on bare addresses, so both arrays must be
    # kept alive (and unmoved) until it has finished.
    keepalive_until_here(source)
    keepalive_until_here(dest)
Exemplo n.º 4
0
 def writebarrier_before_copy(self, source, dest, source_start, dest_start, length):
     """Run the GC's pre-copy write barrier; returns True when the bulk
     copy may proceed (always True when no barrier is needed)."""
     if not self.gc.needs_write_barrier:
         return True
     return self.gc.writebarrier_before_copy(
         llmemory.cast_ptr_to_adr(source),
         llmemory.cast_ptr_to_adr(dest),
         source_start, dest_start, length)
Exemplo n.º 5
0
Arquivo: gc.py Projeto: enyst/plexnet
 def get_address_of_gcref(self, gcref):
     """Return a nonmovable address whose word contains (a copy of) the
     given GCREF.  Results are cached in a small open hashtable keyed by
     the object's current address; the cache entry becomes stale (but
     never unsafe) if the object moves."""
     assert lltype.typeOf(gcref) == llmemory.GCREF
     # first look in the hashtable, using an inexact hash (fails after
     # the object moves)
     addr = llmemory.cast_ptr_to_adr(gcref)
     hash = llmemory.cast_adr_to_int(addr)
     hash -= hash >> self.HASHTABLE_BITS
     hash &= self.HASHTABLE_SIZE - 1
     addr_ref = self.hashtable[hash]
     # the following test is safe anyway, because the addresses found
     # in the hashtable are always the addresses of nonmovable stuff
     # ('addr_ref' is an address inside self.list, not directly the
     # address of a real moving GC object -- that's 'addr_ref.address[0]'.)
     if addr_ref.address[0] == addr:
         return addr_ref
     # if it fails, add an entry to the list
     if self.nextindex == len(self.list):
         # reallocate first, increasing a bit the size every time
         # (growth factor 5/4; old lists are retained so previously
         # handed-out addresses stay valid)
         self.oldlists.append(self.list)
         self.list = self.alloc_gcref_list(len(self.list) // 4 * 5)
         self.nextindex = 0
     # add it
     index = self.nextindex
     self.list[index] = gcref
     addr_ref = lltype.direct_ptradd(lltype.direct_arrayitems(self.list),
                                     index)
     addr_ref = llmemory.cast_ptr_to_adr(addr_ref)
     self.nextindex = index + 1
     # record it in the hashtable
     self.hashtable[hash] = addr_ref
     return addr_ref
Exemplo n.º 6
0
 def setinterior(self,
                 toplevelcontainer,
                 inneraddr,
                 INNERTYPE,
                 newvalue,
                 offsets=()):
     """Store `newvalue` at `inneraddr` inside `toplevelcontainer`,
     applying a GC write barrier first when the store writes a gc pointer
     into a gc-managed container."""
     if (lltype.typeOf(toplevelcontainer).TO._gckind == 'gc'
             and isinstance(INNERTYPE, lltype.Ptr)
             and INNERTYPE.TO._gckind == 'gc'):
         #
         wb = True
         if self.has_write_barrier_from_array:
             # Prefer the array-specialized barrier when one of the
             # offsets is a (non-field) integer array index.
             for index in offsets:
                 if type(index) is not str:
                     assert (type(index) is int  # <- fast path
                             or lltype.typeOf(index) == lltype.Signed)
                     self.gc.write_barrier_from_array(
                         llmemory.cast_ptr_to_adr(newvalue),
                         llmemory.cast_ptr_to_adr(toplevelcontainer), index)
                     wb = False
                     break
         #
         if wb:
             # Fall back to the generic write barrier.
             self.gc.write_barrier(
                 llmemory.cast_ptr_to_adr(newvalue),
                 llmemory.cast_ptr_to_adr(toplevelcontainer))
     llheap.setinterior(toplevelcontainer, inneraddr, INNERTYPE, newvalue)
Exemplo n.º 7
0
 def walk_roots(self, collect_stack_root, collect_static_in_prebuilt_nongc,
                collect_static_in_prebuilt_gc):
     """Invoke the given callbacks on every root address: static prebuilt
     gc/non-gc pointers, then the simulated stack roots.  Any callback may
     be None to skip that category."""
     gc = self.tester.gc
     layoutbuilder = self.tester.layoutbuilder
     if collect_static_in_prebuilt_gc:
         for addrofaddr in layoutbuilder.addresses_of_static_ptrs:
             if addrofaddr.address[0]:
                 collect_static_in_prebuilt_gc(gc, addrofaddr)
     if collect_static_in_prebuilt_nongc:
         for addrofaddr in layoutbuilder.addresses_of_static_ptrs_in_nongc:
             if addrofaddr.address[0]:
                 collect_static_in_prebuilt_nongc(gc, addrofaddr)
     if collect_stack_root:
         stackroots = self.tester.stackroots
         # Copy the stack roots' addresses into a raw array so the GC can
         # update them in place; afterwards cast the (possibly updated)
         # addresses back into the stackroots list.
         a = lltype.malloc(ADDR_ARRAY, len(stackroots), flavor='raw')
         for i in range(len(a)):
             a[i] = llmemory.cast_ptr_to_adr(stackroots[i])
         a_base = lltype.direct_arrayitems(a)
         for i in range(len(a)):
             ai = lltype.direct_ptradd(a_base, i)
             collect_stack_root(gc, llmemory.cast_ptr_to_adr(ai))
         for i in range(len(a)):
             PTRTYPE = lltype.typeOf(stackroots[i])
             stackroots[i] = llmemory.cast_adr_to_ptr(a[i], PTRTYPE)
         lltype.free(a, flavor='raw')
Exemplo n.º 8
0
 def walk_roots(self, collect_stack_root,
                collect_static_in_prebuilt_nongc,
                collect_static_in_prebuilt_gc):
     """Invoke the given callbacks on every root address: static prebuilt
     gc/non-gc pointers, then the simulated stack roots.  Any callback may
     be None to skip that category."""
     gc = self.tester.gc
     layoutbuilder = self.tester.layoutbuilder
     if collect_static_in_prebuilt_gc:
         for addrofaddr in layoutbuilder.addresses_of_static_ptrs:
             if addrofaddr.address[0]:
                 collect_static_in_prebuilt_gc(gc, addrofaddr)
     if collect_static_in_prebuilt_nongc:
         for addrofaddr in layoutbuilder.addresses_of_static_ptrs_in_nongc:
             if addrofaddr.address[0]:
                 collect_static_in_prebuilt_nongc(gc, addrofaddr)
     if collect_stack_root:
         stackroots = self.tester.stackroots
         # Copy the stack roots' addresses into a raw array so the GC can
         # update them in place; afterwards cast the (possibly updated)
         # addresses back into the stackroots list.
         a = lltype.malloc(ADDR_ARRAY, len(stackroots), flavor='raw')
         for i in range(len(a)):
             a[i] = llmemory.cast_ptr_to_adr(stackroots[i])
         a_base = lltype.direct_arrayitems(a)
         for i in range(len(a)):
             ai = lltype.direct_ptradd(a_base, i)
             collect_stack_root(gc, llmemory.cast_ptr_to_adr(ai))
         for i in range(len(a)):
             PTRTYPE = lltype.typeOf(stackroots[i])
             stackroots[i] = llmemory.cast_adr_to_ptr(a[i], PTRTYPE)
         lltype.free(a, flavor='raw')
Exemplo n.º 9
0
def ll_shrink_array(p, smallerlength):
    """Return a version of the GcStruct `p` whose variable-sized array part
    is truncated to `smallerlength` items.  May shrink in place (GC
    permitting) or allocate and fill a smaller copy."""
    from pypy.rpython.lltypesystem.lloperation import llop
    from pypy.rlib.objectmodel import keepalive_until_here

    if llop.shrink_array(lltype.Bool, p, smallerlength):
        return p  # done by the GC
    # XXX we assume for now that the type of p is GcStruct containing a
    # variable array, with no further pointers anywhere, and exactly one
    # field in the fixed part -- like STR and UNICODE.

    TP = lltype.typeOf(p).TO
    newp = lltype.malloc(TP, smallerlength)

    # Copy the single fixed field by name.
    assert len(TP._names) == 2
    field = getattr(p, TP._names[0])
    setattr(newp, TP._names[0], field)

    # Raw-copy the first `smallerlength` items of the array part.
    ARRAY = getattr(TP, TP._arrayfld)
    offset = (llmemory.offsetof(TP, TP._arrayfld) +
              llmemory.itemoffsetof(ARRAY, 0))
    source_addr = llmemory.cast_ptr_to_adr(p) + offset
    dest_addr = llmemory.cast_ptr_to_adr(newp) + offset
    llmemory.raw_memcopy(source_addr, dest_addr,
                         llmemory.sizeof(ARRAY.OF) * smallerlength)

    # Both objects are referenced only by raw address during the copy.
    keepalive_until_here(p)
    keepalive_until_here(newp)
    return newp
Exemplo n.º 10
0
def test_gc_pointers_inside():
    """gc_pointers_inside(mutable_only=True) must yield the addresses of
    gc-pointer fields, honouring 'immutable' and 'immutable_fields' hints."""
    from pypy.rpython import rclass
    PT = lltype.Ptr(lltype.GcStruct('T'))
    S1 = lltype.GcStruct('S', ('x', PT), ('y', PT))
    S2 = lltype.GcStruct('S', ('x', PT), ('y', PT), hints={'immutable': True})
    accessor = rclass.FieldListAccessor()
    S3 = lltype.GcStruct('S', ('x', PT), ('y', PT),
                         hints={'immutable_fields': accessor})
    accessor.initialize(S3, ['x'])
    #
    # Fully mutable struct: both fields are reported (in either order).
    s1 = lltype.malloc(S1)
    adr = llmemory.cast_ptr_to_adr(s1)
    lst = list(gc_pointers_inside(s1._obj, adr, mutable_only=True))
    expected = [
        adr + llmemory.offsetof(S1, 'x'), adr + llmemory.offsetof(S1, 'y')
    ]
    assert lst == expected or lst == expected[::-1]
    #
    # Fully immutable struct: nothing is reported.
    s2 = lltype.malloc(S2)
    adr = llmemory.cast_ptr_to_adr(s2)
    lst = list(gc_pointers_inside(s2._obj, adr, mutable_only=True))
    assert lst == []
    #
    # Only 'x' marked immutable: just the mutable 'y' is reported.
    s3 = lltype.malloc(S3)
    adr = llmemory.cast_ptr_to_adr(s3)
    lst = list(gc_pointers_inside(s3._obj, adr, mutable_only=True))
    assert lst == [adr + llmemory.offsetof(S3, 'y')]
Exemplo n.º 11
0
 def get_address_of_gcref(self, gcref):
     """Return a nonmovable address whose word contains (a copy of) the
     given GCREF.  Results are cached in a small open hashtable keyed by
     the object's current address; the cache entry becomes stale (but
     never unsafe) if the object moves."""
     assert lltype.typeOf(gcref) == llmemory.GCREF
     # first look in the hashtable, using an inexact hash (fails after
     # the object moves)
     addr = llmemory.cast_ptr_to_adr(gcref)
     hash = llmemory.cast_adr_to_int(addr)
     hash -= hash >> self.HASHTABLE_BITS
     hash &= self.HASHTABLE_SIZE - 1
     addr_ref = self.hashtable[hash]
     # the following test is safe anyway, because the addresses found
     # in the hashtable are always the addresses of nonmovable stuff
     # ('addr_ref' is an address inside self.list, not directly the
     # address of a real moving GC object -- that's 'addr_ref.address[0]'.)
     if addr_ref.address[0] == addr:
         return addr_ref
     # if it fails, add an entry to the list
     if self.nextindex == len(self.list):
         # reallocate first, increasing a bit the size every time
         # (growth factor 5/4; old lists are retained so previously
         # handed-out addresses stay valid)
         self.oldlists.append(self.list)
         self.list = self.alloc_gcref_list(len(self.list) // 4 * 5)
         self.nextindex = 0
     # add it
     index = self.nextindex
     self.list[index] = gcref
     addr_ref = lltype.direct_ptradd(lltype.direct_arrayitems(self.list),
                                     index)
     addr_ref = llmemory.cast_ptr_to_adr(addr_ref)
     self.nextindex = index + 1
     # record it in the hashtable
     self.hashtable[hash] = addr_ref
     return addr_ref
Exemplo n.º 12
0
 def copy_string_contents(s1, s2, s1start, s2start, lgt):
     """Raw-copy `lgt` characters of `s1` (from s1start) into `s2`
     (at s2start) with a single raw memory copy."""
     assert s1start >= 0
     assert s2start >= 0
     assert lgt >= 0
     from_addr = llmemory.cast_ptr_to_adr(s1) + _str_ofs(s1start)
     to_addr = llmemory.cast_ptr_to_adr(s2) + _str_ofs(s2start)
     llmemory.raw_memcopy(from_addr, to_addr,
                          llmemory.sizeof(CHAR_TP) * lgt)
Exemplo n.º 13
0
def test_allocate_new_page():
    """allocate_new_page() must carve fresh, fully-uninitialized pages out
    of the arena, one size class at a time."""
    pagesize = hdrsize + 16
    # An arena of "almost 4 pages" only fits 3 usable pages.
    arenasize = pagesize * 4 - 1

    #
    def checknewpage(page, size_class):
        # Common invariants of a freshly allocated page.
        size = WORD * size_class
        assert (ac._nuninitialized(page,
                                   size_class) == (pagesize - hdrsize) // size)
        assert page.nfree == 0
        # freeblock points just past the page header.
        page1 = page.freeblock - hdrsize
        assert llmemory.cast_ptr_to_adr(page) == page1
        assert page.nextpage == PAGE_NULL

    #
    ac = ArenaCollection(arenasize, pagesize, 99)
    assert ac.num_uninitialized_pages == 0
    assert ac.total_memory_used == 0
    #
    page = ac.allocate_new_page(5)
    checknewpage(page, 5)
    assert ac.num_uninitialized_pages == 2
    assert ac.current_arena.freepages - pagesize == cast_ptr_to_adr(page)
    assert ac.page_for_size[5] == page
    #
    page = ac.allocate_new_page(3)
    checknewpage(page, 3)
    assert ac.num_uninitialized_pages == 1
    assert ac.current_arena.freepages - pagesize == cast_ptr_to_adr(page)
    assert ac.page_for_size[3] == page
    #
    page = ac.allocate_new_page(4)
    checknewpage(page, 4)
    assert ac.num_uninitialized_pages == 0
    assert ac.page_for_size[4] == page
Exemplo n.º 14
0
    def test_prebuilt_list_of_addresses(self):
        """Addresses compare by value (two casts of the same pointer are
        distinct objects but equal), and a prebuilt dict keyed by addresses
        is expected to be rejected by the interpreter with TypeError."""
        from pypy.rpython.lltypesystem import llmemory

        TP = lltype.Struct('x', ('y', lltype.Signed))
        a = lltype.malloc(TP, flavor='raw', immortal=True)
        b = lltype.malloc(TP, flavor='raw', immortal=True)
        c = lltype.malloc(TP, flavor='raw', immortal=True)
        a_a = llmemory.cast_ptr_to_adr(a)
        a0 = llmemory.cast_ptr_to_adr(a)
        # Two casts of the same pointer: different objects, equal values.
        assert a_a is not a0
        assert a_a == a0
        a_b = llmemory.cast_ptr_to_adr(b)
        a_c = llmemory.cast_ptr_to_adr(c)

        d = {a_a: 3, a_b: 4, a_c: 5}
        # a0 == a_a, so this overwrites the entry for a's address.
        d[a0] = 8

        def func(i):
            if i == 0:
                ptr = a
            else:
                ptr = b
            return d[llmemory.cast_ptr_to_adr(ptr)]

        py.test.raises(TypeError, self.interpret, func, [0])
Exemplo n.º 15
0
 def setinterior(self, toplevelcontainer, inneraddr, INNERTYPE, newvalue):
     """Store `newvalue` at `inneraddr`, applying a GC write barrier when
     a gc pointer is written into a gc-managed container."""
     needs_barrier = (lltype.typeOf(toplevelcontainer).TO._gckind == 'gc'
                      and isinstance(INNERTYPE, lltype.Ptr)
                      and INNERTYPE.TO._gckind == 'gc')
     if needs_barrier:
         self.gc.write_barrier(llmemory.cast_ptr_to_adr(newvalue),
                               llmemory.cast_ptr_to_adr(toplevelcontainer))
     llheap.setinterior(toplevelcontainer, inneraddr, INNERTYPE, newvalue)
Exemplo n.º 16
0
    def consider_constant(self, TYPE, value, gc):
        """Register a prebuilt constant with the GC: give gc objects a
        header, and record the addresses of their mutable gc-pointer
        fields as static roots."""
        # Only process top-level containers, and each of them only once.
        if value is not lltype.top_container(value):
            return
        if id(value) in self.seen_roots:
            return
        self.seen_roots[id(value)] = True

        if isinstance(TYPE, (lltype.GcStruct, lltype.GcArray)):
            typeid = self.get_type_id(TYPE)
            hdr = gc.gcheaderbuilder.new_header(value)
            adr = llmemory.cast_ptr_to_adr(hdr)
            gc.init_gc_object_immortal(adr, typeid)
            self.all_prebuilt_gc.append(value)

        # The following collects the addresses of all the fields that have
        # a GC Pointer type, inside the current prebuilt object.  All such
        # fields are potential roots: unless the structure is immutable,
        # they could be changed later to point to GC heap objects.
        adr = llmemory.cast_ptr_to_adr(value._as_ptr())
        if TYPE._gckind == "gc":
            if gc.prebuilt_gc_objects_are_static_roots or gc.DEBUG:
                appendto = self.addresses_of_static_ptrs
            else:
                return
        else:
            appendto = self.addresses_of_static_ptrs_in_nongc
        for a in gc_pointers_inside(value, adr, mutable_only=True):
            appendto.append(a)
Exemplo n.º 17
0
 def writebarrier_before_copy(self, source, dest):
     """Run the GC's pre-copy write barrier; returns True when the bulk
     copy may proceed (always True when no barrier is needed)."""
     if not self.gc.needs_write_barrier:
         return True
     return self.gc.writebarrier_before_copy(
         llmemory.cast_ptr_to_adr(source),
         llmemory.cast_ptr_to_adr(dest))
Exemplo n.º 18
0
    def str_from_buffer(raw_buf, gc_buf, allocated_size, needed_size):
        """
        Converts from a pair returned by alloc_buffer to a high-level string.
        The returned string will be truncated to needed_size.
        """
        assert allocated_size >= needed_size

        # Fast path: the GC-managed buffer already has exactly the right
        # length, so it can be handed out directly without copying.
        if gc_buf and (allocated_size == needed_size):
            return hlstrtype(gc_buf)

        # Slow path: allocate a fresh string of the needed size and copy
        # the character data into it from whichever buffer was used.
        new_buf = lltype.malloc(STRTYPE, needed_size)
        try:
            # Offset from the start of a string object to its first char.
            str_chars_offset = (offsetof(STRTYPE, 'chars') + \
                                itemoffsetof(STRTYPE.chars, 0))
            if gc_buf:
                src = cast_ptr_to_adr(gc_buf) + str_chars_offset
            else:
                src = cast_ptr_to_adr(raw_buf) + itemoffsetof(TYPEP.TO, 0)
            dest = cast_ptr_to_adr(new_buf) + str_chars_offset
            ## FIXME: This is bad, because dest could potentially move
            ## if there are threads involved.
            raw_memcopy(src, dest,
                        llmemory.sizeof(ll_char_type) * needed_size)
            return hlstrtype(new_buf)
        finally:
            # Keep the destination string alive until the raw copy is done;
            # only its raw address is referenced during the memcopy.
            keepalive_until_here(new_buf)
Exemplo n.º 19
0
 def copy_string_contents(src, dst, srcstart, dststart, length):
     """Raw-copy `length` characters of `src` (from srcstart) into `dst`
     (at dststart) with a single raw memory copy."""
     assert srcstart >= 0
     assert dststart >= 0
     assert length >= 0
     from_addr = llmemory.cast_ptr_to_adr(src) + _str_ofs(srcstart)
     to_addr = llmemory.cast_ptr_to_adr(dst) + _str_ofs(dststart)
     llmemory.raw_memcopy(from_addr, to_addr,
                          llmemory.sizeof(CHAR_TP) * length)
Exemplo n.º 20
0
def test_gc_pointers_inside():
    """gc_pointers_inside(mutable_only=True) must yield the addresses of
    gc-pointer fields, honouring 'immutable' and 'immutable_fields' hints."""
    from pypy.rpython import rclass
    PT = lltype.Ptr(lltype.GcStruct('T'))
    S1 = lltype.GcStruct('S', ('x', PT), ('y', PT))
    S2 = lltype.GcStruct('S', ('x', PT), ('y', PT),
                         hints={'immutable': True})
    accessor = rclass.FieldListAccessor()
    S3 = lltype.GcStruct('S', ('x', PT), ('y', PT),
                         hints={'immutable_fields': accessor})
    accessor.initialize(S3, {'x': IR_IMMUTABLE})
    #
    # Fully mutable struct: both fields are reported (in either order).
    s1 = lltype.malloc(S1)
    adr = llmemory.cast_ptr_to_adr(s1)
    lst = list(gc_pointers_inside(s1._obj, adr, mutable_only=True))
    expected = [adr + llmemory.offsetof(S1, 'x'),
                adr + llmemory.offsetof(S1, 'y')]
    assert lst == expected or lst == expected[::-1]
    #
    # Fully immutable struct: nothing is reported.
    s2 = lltype.malloc(S2)
    adr = llmemory.cast_ptr_to_adr(s2)
    lst = list(gc_pointers_inside(s2._obj, adr, mutable_only=True))
    assert lst == []
    #
    # Only 'x' marked immutable: just the mutable 'y' is reported.
    s3 = lltype.malloc(S3)
    adr = llmemory.cast_ptr_to_adr(s3)
    lst = list(gc_pointers_inside(s3._obj, adr, mutable_only=True))
    assert lst == [adr + llmemory.offsetof(S3, 'y')]
Exemplo n.º 21
0
    def consider_constant(self, TYPE, value, gc):
        """Register a prebuilt constant with the GC: give gc objects a
        header, and record the addresses of their mutable gc-pointer
        fields as static roots."""
        # Only process top-level containers, and each of them only once.
        if value is not lltype.top_container(value):
            return
        if id(value) in self.seen_roots:
            return
        self.seen_roots[id(value)] = True

        if isinstance(TYPE, (lltype.GcStruct, lltype.GcArray)):
            typeid = self.get_type_id(TYPE)
            hdr = gc.gcheaderbuilder.new_header(value)
            adr = llmemory.cast_ptr_to_adr(hdr)
            gc.init_gc_object_immortal(adr, typeid)
            self.all_prebuilt_gc.append(value)

        # The following collects the addresses of all the fields that have
        # a GC Pointer type, inside the current prebuilt object.  All such
        # fields are potential roots: unless the structure is immutable,
        # they could be changed later to point to GC heap objects.
        adr = llmemory.cast_ptr_to_adr(value._as_ptr())
        if TYPE._gckind == "gc":
            if gc.prebuilt_gc_objects_are_static_roots or gc.DEBUG:
                appendto = self.addresses_of_static_ptrs
            else:
                return
        else:
            appendto = self.addresses_of_static_ptrs_in_nongc
        for a in gc_pointers_inside(value, adr, mutable_only=True):
            appendto.append(a)
Exemplo n.º 22
0
    def test_prebuilt_list_of_addresses(self):
        """Addresses compare by value (two casts of the same pointer are
        distinct objects but equal), and a prebuilt dict keyed by addresses
        is expected to be rejected by the interpreter with TypeError."""
        from pypy.rpython.lltypesystem import llmemory

        TP = lltype.Struct('x', ('y', lltype.Signed))
        a = lltype.malloc(TP, flavor='raw', immortal=True)
        b = lltype.malloc(TP, flavor='raw', immortal=True)
        c = lltype.malloc(TP, flavor='raw', immortal=True)
        a_a = llmemory.cast_ptr_to_adr(a)
        a0 = llmemory.cast_ptr_to_adr(a)
        # Two casts of the same pointer: different objects, equal values.
        assert a_a is not a0
        assert a_a == a0
        a_b = llmemory.cast_ptr_to_adr(b)
        a_c = llmemory.cast_ptr_to_adr(c)

        d = {a_a: 3, a_b: 4, a_c: 5}
        # a0 == a_a, so this overwrites the entry for a's address.
        d[a0] = 8

        def func(i):
            if i == 0:
                ptr = a
            else:
                ptr = b
            return d[llmemory.cast_ptr_to_adr(ptr)]

        py.test.raises(TypeError, self.interpret, func, [0])
Exemplo n.º 23
0
def test_allocate_new_page():
    """allocate_new_page() must carve fresh, fully-uninitialized pages out
    of the arena, one size class at a time."""
    pagesize = hdrsize + 16
    # An arena of "almost 4 pages" only fits 3 usable pages.
    arenasize = pagesize * 4 - 1
    #
    def checknewpage(page, size_class):
        # Common invariants of a freshly allocated page.
        size = WORD * size_class
        assert (ac._nuninitialized(page, size_class) ==
                    (pagesize - hdrsize) // size)
        assert page.nfree == 0
        # freeblock points just past the page header.
        page1 = page.freeblock - hdrsize
        assert llmemory.cast_ptr_to_adr(page) == page1
        assert page.nextpage == PAGE_NULL
    #
    ac = ArenaCollection(arenasize, pagesize, 99)
    assert ac.num_uninitialized_pages == 0
    assert ac.total_memory_used == 0
    #
    page = ac.allocate_new_page(5)
    checknewpage(page, 5)
    assert ac.num_uninitialized_pages == 2
    assert ac.current_arena.freepages - pagesize == cast_ptr_to_adr(page)
    assert ac.page_for_size[5] == page
    #
    page = ac.allocate_new_page(3)
    checknewpage(page, 3)
    assert ac.num_uninitialized_pages == 1
    assert ac.current_arena.freepages - pagesize == cast_ptr_to_adr(page)
    assert ac.page_for_size[3] == page
    #
    page = ac.allocate_new_page(4)
    checknewpage(page, 4)
    assert ac.num_uninitialized_pages == 0
    assert ac.page_for_size[4] == page
Exemplo n.º 24
0
Arquivo: rgc.py Projeto: ieure/pypy
def ll_arraycopy(source, dest, source_start, dest_start, length):
    """Copy `length` items from `source` (starting at source_start) into
    `dest` (starting at dest_start).  Both arrays must have the same item
    type; overlapping regions within the same array are not supported."""
    from pypy.rpython.lltypesystem.lloperation import llop
    from pypy.rlib.objectmodel import keepalive_until_here

    # supports non-overlapping copies only
    if not we_are_translated():
        if source == dest:
            assert (source_start + length <= dest_start or
                    dest_start + length <= source_start)

    TP = lltype.typeOf(source).TO
    assert TP == lltype.typeOf(dest).TO
    if isinstance(TP.OF, lltype.Ptr) and TP.OF.TO._gckind == 'gc':
        # perform a write barrier that copies necessary flags from
        # source to dest
        if not llop.gc_writebarrier_before_copy(lltype.Bool, source, dest):
            # if the write barrier is not supported, copy by hand
            for i in range(length):
                dest[i + dest_start] = source[i + source_start]
            return
    # Raw memory copy: compute the address of the first copied item in
    # each array and move all the bytes in one go.
    source_addr = llmemory.cast_ptr_to_adr(source)
    dest_addr   = llmemory.cast_ptr_to_adr(dest)
    cp_source_addr = (source_addr + llmemory.itemoffsetof(TP, 0) +
                      llmemory.sizeof(TP.OF) * source_start)
    cp_dest_addr = (dest_addr + llmemory.itemoffsetof(TP, 0) +
                    llmemory.sizeof(TP.OF) * dest_start)

    llmemory.raw_memcopy(cp_source_addr, cp_dest_addr,
                         llmemory.sizeof(TP.OF) * length)
    # The memcopy above works on bare addresses, so both arrays must be
    # kept alive (and unmoved) until it has finished.
    keepalive_until_here(source)
    keepalive_until_here(dest)
Exemplo n.º 25
0
Arquivo: rgc.py Projeto: ieure/pypy
def ll_shrink_array(p, smallerlength):
    """Return a version of the GcStruct `p` whose variable-sized array part
    is truncated to `smallerlength` items.  May shrink in place (GC
    permitting) or allocate and fill a smaller copy."""
    from pypy.rpython.lltypesystem.lloperation import llop
    from pypy.rlib.objectmodel import keepalive_until_here

    if llop.shrink_array(lltype.Bool, p, smallerlength):
        return p    # done by the GC
    # XXX we assume for now that the type of p is GcStruct containing a
    # variable array, with no further pointers anywhere, and exactly one
    # field in the fixed part -- like STR and UNICODE.

    TP = lltype.typeOf(p).TO
    newp = lltype.malloc(TP, smallerlength)

    # Copy the single fixed field by name.
    assert len(TP._names) == 2
    field = getattr(p, TP._names[0])
    setattr(newp, TP._names[0], field)

    # Raw-copy the first `smallerlength` items of the array part.
    ARRAY = getattr(TP, TP._arrayfld)
    offset = (llmemory.offsetof(TP, TP._arrayfld) +
              llmemory.itemoffsetof(ARRAY, 0))
    source_addr = llmemory.cast_ptr_to_adr(p)    + offset
    dest_addr   = llmemory.cast_ptr_to_adr(newp) + offset
    llmemory.raw_memcopy(source_addr, dest_addr, 
                         llmemory.sizeof(ARRAY.OF) * smallerlength)

    # Both objects are referenced only by raw address during the copy.
    keepalive_until_here(p)
    keepalive_until_here(newp)
    return newp
Exemplo n.º 26
0
 def setinterior(self, toplevelcontainer, inneraddr, INNERTYPE, newvalue):
     """Store `newvalue` at `inneraddr`, applying a GC write barrier when
     a gc pointer is written into a gc-managed container."""
     needs_barrier = (lltype.typeOf(toplevelcontainer).TO._gckind == "gc"
                      and isinstance(INNERTYPE, lltype.Ptr)
                      and INNERTYPE.TO._gckind == "gc")
     if needs_barrier:
         self.gc.write_barrier(llmemory.cast_ptr_to_adr(newvalue),
                               llmemory.cast_ptr_to_adr(toplevelcontainer))
     llheap.setinterior(toplevelcontainer, inneraddr, INNERTYPE, newvalue)
Exemplo n.º 27
0
 def setup():
     """Build an S whose x/y fields hold the addresses of two fresh T
     objects (the first with z = 42)."""
     container = lltype.malloc(S)
     t_first = lltype.malloc(T)
     t_first.z = 42
     t_second = lltype.malloc(T)
     container.x = llmemory.cast_ptr_to_adr(t_first)
     container.y = llmemory.cast_ptr_to_adr(t_second)
     return container
Exemplo n.º 28
0
 def setup():
     """Build an S whose x/y fields hold the addresses of two fresh T
     objects (the first with z = 42)."""
     s1 = lltype.malloc(S)
     tx = lltype.malloc(T)
     tx.z = 42
     ty = lltype.malloc(T)
     s1.x = llmemory.cast_ptr_to_adr(tx)
     s1.y = llmemory.cast_ptr_to_adr(ty)
     return s1
Exemplo n.º 29
0
    def malloc(self, typeid, length=0, zero=False, coallocator=None):
        """For testing.  The interface used by the gctransformer is
        the four malloc_[fixed,var]size[_clear]() functions.
        And (if they exist) to the coalloc_[fixed,var]size_clear functions

        Dispatches to the right low-level malloc/coalloc variant for the
        given typeid and returns the new object's address.
        """
        # Rules about fallbacks in case of missing malloc methods:
        #  * malloc_fixedsize_clear() and malloc_varsize_clear() are mandatory
        #  * malloc_fixedsize() and malloc_varsize() fallback to the above
        #  * coalloc_fixedsize_clear() and coalloc_varsize_clear() are optional
        # There is no non-clear version of coalloc for now.
        # XXX: as of r49360, gctransformer.framework never inserts calls
        # to malloc_varsize(), but always uses malloc_varsize_clear()

        size = self.fixed_size(typeid)
        needs_finalizer = bool(self.getfinalizer(typeid))
        weakptr_offset = self.weakpointer_offset(typeid)
        #XXX cannot compare weakptr_offset with -1
        #contains_weakptr = weakpointer_offset. != -1
        if isinstance(weakptr_offset, int):
            assert weakptr_offset == -1
            contains_weakptr = False
        else:
            contains_weakptr = True
        # Weakref objects never have finalizers of their own.
        assert not (needs_finalizer and contains_weakptr)
        if self.is_varsize(typeid):
            assert not contains_weakptr
            itemsize = self.varsize_item_sizes(typeid)
            offset_to_length = self.varsize_offset_to_length(typeid)
            # Prefer coalloc when a coallocator is given and supported.
            if (coallocator is not None
                    and hasattr(self, "coalloc_varsize_clear")):
                assert not needs_finalizer
                coallocator = llmemory.cast_ptr_to_adr(coallocator)
                ref = self.coalloc_varsize_clear(coallocator, typeid, length,
                                                 size, itemsize,
                                                 offset_to_length)
            else:
                if zero or not hasattr(self, 'malloc_varsize'):
                    malloc_varsize = self.malloc_varsize_clear
                else:
                    malloc_varsize = self.malloc_varsize
                ref = malloc_varsize(typeid, length, size, itemsize,
                                     offset_to_length, True, needs_finalizer)
        else:
            # Prefer coalloc when a coallocator is given and supported.
            if (coallocator is not None
                    and hasattr(self, "coalloc_fixedsize_clear")):
                assert not needs_finalizer
                coallocator = llmemory.cast_ptr_to_adr(coallocator)
                ref = self.coalloc_fixedsize_clear(coallocator, typeid, size)
            else:
                if zero or not hasattr(self, 'malloc_fixedsize'):
                    malloc_fixedsize = self.malloc_fixedsize_clear
                else:
                    malloc_fixedsize = self.malloc_fixedsize
                ref = malloc_fixedsize(typeid, size, True, needs_finalizer,
                                       contains_weakptr)
        # lots of cast and reverse-cast around...
        return llmemory.cast_ptr_to_adr(ref)
Exemplo n.º 30
0
 def func(i):
     """Look up a value in a dict keyed by object addresses: the address
     of `a` maps to 123, the address of `b` to 456; pick `a` when i > 5."""
     table = {}
     table[llmemory.cast_ptr_to_adr(a)] = 123
     table[llmemory.cast_ptr_to_adr(b)] = 456
     chosen = a if i > 5 else b
     return table[llmemory.cast_ptr_to_adr(chosen)]
Exemplo n.º 31
0
    def malloc(self, typeid, length=0, zero=False, coallocator=None):
        """For testing.  The interface used by the gctransformer is
        the four malloc_[fixed,var]size[_clear]() functions.
        And (if they exist) to the coalloc_[fixed,var]size_clear functions

        Dispatches to the right low-level malloc/coalloc variant for the
        given typeid and returns the new object's address.
        """
        # Rules about fallbacks in case of missing malloc methods:
        #  * malloc_fixedsize_clear() and malloc_varsize_clear() are mandatory
        #  * malloc_fixedsize() and malloc_varsize() fallback to the above
        #  * coalloc_fixedsize_clear() and coalloc_varsize_clear() are optional
        # There is no non-clear version of coalloc for now.
        # XXX: as of r49360, gctransformer.framework never inserts calls
        # to malloc_varsize(), but always uses malloc_varsize_clear()

        size = self.fixed_size(typeid)
        needs_finalizer = bool(self.getfinalizer(typeid))
        weakptr_offset = self.weakpointer_offset(typeid)
        #XXX cannot compare weakptr_offset with -1
        #contains_weakptr = weakpointer_offset. != -1
        if isinstance(weakptr_offset, int):
            assert weakptr_offset == -1
            contains_weakptr = False
        else:
            contains_weakptr = True
        # Weakref objects never have finalizers of their own.
        assert not (needs_finalizer and contains_weakptr)
        if self.is_varsize(typeid):
            assert not contains_weakptr
            itemsize = self.varsize_item_sizes(typeid)
            offset_to_length = self.varsize_offset_to_length(typeid)
            # Prefer coalloc when a coallocator is given and supported.
            if (coallocator is not None and
                hasattr(self, "coalloc_varsize_clear")):
                assert not needs_finalizer
                coallocator = llmemory.cast_ptr_to_adr(coallocator)
                ref = self.coalloc_varsize_clear(coallocator, typeid,
                                                 length, size,
                                                 itemsize, offset_to_length)
            else:
                if zero or not hasattr(self, 'malloc_varsize'):
                    malloc_varsize = self.malloc_varsize_clear
                else:
                    malloc_varsize = self.malloc_varsize
                ref = malloc_varsize(typeid, length, size, itemsize,
                                     offset_to_length, True, needs_finalizer)
        else:
            # Prefer coalloc when a coallocator is given and supported.
            if (coallocator is not None and
                hasattr(self, "coalloc_fixedsize_clear")):
                assert not needs_finalizer
                coallocator = llmemory.cast_ptr_to_adr(coallocator)
                ref = self.coalloc_fixedsize_clear(coallocator, typeid, size)
            else:
                if zero or not hasattr(self, 'malloc_fixedsize'):
                    malloc_fixedsize = self.malloc_fixedsize_clear
                else:
                    malloc_fixedsize = self.malloc_fixedsize
                ref = malloc_fixedsize(typeid, size, True, needs_finalizer,
                                       contains_weakptr)
        # lots of cast and reverse-cast around...
        return llmemory.cast_ptr_to_adr(ref)
Exemplo n.º 32
0
 def writearray(self, p, index, newvalue):
     """Array store with the appropriate GC write barrier applied first,
     preferring the array-specialized barrier when the GC provides one."""
     if self.gc.needs_write_barrier:
         value_addr = llmemory.cast_ptr_to_adr(newvalue)
         struct_addr = llmemory.cast_ptr_to_adr(p)
         if hasattr(self.gc, 'write_barrier_from_array'):
             self.gc.write_barrier_from_array(value_addr, struct_addr,
                                              index)
         else:
             self.gc.write_barrier(value_addr, struct_addr)
     p[index] = newvalue
Exemplo n.º 33
0
 def func(i):
     """Look up a value in a dict keyed by object addresses: the address
     of `a` maps to 123, the address of `b` to 456; pick `a` when i > 5."""
     d = {}
     d[llmemory.cast_ptr_to_adr(a)] = 123
     d[llmemory.cast_ptr_to_adr(b)] = 456
     if i > 5:
         key = llmemory.cast_ptr_to_adr(a)
     else:
         key = llmemory.cast_ptr_to_adr(b)
     return d[key]
Exemplo n.º 34
0
 def writearray(self, p, index, newvalue):
     """Array store with the appropriate GC write barrier applied first,
     preferring the array-specialized barrier when the GC provides one."""
     if self.gc.needs_write_barrier:
         newaddr = llmemory.cast_ptr_to_adr(newvalue)
         addr_struct = llmemory.cast_ptr_to_adr(p)
         if hasattr(self.gc, 'write_barrier_from_array'):
             self.gc.write_barrier_from_array(newaddr, addr_struct, index)
         else:
             self.gc.write_barrier(newaddr, addr_struct)
     p[index] = newvalue
Exemplo n.º 35
0
 def do_write_barrier(self, gcref_struct, gcref_newptr):
     """Emulate the JIT's inlined write barrier: check the object's tid
     flag and call the GC's slow path only when the flag is set."""
     # The GC header sits just before the object itself.
     hdr_addr = llmemory.cast_ptr_to_adr(gcref_struct)
     hdr_addr -= self.gcheaderbuilder.size_gc_header
     hdr = llmemory.cast_adr_to_ptr(hdr_addr, self.HDRPTR)
     if hdr.tid & self.GCClass.JIT_WB_IF_FLAG:
         # get a pointer to the 'remember_young_pointer' function from
         # the GC, and call it immediately
         llop1 = self.llop1
         funcptr = llop1.get_write_barrier_failing_case(self.WB_FUNCPTR)
         funcptr(llmemory.cast_ptr_to_adr(gcref_struct))
Exemplo n.º 36
0
Arquivo: gc.py Projeto: Debug-Orz/Sypy
 def do_write_barrier(self, gcref_struct, gcref_newptr):
     """Emulate the JIT's inlined write barrier: check the object's tid
     flag and call the GC's slow path only when the flag is set."""
     # The GC header sits just before the object itself.
     hdr_addr = llmemory.cast_ptr_to_adr(gcref_struct)
     hdr_addr -= self.gcheaderbuilder.size_gc_header
     hdr = llmemory.cast_adr_to_ptr(hdr_addr, self.HDRPTR)
     if hdr.tid & self.GCClass.JIT_WB_IF_FLAG:
         # get a pointer to the 'remember_young_pointer' function from
         # the GC, and call it immediately
         llop1 = self.llop1
         funcptr = llop1.get_write_barrier_failing_case(self.WB_FUNCPTR)
         funcptr(llmemory.cast_ptr_to_adr(gcref_struct))
Exemplo n.º 37
0
def longername(a, b, size):
    if 1:
        # Raw-copy 'size' items of array type TP from b into a.  The total
        # byte count is the offset of item 0 plus (size - 1) further items.
        base = itemoffsetof(TP, 0)
        item_size = sizeof(TP.OF)
        total = base + item_size * (size - 1)
        raw_memcopy(cast_ptr_to_adr(b) + base,
                    cast_ptr_to_adr(a) + base,
                    total)
    else:
        # dead branch, kept for the flow-graph shape of the test
        a = [i for i in range(x)]
    return 0
Exemplo n.º 38
0
def longername(a, b, size):
    # Copy 'size' items of array type TP from b into a using a single
    # raw_memcopy; the byte count covers item 0 up to item size-1.
    if 1:
        baseofs = itemoffsetof(TP, 0)
        onesize = sizeof(TP.OF)
        size = baseofs + onesize * (size - 1)
        raw_memcopy(
            cast_ptr_to_adr(b) + baseofs,
            cast_ptr_to_adr(a) + baseofs, size)
    else:
        # dead branch; never executed (the condition above is constant)
        a = []
        for i in range(x):
            a.append(i)
    return 0
Exemplo n.º 39
0
 def f():
     # Allocate two structs, then ask the GC to make every reference to
     # the first one point at the second via gc_x_become.
     first = lltype.malloc(S)
     first.x = 10
     second = lltype.malloc(S)
     second.x = 20
     alias = first
     llop.gc_x_become(lltype.Void,
                      llmemory.cast_ptr_to_adr(first),
                      llmemory.cast_ptr_to_adr(second))
     # 'second' must stay alive until x_become() is done: with only its
     # raw address around, the GC could in theory reclaim it early.
     objectmodel.keepalive_until_here(second)
     return alias.x
Exemplo n.º 40
0
def _ll_list_resize_really(l, newsize):
    """
    Ensure l.items has room for at least newsize elements, and set
    l.length to newsize.  Note that l.items may change, and even if
    newsize is less than l.length on entry.
    """
    allocated = len(l.items)  # NOTE(review): unused in this snippet — verify against caller

    # This over-allocates proportional to the list size, making room
    # for additional growth.  The over-allocation is mild, but is
    # enough to give linear-time amortized behavior over a long
    # sequence of appends() in the presence of a poorly-performing
    # system malloc().
    # The growth pattern is:  0, 4, 8, 16, 25, 35, 46, 58, 72, 88, ...
    if newsize <= 0:
        ll_assert(newsize == 0, "negative list length")
        l.length = 0
        l.items = _ll_new_empty_item_array(typeOf(l).TO)
        return
    else:
        if newsize < 9:
            some = 3
        else:
            some = 6
        some += newsize >> 3
        try:
            new_allocated = ovfcheck(newsize + some)
        except OverflowError:
            raise MemoryError
    # XXX consider to have a real realloc
    items = l.items
    newitems = malloc(typeOf(l).TO.items.TO, new_allocated)
    before_len = l.length
    # p becomes the index of the last element to carry over
    if before_len < new_allocated:
        p = before_len - 1
    else:
        p = new_allocated - 1
    ITEM = typeOf(l).TO.ITEM
    if isinstance(ITEM, Ptr):
        # pointer items: copy one by one and null out the old slots so the
        # old array does not keep the objects alive
        while p >= 0:
            newitems[p] = items[p]
            items[p] = nullptr(ITEM.TO)
            p -= 1
    else:
        # non-pointer items: one raw memcopy of the first p+1 items
        source = cast_ptr_to_adr(items) + itemoffsetof(typeOf(l.items).TO, 0)
        dest = cast_ptr_to_adr(newitems) + itemoffsetof(typeOf(l.items).TO, 0)
        s = p + 1
        raw_memcopy(source, dest, sizeof(ITEM) * s)
        # keep 'items' alive across the raw copy: only its address is used
        keepalive_until_here(items)
    l.length = newsize
    l.items = newitems
Exemplo n.º 41
0
def _ll_list_resize_really(l, newsize):
    """
    Ensure l.items has room for at least newsize elements, and set
    l.length to newsize.  Note that l.items may change, and even if
    newsize is less than l.length on entry.
    """
    allocated = len(l.items)  # NOTE(review): unused in this snippet — verify against caller

    # This over-allocates proportional to the list size, making room
    # for additional growth.  The over-allocation is mild, but is
    # enough to give linear-time amortized behavior over a long
    # sequence of appends() in the presence of a poorly-performing
    # system malloc().
    # The growth pattern is:  0, 4, 8, 16, 25, 35, 46, 58, 72, 88, ...
    if newsize <= 0:
        ll_assert(newsize == 0, "negative list length")
        l.length = 0
        l.items = _ll_new_empty_item_array(typeOf(l).TO)
        return
    else:
        if newsize < 9:
            some = 3
        else:
            some = 6
        some += newsize >> 3
        try:
            new_allocated = ovfcheck(newsize + some)
        except OverflowError:
            raise MemoryError
    # XXX consider to have a real realloc
    items = l.items
    newitems = malloc(typeOf(l).TO.items.TO, new_allocated)
    before_len = l.length
    # p is the index of the last element that must be carried over
    if before_len < new_allocated:
        p = before_len - 1
    else:
        p = new_allocated - 1
    ITEM = typeOf(l).TO.ITEM
    if isinstance(ITEM, Ptr):
        # pointer items: copy individually, nulling the old slots so the
        # old array no longer keeps the referenced objects alive
        while p >= 0:
            newitems[p] = items[p]
            items[p] = nullptr(ITEM.TO)
            p -= 1
    else:
        # non-pointer items: a single raw memcopy of the first p+1 items
        source = cast_ptr_to_adr(items) + itemoffsetof(typeOf(l.items).TO, 0)
        dest = cast_ptr_to_adr(newitems) + itemoffsetof(typeOf(l.items).TO, 0)
        s = p + 1
        raw_memcopy(source, dest, sizeof(ITEM) * s)
        # keep 'items' alive across the raw copy: only its address is used
        keepalive_until_here(items)
    l.length = newsize
    l.items = newitems
Exemplo n.º 42
0
 def produce_into(self, builder, r):
     # Build a call to a function that raises, followed by a
     # GUARD_EXCEPTION checking for that exact exception class.
     fail_subset = builder.subset_of_intvars(r)
     subset, f, exc = self.raising_func_code(builder, r)
     TP = lltype.FuncType([lltype.Signed] * len(subset), lltype.Void)
     ptr = llhelper(lltype.Ptr(TP), f)
     c_addr = ConstAddr(llmemory.cast_ptr_to_adr(ptr), builder.cpu)
     args = [c_addr] + subset
     descr = builder.cpu.calldescrof(TP, TP.ARGS, TP.RESULT)
     self.put(builder, args, descr)
     # guard that the call raised exactly 'exc'
     exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu)
     op = ResOperation(rop.GUARD_EXCEPTION, [exc_box], BoxPtr(),
                       descr=BasicFailDescr())
     op.setfailargs(fail_subset)
     builder.loop.operations.append(op)
Exemplo n.º 43
0
def test_partial_arena_reset():
    """Reset only part of an arena and check freed/live object behavior."""
    a = arena_malloc(72, False)

    def reserve(i):
        # reserve the i-th slot of the arena for one object of
        # 'precomputed_size'
        b = a + i * llmemory.raw_malloc_usage(precomputed_size)
        arena_reserve(b, precomputed_size)
        return b

    blist = []
    plist = []
    for i in range(4):
        b = reserve(i)
        (b + llmemory.offsetof(SX, 'x')).signed[0] = 100 + i
        blist.append(b)
        plist.append(llmemory.cast_adr_to_ptr(b, SPTR))
    # clear blist[1] and blist[2] but not blist[0] nor blist[3]
    arena_reset(blist[1],
                llmemory.raw_malloc_usage(precomputed_size) * 2, False)
    py.test.raises(RuntimeError, "plist[1].x")  # marked as freed
    py.test.raises(RuntimeError, "plist[2].x")  # marked as freed
    # re-reserve object at index 1 and 2
    blist[1] = reserve(1)
    blist[2] = reserve(2)
    # check via object pointers
    assert plist[0].x == 100
    assert plist[3].x == 103
    py.test.raises(RuntimeError, "plist[1].x")  # marked as freed
    py.test.raises(RuntimeError, "plist[2].x")  # marked as freed
    # but we can still cast the old ptrs to addresses, which compare equal
    # to the new ones we got
    assert llmemory.cast_ptr_to_adr(plist[1]) == blist[1]
    assert llmemory.cast_ptr_to_adr(plist[2]) == blist[2]
    # check via addresses
    assert (blist[0] + llmemory.offsetof(SX, 'x')).signed[0] == 100
    assert (blist[3] + llmemory.offsetof(SX, 'x')).signed[0] == 103
    py.test.raises(lltype.UninitializedMemoryAccess,
                   "(blist[1] + llmemory.offsetof(SX, 'x')).signed[0]")
    py.test.raises(lltype.UninitializedMemoryAccess,
                   "(blist[2] + llmemory.offsetof(SX, 'x')).signed[0]")
    # clear and zero-fill the area over blist[0] and blist[1]
    arena_reset(blist[0],
                llmemory.raw_malloc_usage(precomputed_size) * 2, True)
    # re-reserve and check it's zero
    blist[0] = reserve(0)
    blist[1] = reserve(1)
    assert (blist[0] + llmemory.offsetof(SX, 'x')).signed[0] == 0
    assert (blist[1] + llmemory.offsetof(SX, 'x')).signed[0] == 0
    assert (blist[3] + llmemory.offsetof(SX, 'x')).signed[0] == 103
    py.test.raises(lltype.UninitializedMemoryAccess,
                   "(blist[2] + llmemory.offsetof(SX, 'x')).signed[0]")
Exemplo n.º 44
0
 def produce_into(self, builder, r):
     # Emit a call to a raising function and guard on the raised
     # exception with GUARD_EXCEPTION.
     fail_subset = builder.subset_of_intvars(r)
     subset, f, exc = self.raising_func_code(builder, r)
     TP = lltype.FuncType([lltype.Signed] * len(subset), lltype.Void)
     ptr = llhelper(lltype.Ptr(TP), f)
     c_addr = ConstAddr(llmemory.cast_ptr_to_adr(ptr), builder.cpu)
     args = [c_addr] + subset
     descr = self.getcalldescr(builder, TP)
     self.put(builder, args, descr)
     # guard that the call raised exactly 'exc'
     exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu)
     op = ResOperation(rop.GUARD_EXCEPTION, [exc_box],
                       BoxPtr(),
                       descr=BasicFailDescr())
     op.setfailargs(fail_subset)
     builder.loop.operations.append(op)
Exemplo n.º 45
0
 def produce_into(self, builder, r):
     # Emit a call to a raising function followed by GUARD_NO_EXCEPTION,
     # which must therefore fail; the builder records it as the expected
     # failure point of the generated loop.
     subset, f, exc = self.raising_func_code(builder, r)
     TP = lltype.FuncType([lltype.Signed] * len(subset), lltype.Void)
     ptr = llhelper(lltype.Ptr(TP), f)
     c_addr = ConstAddr(llmemory.cast_ptr_to_adr(ptr), builder.cpu)
     args = [c_addr] + subset
     descr = self.getcalldescr(builder, TP)
     self.put(builder, args, descr)
     op = ResOperation(rop.GUARD_NO_EXCEPTION, [], BoxPtr(),
                       descr=BasicFailDescr())
     # stash the expected exception so the test harness can check it
     op._exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu)
     op.setfailargs(builder.subset_of_intvars(r))
     builder.should_fail_by = op
     builder.guard_op = op
     builder.loop.operations.append(op)
Exemplo n.º 46
0
    def malloc(self, typeid, length=0, zero=False):
        """For testing.  The interface used by the gctransformer is
        the four malloc_[fixed,var]size[_clear]() functions.
        """
        # Rules about fallbacks in case of missing malloc methods:
        #  * malloc_fixedsize_clear() and malloc_varsize_clear() are mandatory
        #  * malloc_fixedsize() and malloc_varsize() fallback to the above
        # XXX: as of r49360, gctransformer.framework never inserts calls
        # to malloc_varsize(), but always uses malloc_varsize_clear()

        size = self.fixed_size(typeid)
        needs_finalizer = bool(self.getfinalizer(typeid))
        contains_weakptr = self.weakpointer_offset(typeid) >= 0
        # an object cannot both have a finalizer and be a weakref
        assert not (needs_finalizer and contains_weakptr)
        if self.is_varsize(typeid):
            assert not contains_weakptr
            assert not needs_finalizer
            itemsize = self.varsize_item_sizes(typeid)
            offset_to_length = self.varsize_offset_to_length(typeid)
            # prefer the non-clearing variant unless zeroing was requested
            # or the GC does not provide one
            if zero or not hasattr(self, 'malloc_varsize'):
                malloc_varsize = self.malloc_varsize_clear
            else:
                malloc_varsize = self.malloc_varsize
            ref = malloc_varsize(typeid, length, size, itemsize,
                                 offset_to_length, True)
        else:
            if zero or not hasattr(self, 'malloc_fixedsize'):
                malloc_fixedsize = self.malloc_fixedsize_clear
            else:
                malloc_fixedsize = self.malloc_fixedsize
            ref = malloc_fixedsize(typeid, size, True, needs_finalizer,
                                   contains_weakptr)
        # lots of cast and reverse-cast around...
        return llmemory.cast_ptr_to_adr(ref)
Exemplo n.º 47
0
 class FakeJitDriverSD:
     """Minimal stand-in for a JitDriver static data object in tests."""
     portal_runner_ptr = llhelper(lltype.Ptr(FUNC), ll_portal_runner)
     portal_runner_adr = llmemory.cast_ptr_to_adr(portal_runner_ptr)
     portal_calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, None)
     portal_finishtoken = compile.DoneWithThisFrameDescrInt()
     # two red arguments, integer result
     num_red_args = 2
     result_type = INT
Exemplo n.º 48
0
    def setup_method(self, method):
        # Build a CPU with the fastpath-malloc GC descriptor and register
        # two test struct types (NODE, NODE2) plus their descrs.
        cpu = CPU(None, None)
        cpu.vtable_offset = WORD
        cpu.gc_ll_descr = GCDescrFastpathMalloc()
        cpu.setup_once()

        # hack: specify 'tid' explicitly, because this test is not running
        # with the gc transformer
        NODE = lltype.GcStruct('node', ('tid', lltype.Signed),
                                       ('value', lltype.Signed))
        nodedescr = cpu.sizeof(NODE)
        valuedescr = cpu.fielddescrof(NODE, 'value')

        self.cpu = cpu
        self.nodedescr = nodedescr
        vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True)
        vtable_int = cpu.cast_adr_to_int(llmemory.cast_ptr_to_adr(vtable))
        NODE2 = lltype.GcStruct('node2',
                                  ('parent', rclass.OBJECT),
                                  ('tid', lltype.Signed),
                                  ('vtable', lltype.Ptr(rclass.OBJECT_VTABLE)))
        descrsize = cpu.sizeof(NODE2)
        heaptracker.register_known_gctype(cpu, vtable, NODE2)
        self.descrsize = descrsize
        self.vtable_int = vtable_int

        # expose every local (NODE, descrs, ...) to the test namespace
        self.namespace = locals().copy()
Exemplo n.º 49
0
 def get_write_barrier_from_array_fn(self, cpu):
     # Returns the integer address of the array write-barrier slow path,
     # a function taking [array, index, newvalue]; may return 0.
     slowpath = self.llop1.get_write_barrier_from_array_failing_case(
         self.WB_ARRAY_FUNCPTR)
     return cpu.cast_adr_to_int(llmemory.cast_ptr_to_adr(slowpath))
Exemplo n.º 50
0
    def enum_content(self, o, name='', with_header=True):
        # Generator yielding (label, value) pairs describing the contents
        # of low-level object 'o', recursing into structs, arrays, opaque
        # containers, and GC headers.
        # XXX clean up
        T = lltype.typeOf(o)
        if (self.size_gc_header is not None and with_header
            and isinstance(T, lltype.ContainerType) and T._gckind == 'gc'):
            # step back over the GC header so we enumerate it too
            adr = llmemory.cast_ptr_to_adr(o._as_ptr())
            adr -= self.size_gc_header
            o = adr.get()._obj
            T = lltype.typeOf(o)
        if isinstance(T, lltype.Struct):
            try:
                gcobjptr = header2obj[o]
                fmt = '(%s)'
            except KeyError:
                gcobjptr = None
                fmt = '%s'
            for name in T._names:
                for name, value in self.enum_content(getattr(o, name), name,
                                                     with_header=False):
                    yield fmt % (name,), value
            if gcobjptr:
                GCT = lltype.typeOf(gcobjptr)
                if self.size_gc_header is not None:
                    for sub in self.enum_content(gcobjptr._obj,
                                                 with_header=False):
                        yield sub
                else:
                    # display as a link to avoid the same data showing up
                    # twice in the graph
                    yield 'header of', gcobjptr._obj
        elif isinstance(T, lltype.Array):
            # recurse into each item, labelled with its index
            for index, o1 in enumerate(o.items):
                for sub in self.enum_content(o1, str(index)):
                    yield sub
        elif isinstance(T, lltype.Ptr):
            if not o:
                yield name, 'null'
            else:
                yield name, self.normalize(lltype.normalizeptr(o)._obj)
        elif isinstance(T, lltype.OpaqueType) and hasattr(o, 'container'):
            # opaque wrapper: show the wrapped container's type and content
            T = lltype.typeOf(o.container)
            yield 'container', '<%s>' % (shorttypename(T),)
            for sub in self.enum_content(o.container, name, with_header=False):
                yield sub
        elif T == llmemory.Address:
            if not o:
                yield name, 'NULL'
            else:
                addrof = o.ref()
                T1 = lltype.typeOf(addrof)
                # addresses pointing at a GC header are flagged '@hdr'
                if (isinstance(T1, lltype.Ptr) and
                    isinstance(T1.TO, lltype.Struct) and
                    addrof._obj in header2obj):
                    yield name + ' @hdr', self.normalize(addrof._obj)
                else:
                    yield name + ' @', self.normalize(o.ptr._obj)
##                     if o.offset:
##                         yield '... offset', str(o.offset)
        else:
            # primitive value: render as a string
            yield name, str(o)
Exemplo n.º 51
0
 def get_write_barrier_from_array_fn(self, cpu):
     # returns a function with arguments [array, index, newvalue]
     llop1 = self.llop1
     funcptr = llop1.get_write_barrier_from_array_failing_case(
         self.WB_ARRAY_FUNCPTR)
     # hand the slow-path address back to the CPU as an integer
     funcaddr = llmemory.cast_ptr_to_adr(funcptr)
     return cpu.cast_adr_to_int(funcaddr)    # this may return 0
Exemplo n.º 52
0
 def heap_stats(self):
     # Walk the heap from the roots and fill a per-type-id statistics
     # table; the table itself is tracked so it is not counted twice.
     self._tracked_dict = self.AddressDict()
     max_tid = self.root_walker.gcdata.max_type_id
     typeid_map = lltype.malloc(ARRAY_TYPEID_MAP, max_tid, zero=True)
     for idx in range(max_tid):
         typeid_map[idx] = lltype.malloc(TYPEID_MAP, max_tid, zero=True)
     self._ll_typeid_map = typeid_map
     self._tracked_dict.add(llmemory.cast_ptr_to_adr(typeid_map))
     for idx in range(max_tid):
         self._tracked_dict.add(llmemory.cast_ptr_to_adr(typeid_map[idx]))
     self.enumerate_all_roots(SemiSpaceGC._track_heap_root, self)
     self._ll_typeid_map = lltype.nullptr(ARRAY_TYPEID_MAP)
     self._tracked_dict.delete()
     return typeid_map
Exemplo n.º 53
0
 def setup():
     # Build a 10000-element linked chain of S structs; each node's 'x'
     # field holds the address of the previous chain head.
     head = lltype.nullptr(S)
     for _ in range(10000):
         node = lltype.malloc(S)
         node.x = llmemory.cast_ptr_to_adr(head)
         head = node
     return head
Exemplo n.º 54
0
 def __init__(self, warmrunnerdesc):
     # Set up the JIT_VIRTUAL_REF structure type, its vtable, and the
     # constants/descrs the JIT needs to manipulate virtual refs.
     self.warmrunnerdesc = warmrunnerdesc
     self.cpu = warmrunnerdesc.cpu
     # we make the low-level type of an RPython class directly
     self.JIT_VIRTUAL_REF = lltype.GcStruct(
         'JitVirtualRef', ('super', rclass.OBJECT),
         ('virtual_token', lltype.Signed), ('forced', rclass.OBJECTPTR))
     self.jit_virtual_ref_vtable = lltype.malloc(rclass.OBJECT_VTABLE,
                                                 zero=True,
                                                 flavor='raw',
                                                 immortal=True)
     self.jit_virtual_ref_vtable.name = rclass.alloc_array_name(
         'jit_virtual_ref')
     # build some constants
     adr = llmemory.cast_ptr_to_adr(self.jit_virtual_ref_vtable)
     adr = heaptracker.adr2int(adr)
     self.jit_virtual_ref_const_class = history.ConstInt(adr)
     fielddescrof = self.cpu.fielddescrof
     self.descr_virtual_token = fielddescrof(self.JIT_VIRTUAL_REF,
                                             'virtual_token')
     self.descr_forced = fielddescrof(self.JIT_VIRTUAL_REF, 'forced')
     #
     # record the type JIT_VIRTUAL_REF explicitly in the rtyper, too
     if hasattr(self.warmrunnerdesc, 'rtyper'):  # <-- for tests
         self.warmrunnerdesc.rtyper.set_type_for_typeptr(
             self.jit_virtual_ref_vtable, self.JIT_VIRTUAL_REF)
Exemplo n.º 55
0
 def identityhash(self, gcobj):
     # Return a stable identity hash for 'gcobj'.  Either the hash was
     # already materialized in a trailing field (GCFLAG_HASHFIELD), or we
     # reserve room for it and use the current address.
     # The following code should run at most twice.
     while 1:
         obj = llmemory.cast_ptr_to_adr(gcobj)
         hdr = self.header(obj)
         #
         if hdr.tid & GCFLAG_HASHFIELD:  # the hash is in a field at the end
             obj += self.get_size(obj)
             return obj.signed[0]
         #
         if not (hdr.tid & GCFLAG_HASHTAKEN):
             # It's the first time we ask for a hash, and it's not an
             # external object.  Shrink the top of space by the extra
             # hash word that will be needed after a collect.
             shrunk_top = self.top_of_space - llmemory.sizeof(lltype.Signed)
             if shrunk_top < self.free:
                 # Cannot shrink!  Do a collection, asking for at least
                 # one word of free space, and try again.  May raise
                 # MemoryError.  Obscure: not called directly, but
                 # across an llop, to make sure that there is the
                 # correct push_roots/pop_roots around the call...
                 llop.gc_obtain_free_space(llmemory.Address,
                                           llmemory.sizeof(lltype.Signed))
                 continue
             # Now we can have side-effects: set GCFLAG_HASHTAKEN
             # and lower the top of space.
             self.top_of_space = shrunk_top
             hdr.tid |= GCFLAG_HASHTAKEN
         #
         return llmemory.cast_adr_to_int(obj)  # direct case
Exemplo n.º 56
0
 def identityhash(self, gcobj):
     # Identity hash: use a pre-stored trailing hash field when present,
     # otherwise reserve space for one and hash by current address.
     # The following code should run at most twice.
     while 1:
         obj = llmemory.cast_ptr_to_adr(gcobj)
         hdr = self.header(obj)
         #
         if hdr.tid & GCFLAG_HASHFIELD:  # the hash is in a field at the end
             obj += self.get_size(obj)
             return obj.signed[0]
         #
         if not (hdr.tid & GCFLAG_HASHTAKEN):
             # It's the first time we ask for a hash, and it's not an
             # external object.  Shrink the top of space by the extra
             # hash word that will be needed after a collect.
             shrunk_top = self.top_of_space - llmemory.sizeof(lltype.Signed)
             if shrunk_top < self.free:
                 # Cannot shrink!  Do a collection, asking for at least
                 # one word of free space, and try again.  May raise
                 # MemoryError.  Obscure: not called directly, but
                 # across an llop, to make sure that there is the
                 # correct push_roots/pop_roots around the call...
                 llop.gc_obtain_free_space(llmemory.Address,
                                           llmemory.sizeof(lltype.Signed))
                 continue
             # Now we can have side-effects: set GCFLAG_HASHTAKEN
             # and lower the top of space.
             self.top_of_space = shrunk_top
             hdr.tid |= GCFLAG_HASHTAKEN
         #
         return llmemory.cast_adr_to_int(obj)  # direct case
Exemplo n.º 57
0
Arquivo: hybrid.py Projeto: ieure/pypy
 def malloc_nonmovable(self, typeid, length, zero):
     # helper for testing, same as GCBase.malloc; only variable-sized
     # allocations are supported here
     if not self.is_varsize(typeid):
         raise NotImplementedError("Not supported")
     gcref = self.malloc_varsize_slowpath(typeid, length, True)
     return llmemory.cast_ptr_to_adr(gcref)
Exemplo n.º 58
0
 def identityhash(self, gcobj):
     # Identity hash variant: the tid records *how* the hash was taken
     # (nursery vs. non-nursery address) and _get_object_hash combines
     # address, size and flags into the final value.
     # The following loop should run at most twice.
     while 1:
         obj = llmemory.cast_ptr_to_adr(gcobj)
         hdr = self.header(obj)
         if hdr.tid & GCFLAG_HASHMASK:
             break
         # It's the first time we ask for a hash, and it's not an
         # external object.  Shrink the top of space by the extra
         # hash word that will be needed after a collect.
         shrunk_top = self.top_of_space - llmemory.sizeof(lltype.Signed)
         if shrunk_top < self.free:
             # Cannot shrink!  Do a collection, asking for at least
             # one word of free space, and try again.  May raise
             # MemoryError.  Obscure: not called directly, but
             # across an llop, to make sure that there is the
             # correct push_roots/pop_roots around the call...
             llop.gc_obtain_free_space(llmemory.Address,
                                       llmemory.sizeof(lltype.Signed))
             continue
         else:
             # Now we can have side-effects: lower the top of space
             # and set one of the GC_HASH_TAKEN_xxx flags.
             self.top_of_space = shrunk_top
             if self.is_in_nursery(obj):
                 hdr.tid |= GC_HASH_TAKEN_NURS
             else:
                 hdr.tid |= GC_HASH_TAKEN_ADDR
             break
     # Now we can return the result
     objsize = self.get_size(obj)
     return self._get_object_hash(obj, objsize, hdr.tid)
Exemplo n.º 59
0
 def realloc(self, ptr, newlength, fixedsize, itemsize, lengthofs, grow):
     # Grow or shrink a resizable varsized object in place via raw
     # realloc; re-registers the (possibly moved) object in the
     # appropriate bookkeeping list and updates its stored length.
     size_gc_header = self.size_gc_header()
     addr = llmemory.cast_ptr_to_adr(ptr)
     tid = self.get_type_id(addr)
     nonvarsize = size_gc_header + fixedsize
     try:
         varsize = ovfcheck(itemsize * newlength)
         tot_size = ovfcheck(nonvarsize + varsize)
     except OverflowError:
         raise MemoryError()
     oldlength = (addr + lengthofs).signed[0]
     old_tot_size = size_gc_header + fixedsize + oldlength * itemsize
     source_addr = addr - size_gc_header
     # temporarily drop the object from the resizable list: realloc may
     # move it, invalidating the recorded address
     self.gen2_resizable_objects.remove(addr)
     if grow:
         result = llop.raw_realloc_grow(llmemory.Address, source_addr,
                                        old_tot_size, tot_size)
     else:
         result = llop.raw_realloc_shrink(llmemory.Address, source_addr,
                                          old_tot_size, tot_size)
     if not result:
         # realloc failed: restore bookkeeping before reporting OOM
         self.gen2_resizable_objects.append(addr)
         raise MemoryError()
     if grow:
         self.gen2_resizable_objects.append(result + size_gc_header)
     else:
         # a shrunk object is final: track it as plain raw-malloced
         self.gen2_rawmalloced_objects.append(result + size_gc_header)
     self._check_rawsize_alloced(raw_malloc_usage(tot_size) -
                                 raw_malloc_usage(old_tot_size),
                                 can_collect = not grow)
     (result + size_gc_header + lengthofs).signed[0] = newlength
     return llmemory.cast_adr_to_ptr(result + size_gc_header, llmemory.GCREF)