Example #1
 def malloc_fixedsize_clear(self,
                            typeid16,
                            size,
                            has_finalizer=False,
                            is_finalizer_light=False,
                            contains_weakptr=False):
     size_gc_header = self.gcheaderbuilder.size_gc_header
     totalsize = size_gc_header + size
     result = self.free
     if raw_malloc_usage(totalsize) > self.top_of_space - result:
         result = self.obtain_free_space(totalsize)
     llarena.arena_reserve(result, totalsize)
     self.init_gc_object(result, typeid16)
     self.free = result + totalsize
     #if is_finalizer_light:
     #    self.objects_with_light_finalizers.append(result + size_gc_header)
     #else:
     if has_finalizer:
         from rpython.rtyper.lltypesystem import rffi
         self.objects_with_finalizers.append(result + size_gc_header)
         self.objects_with_finalizers.append(rffi.cast(
             llmemory.Address, -1))
     if contains_weakptr:
         self.objects_with_weakrefs.append(result + size_gc_header)
     return llmemory.cast_adr_to_ptr(result + size_gc_header,
                                     llmemory.GCREF)
Example #2
 def malloc_fixedsize_clear(self, typeid, size,
                            has_finalizer=False,
                            is_finalizer_light=False,
                            contains_weakptr=False):
     if (has_finalizer or
         (raw_malloc_usage(size) > self.lb_young_fixedsize and
          raw_malloc_usage(size) > self.largest_young_fixedsize)):
         # ^^^ we do two size comparisons; the first one appears redundant,
         #     but it can be constant-folded if 'size' is a constant; then
         #     it almost always folds down to False, which kills the
         #     second comparison as well.
         ll_assert(not contains_weakptr, "wrong case for mallocing weakref")
         # "non-simple" case or object too big: don't use the nursery
         return SemiSpaceGC.malloc_fixedsize_clear(self, typeid, size,
                                                   has_finalizer,
                                                   is_finalizer_light,
                                                   contains_weakptr)
     size_gc_header = self.gcheaderbuilder.size_gc_header
     totalsize = size_gc_header + size
     result = self.nursery_free
     if raw_malloc_usage(totalsize) > self.nursery_top - result:
         result = self.collect_nursery()
     llarena.arena_reserve(result, totalsize)
     # GCFLAG_NO_YOUNG_PTRS is never set on young objs
     self.init_gc_object(result, typeid, flags=0)
     self.nursery_free = result + totalsize
     if contains_weakptr:
         self.young_objects_with_weakrefs.append(result + size_gc_header)
     return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
Example #3
def test_address_order():
    a = arena_malloc(24, False)
    assert eq(a, a)
    assert lt(a, a+1)
    assert lt(a+5, a+20)

    b = arena_malloc(24, False)
    if a > b:
        a, b = b, a
    assert lt(a, b)
    assert lt(a+19, b)
    assert lt(a, b+19)

    c = b + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(c, precomputed_size)
    assert lt(b, c)
    assert lt(a, c)
    assert lt(llmemory.NULL, c)
    d = c + llmemory.offsetof(SX, 'x')
    assert lt(c, d)
    assert lt(b, d)
    assert lt(a, d)
    assert lt(llmemory.NULL, d)
    e = c + precomputed_size
    assert lt(d, e)
    assert lt(c, e)
    assert lt(b, e)
    assert lt(a, e)
    assert lt(llmemory.NULL, e)
Example #4
def test_address_order():
    a = arena_malloc(24, False)
    assert eq(a, a)
    assert lt(a, a + 1)
    assert lt(a + 5, a + 20)

    b = arena_malloc(24, False)
    if a > b:
        a, b = b, a
    assert lt(a, b)
    assert lt(a + 19, b)
    assert lt(a, b + 19)

    c = b + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(c, precomputed_size)
    assert lt(b, c)
    assert lt(a, c)
    assert lt(llmemory.NULL, c)
    d = c + llmemory.offsetof(SX, 'x')
    assert lt(c, d)
    assert lt(b, d)
    assert lt(a, d)
    assert lt(llmemory.NULL, d)
    e = c + precomputed_size
    assert lt(d, e)
    assert lt(c, e)
    assert lt(b, e)
    assert lt(a, e)
    assert lt(llmemory.NULL, e)
Example #5
 def malloc_fixedsize_clear(self, typeid, size,
                            has_finalizer=False,
                            is_finalizer_light=False,
                            contains_weakptr=False):
     if (has_finalizer or
         (raw_malloc_usage(size) > self.lb_young_fixedsize and
          raw_malloc_usage(size) > self.largest_young_fixedsize)):
         # ^^^ we do two size comparisons; the first one appears redundant,
         #     but it can be constant-folded if 'size' is a constant; then
         #     it almost always folds down to False, which kills the
         #     second comparison as well.
         ll_assert(not contains_weakptr, "wrong case for mallocing weakref")
         # "non-simple" case or object too big: don't use the nursery
         return SemiSpaceGC.malloc_fixedsize_clear(self, typeid, size,
                                                   has_finalizer,
                                                   is_finalizer_light,
                                                   contains_weakptr)
     size_gc_header = self.gcheaderbuilder.size_gc_header
     totalsize = size_gc_header + size
     result = self.nursery_free
     if raw_malloc_usage(totalsize) > self.nursery_top - result:
         result = self.collect_nursery()
     llarena.arena_reserve(result, totalsize)
     # GCFLAG_NO_YOUNG_PTRS is never set on young objs
     self.init_gc_object(result, typeid, flags=0)
     self.nursery_free = result + totalsize
     if contains_weakptr:
         self.young_objects_with_weakrefs.append(result + size_gc_header)
     return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
Example #6
 def malloc_varsize_collecting_nursery(self, totalsize):
     result = self.collect_nursery()
     ll_assert(
         raw_malloc_usage(totalsize) <= self.nursery_top - result,
         "not enough room in malloc_varsize_collecting_nursery()")
     llarena.arena_reserve(result, totalsize)
     self.nursery_free = result + llarena.round_up_for_allocation(totalsize)
     return result
Example #7
 def malloc_varsize_collecting_nursery(self, totalsize):
     result = self.collect_nursery()
     ll_assert(raw_malloc_usage(totalsize) <= self.nursery_top - result,
               "not enough room in malloc_varsize_collecting_nursery()")
     llarena.arena_reserve(result, totalsize)
     self.nursery_free = result + llarena.round_up_for_allocation(
         totalsize)
     return result
Example #8
 def malloc(self, size):
     nsize = raw_malloc_usage(size)
     ll_assert(nsize > 0, "malloc: size is null or negative")
     ll_assert(nsize <= self.small_request_threshold, "malloc: size too big")
     ll_assert((nsize & (WORD-1)) == 0, "malloc: size is not aligned")
     #
     result = llarena.arena_malloc(nsize, False)
     llarena.arena_reserve(result, size)
     self.all_objects.append((result, nsize))
     self.total_memory_used += nsize
     return result
Example #9
 def malloc(self, size):
     nsize = raw_malloc_usage(size)
     ll_assert(nsize > 0, "malloc: size is null or negative")
     ll_assert(nsize <= self.small_request_threshold, "malloc: size too big")
     ll_assert((nsize & (WORD-1)) == 0, "malloc: size is not aligned")
     #
     result = llarena.arena_malloc(nsize, False)
     llarena.arena_reserve(result, size)
     self.all_objects.append((result, nsize))
     self.total_memory_used += nsize
     return result
Example #10
def test_shrink_obj():
    from rpython.memory.gcheader import GCHeaderBuilder
    HDR = lltype.Struct('HDR', ('h', lltype.Signed))
    gcheaderbuilder = GCHeaderBuilder(HDR)
    size_gc_header = gcheaderbuilder.size_gc_header
    S = lltype.GcStruct('S', ('x', lltype.Signed),
                             ('a', lltype.Array(lltype.Signed)))
    myarenasize = 200
    a = arena_malloc(myarenasize, False)
    arena_reserve(a, size_gc_header + llmemory.sizeof(S, 10))
    arena_shrink_obj(a, size_gc_header + llmemory.sizeof(S, 5))
    arena_reset(a, size_gc_header + llmemory.sizeof(S, 5), False)
Example #11
def test_address_eq_as_int():
    a = arena_malloc(50, False)
    arena_reserve(a, precomputed_size)
    p = llmemory.cast_adr_to_ptr(a, SPTR)
    a1 = llmemory.cast_ptr_to_adr(p)
    assert a == a1
    assert not (a != a1)
    assert (a+1) != a1
    assert not ((a+1) == a1)
    py.test.skip("cast_adr_to_int() is hard to get consistent")
    assert llmemory.cast_adr_to_int(a) == llmemory.cast_adr_to_int(a1)
    assert llmemory.cast_adr_to_int(a+1) == llmemory.cast_adr_to_int(a1) + 1
Example #12
def test_address_eq_as_int():
    a = arena_malloc(50, False)
    arena_reserve(a, precomputed_size)
    p = llmemory.cast_adr_to_ptr(a, SPTR)
    a1 = llmemory.cast_ptr_to_adr(p)
    assert a == a1
    assert not (a != a1)
    assert (a + 1) != a1
    assert not ((a + 1) == a1)
    py.test.skip("cast_adr_to_int() is hard to get consistent")
    assert llmemory.cast_adr_to_int(a) == llmemory.cast_adr_to_int(a1)
    assert llmemory.cast_adr_to_int(a + 1) == llmemory.cast_adr_to_int(a1) + 1
Example #13
def test_shrink_obj():
    from rpython.memory.gcheader import GCHeaderBuilder
    HDR = lltype.Struct('HDR', ('h', lltype.Signed))
    gcheaderbuilder = GCHeaderBuilder(HDR)
    size_gc_header = gcheaderbuilder.size_gc_header
    S = lltype.GcStruct('S', ('x', lltype.Signed),
                        ('a', lltype.Array(lltype.Signed)))
    myarenasize = 200
    a = arena_malloc(myarenasize, False)
    arena_reserve(a, size_gc_header + llmemory.sizeof(S, 10))
    arena_shrink_obj(a, size_gc_header + llmemory.sizeof(S, 5))
    arena_reset(a, size_gc_header + llmemory.sizeof(S, 5), False)
Example #14
 def free_page(self, page):
     """Free a whole page."""
     #
     # Insert the freed page in the arena's 'freepages' list.
     # If nfreepages == totalpages, then it will be freed at the
     # end of mass_free().
     arena = page.arena
     arena.nfreepages += 1
     pageaddr = llmemory.cast_ptr_to_adr(page)
     pageaddr = llarena.getfakearenaaddress(pageaddr)
     llarena.arena_reset(pageaddr, self.page_size, 0)
     llarena.arena_reserve(pageaddr, llmemory.sizeof(llmemory.Address))
     pageaddr.address[0] = arena.freepages
     arena.freepages = pageaddr
Example #15
 def allocate_new_page(self, size_class):
     """Allocate and return a new page for the given size_class."""
     #
     # Allocate a new arena if needed.
     if self.current_arena == ARENA_NULL:
         self.allocate_new_arena()
     #
     # The result is simply 'current_arena.freepages'.
     arena = self.current_arena
     result = arena.freepages
     if arena.nfreepages > 0:
         #
         # The 'result' was part of the chained list; read the next.
         arena.nfreepages -= 1
         freepages = result.address[0]
         llarena.arena_reset(result,
                             llmemory.sizeof(llmemory.Address),
                             0)
         #
     else:
         # The 'result' is part of the uninitialized pages.
         ll_assert(self.num_uninitialized_pages > 0,
                   "fully allocated arena found in self.current_arena")
         self.num_uninitialized_pages -= 1
         if self.num_uninitialized_pages > 0:
             freepages = result + self.page_size
         else:
             freepages = NULL
     #
     arena.freepages = freepages
     if freepages == NULL:
         # This was the last page, so put the arena away into
         # arenas_lists[0].
         ll_assert(arena.nfreepages == 0, 
                   "freepages == NULL but nfreepages > 0")
         arena.nextarena = self.arenas_lists[0]
         self.arenas_lists[0] = arena
         self.current_arena = ARENA_NULL
     #
     # Initialize the fields of the resulting page
     llarena.arena_reserve(result, llmemory.sizeof(PAGE_HEADER))
     page = llmemory.cast_adr_to_ptr(result, PAGE_PTR)
     page.arena = arena
     page.nfree = 0
     page.freeblock = result + self.hdrsize
     page.nextpage = PAGE_NULL
     ll_assert(self.page_for_size[size_class] == PAGE_NULL,
               "allocate_new_page() called but a page is already waiting")
     self.page_for_size[size_class] = page
     return page
Example #16
 def free_page(self, page):
     """Free a whole page."""
     #
     # Insert the freed page in the arena's 'freepages' list.
     # If nfreepages == totalpages, then it will be freed at the
     # end of mass_free().
     arena = page.arena
     arena.nfreepages += 1
     pageaddr = llmemory.cast_ptr_to_adr(page)
     pageaddr = llarena.getfakearenaaddress(pageaddr)
     llarena.arena_reset(pageaddr, self.page_size, 0)
     llarena.arena_reserve(pageaddr, llmemory.sizeof(llmemory.Address))
     pageaddr.address[0] = arena.freepages
     arena.freepages = pageaddr
Example #17
def test_arena_protect():
    a = arena_malloc(100, False)
    S = lltype.Struct('S', ('x', lltype.Signed))
    arena_reserve(a, llmemory.sizeof(S))
    p = llmemory.cast_adr_to_ptr(a, lltype.Ptr(S))
    p.x = 123
    assert p.x == 123
    arena_protect(a, 100, True)
    py.test.raises(ArenaError, arena_reserve, a + 48, llmemory.sizeof(S))
    py.test.raises(RuntimeError, "p.x")
    py.test.raises(RuntimeError, "p.x = 124")
    arena_protect(a, 100, False)
    assert p.x == 123
    p.x = 125
    assert p.x == 125
Example #18
def test_arena_protect():
    a = arena_malloc(100, False)
    S = lltype.Struct('S', ('x', lltype.Signed))
    arena_reserve(a, llmemory.sizeof(S))
    p = llmemory.cast_adr_to_ptr(a, lltype.Ptr(S))
    p.x = 123
    assert p.x == 123
    arena_protect(a, 100, True)
    py.test.raises(ArenaError, arena_reserve, a + 48, llmemory.sizeof(S))
    py.test.raises(RuntimeError, "p.x")
    py.test.raises(RuntimeError, "p.x = 124")
    arena_protect(a, 100, False)
    assert p.x == 123
    p.x = 125
    assert p.x == 125
Example #19
def test_look_inside_object():
    # this code is also used in translation tests below
    myarenasize = 50
    a = arena_malloc(myarenasize, False)
    b = a + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(b, precomputed_size)
    (b + llmemory.offsetof(SX, 'x')).signed[0] = 123
    assert llmemory.cast_adr_to_ptr(b, SPTR).x == 123
    llmemory.cast_adr_to_ptr(b, SPTR).x += 1
    assert (b + llmemory.offsetof(SX, 'x')).signed[0] == 124
    arena_reset(a, myarenasize, True)
    arena_reserve(b, round_up_for_allocation(llmemory.sizeof(SX)))
    assert llmemory.cast_adr_to_ptr(b, SPTR).x == 0
    arena_free(a)
    return 42
Example #20
def test_look_inside_object():
    # this code is also used in translation tests below
    myarenasize = 50
    a = arena_malloc(myarenasize, False)
    b = a + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(b, precomputed_size)
    (b + llmemory.offsetof(SX, 'x')).signed[0] = 123
    assert llmemory.cast_adr_to_ptr(b, SPTR).x == 123
    llmemory.cast_adr_to_ptr(b, SPTR).x += 1
    assert (b + llmemory.offsetof(SX, 'x')).signed[0] == 124
    arena_reset(a, myarenasize, True)
    arena_reserve(b, round_up_for_allocation(llmemory.sizeof(SX)))
    assert llmemory.cast_adr_to_ptr(b, SPTR).x == 0
    arena_free(a)
    return 42
Example #21
 def allocate_new_page(self, size_class):
     """Allocate and return a new page for the given size_class."""
     #
     # Allocate a new arena if needed.
     if self.current_arena == ARENA_NULL:
         self.allocate_new_arena()
     #
     # The result is simply 'current_arena.freepages'.
     arena = self.current_arena
     result = arena.freepages
     if arena.nfreepages > 0:
         #
         # The 'result' was part of the chained list; read the next.
         arena.nfreepages -= 1
         freepages = result.address[0]
         llarena.arena_reset(result, llmemory.sizeof(llmemory.Address), 0)
         #
     else:
         # The 'result' is part of the uninitialized pages.
         ll_assert(self.num_uninitialized_pages > 0,
                   "fully allocated arena found in self.current_arena")
         self.num_uninitialized_pages -= 1
         if self.num_uninitialized_pages > 0:
             freepages = result + self.page_size
         else:
             freepages = NULL
     #
     arena.freepages = freepages
     if freepages == NULL:
         # This was the last page, so put the arena away into
         # arenas_lists[0].
         ll_assert(arena.nfreepages == 0,
                   "freepages == NULL but nfreepages > 0")
         arena.nextarena = self.arenas_lists[0]
         self.arenas_lists[0] = arena
         self.current_arena = ARENA_NULL
     #
     # Initialize the fields of the resulting page
     llarena.arena_reserve(result, llmemory.sizeof(PAGE_HEADER))
     page = llmemory.cast_adr_to_ptr(result, PAGE_PTR)
     page.arena = arena
     page.nfree = 0
     page.freeblock = result + self.hdrsize
     page.nextpage = PAGE_NULL
     ll_assert(self.page_for_size[size_class] == PAGE_NULL,
               "allocate_new_page() called but a page is already waiting")
     self.page_for_size[size_class] = page
     return page
Example #22
 def malloc_varsize_clear(self, typeid16, length, size, itemsize,
                          offset_to_length):
     size_gc_header = self.gcheaderbuilder.size_gc_header
     nonvarsize = size_gc_header + size
     try:
         varsize = ovfcheck(itemsize * length)
         totalsize = ovfcheck(nonvarsize + varsize)
     except OverflowError:
         raise memoryError
     result = self.free
     if raw_malloc_usage(totalsize) > self.top_of_space - result:
         result = self.obtain_free_space(totalsize)
     llarena.arena_reserve(result, totalsize)
     self.init_gc_object(result, typeid16)
     (result + size_gc_header + offset_to_length).signed[0] = length
     self.free = result + llarena.round_up_for_allocation(totalsize)
     return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
Example #23
 def _make_a_copy_with_tid(self, obj, objsize, tid):
     totalsize = self.size_gc_header() + objsize
     newaddr = self.free
     llarena.arena_reserve(newaddr, totalsize)
     raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
     if tid & GCFLAG_HASHMASK:
         hash = self._get_object_hash(obj, objsize, tid)
         llarena.arena_reserve(newaddr + totalsize,
                               llmemory.sizeof(lltype.Signed))
         (newaddr + totalsize).signed[0] = hash
         tid |= GC_HASH_HASFIELD
         totalsize += llmemory.sizeof(lltype.Signed)
     self.free += totalsize
     newhdr = llmemory.cast_adr_to_ptr(newaddr, lltype.Ptr(self.HDR))
     newhdr.tid = tid
     newobj = newaddr + self.size_gc_header()
     return newobj
Example #24
 def fn(argv):
     testrun = int(argv[1])
     a = arena_malloc(65536, False)
     arena_reserve(a, llmemory.sizeof(S))
     p = llmemory.cast_adr_to_ptr(a + 23432, lltype.Ptr(S))
     p.x = 123
     assert p.x == 123
     arena_protect(a, 65536, True)
     result = 0
     if testrun == 1:
         print p.x  # segfault
     if testrun == 2:
         p.x = 124  # segfault
     arena_protect(a, 65536, False)
     p.x += 10
     print p.x
     return 0
Example #25
 def fn(argv):
     testrun = int(argv[1])
     a = arena_malloc(65536, False)
     arena_reserve(a, llmemory.sizeof(S))
     p = llmemory.cast_adr_to_ptr(a + 23432, lltype.Ptr(S))
     p.x = 123
     assert p.x == 123
     arena_protect(a, 65536, True)
     result = 0
     if testrun == 1:
         print p.x       # segfault
     if testrun == 2:
         p.x = 124       # segfault
     arena_protect(a, 65536, False)
     p.x += 10
     print p.x
     return 0
Example #26
 def _make_a_copy_with_tid(self, obj, objsize, tid):
     totalsize = self.size_gc_header() + objsize
     newaddr = self.free
     llarena.arena_reserve(newaddr, totalsize)
     raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
     if tid & GCFLAG_HASHMASK:
         hash = self._get_object_hash(obj, objsize, tid)
         llarena.arena_reserve(newaddr + totalsize,
                               llmemory.sizeof(lltype.Signed))
         (newaddr + totalsize).signed[0] = hash
         tid |= GC_HASH_HASFIELD
         totalsize += llmemory.sizeof(lltype.Signed)
     self.free += totalsize
     newhdr = llmemory.cast_adr_to_ptr(newaddr, lltype.Ptr(self.HDR))
     newhdr.tid = tid
     newobj = newaddr + self.size_gc_header()
     return newobj
Example #27
 def set_forwarding_address(self, obj, newobj, objsize):
     # To mark an object as forwarded, we set the GCFLAG_FORWARDED and
     # overwrite the object with a FORWARDSTUB.  Doing so is a bit
     # long-winded on llarena, but it all melts down to two memory
     # writes after translation to C.
     size_gc_header = self.size_gc_header()
     stubsize = llmemory.sizeof(self.FORWARDSTUB)
     tid = self.header(obj).tid
     ll_assert(tid & GCFLAG_EXTERNAL == 0,  "unexpected GCFLAG_EXTERNAL")
     ll_assert(tid & GCFLAG_FORWARDED == 0, "unexpected GCFLAG_FORWARDED")
     # replace the object at 'obj' with a FORWARDSTUB.
     hdraddr = obj - size_gc_header
     llarena.arena_reset(hdraddr, size_gc_header + objsize, False)
     llarena.arena_reserve(hdraddr, size_gc_header + stubsize)
     hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(self.HDR))
     hdr.tid = tid | GCFLAG_FORWARDED
     stub = llmemory.cast_adr_to_ptr(obj, self.FORWARDSTUBPTR)
     stub.forw = newobj
Example #28
 def malloc_varsize_clear(self, typeid16, length, size, itemsize,
                          offset_to_length):
     size_gc_header = self.gcheaderbuilder.size_gc_header
     nonvarsize = size_gc_header + size
     try:
         varsize = ovfcheck(itemsize * length)
         totalsize = ovfcheck(nonvarsize + varsize)
     except OverflowError:
         raise memoryError
     result = self.free
     if raw_malloc_usage(totalsize) > self.top_of_space - result:
         result = self.obtain_free_space(totalsize)
     llarena.arena_reserve(result, totalsize)
     self.init_gc_object(result, typeid16)
     (result + size_gc_header + offset_to_length).signed[0] = length
     self.free = result + llarena.round_up_for_allocation(totalsize)
     return llmemory.cast_adr_to_ptr(result + size_gc_header,
                                     llmemory.GCREF)
Example #29
 def set_forwarding_address(self, obj, newobj, objsize):
     # To mark an object as forwarded, we set the GCFLAG_FORWARDED and
     # overwrite the object with a FORWARDSTUB.  Doing so is a bit
     # long-winded on llarena, but it all melts down to two memory
     # writes after translation to C.
     size_gc_header = self.size_gc_header()
     stubsize = llmemory.sizeof(self.FORWARDSTUB)
     tid = self.header(obj).tid
     ll_assert(tid & GCFLAG_EXTERNAL == 0, "unexpected GCFLAG_EXTERNAL")
     ll_assert(tid & GCFLAG_FORWARDED == 0, "unexpected GCFLAG_FORWARDED")
     # replace the object at 'obj' with a FORWARDSTUB.
     hdraddr = obj - size_gc_header
     llarena.arena_reset(hdraddr, size_gc_header + objsize, False)
     llarena.arena_reserve(hdraddr, size_gc_header + stubsize)
     hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(self.HDR))
     hdr.tid = tid | GCFLAG_FORWARDED
     stub = llmemory.cast_adr_to_ptr(obj, self.FORWARDSTUBPTR)
     stub.forw = newobj
Example #30
    def malloc_varsize_clear(self, typeid, length, size, itemsize,
                             offset_to_length):
        # Only use the nursery if there are not too many items.
        if not raw_malloc_usage(itemsize):
            too_many_items = False
        else:
            # The following line is usually constant-folded because both
            # min_nursery_size and itemsize are constants (the latter
            # due to inlining).
            maxlength_for_minimal_nursery = (self.min_nursery_size // 4 //
                                             raw_malloc_usage(itemsize))

            # The actual maximum length for our nursery depends on how
            # many times our nursery is bigger than the minimal size.
            # The computation is done in this roundabout way so that
            # the only remaining computation is the following
            # shift.
            maxlength = maxlength_for_minimal_nursery << self.nursery_scale
            too_many_items = length > maxlength

        if (too_many_items or
            (raw_malloc_usage(size) > self.lb_young_var_basesize
             and raw_malloc_usage(size) > self.largest_young_var_basesize)):
            # ^^^ we do two size comparisons; the first one appears redundant,
            #     but it can be constant-folded if 'size' is a constant; then
            #     it almost always folds down to False, which kills the
            #     second comparison as well.
            return SemiSpaceGC.malloc_varsize_clear(self, typeid, length, size,
                                                    itemsize, offset_to_length)
        # with the above checks we know now that totalsize cannot be more
        # than about half of the nursery size; in particular, the + and *
        # cannot overflow
        size_gc_header = self.gcheaderbuilder.size_gc_header
        totalsize = size_gc_header + size + itemsize * length
        result = self.nursery_free
        if raw_malloc_usage(totalsize) > self.nursery_top - result:
            result = self.collect_nursery()
        llarena.arena_reserve(result, totalsize)
        # GCFLAG_NO_YOUNG_PTRS is never set on young objs
        self.init_gc_object(result, typeid, flags=0)
        (result + size_gc_header + offset_to_length).signed[0] = length
        self.nursery_free = result + llarena.round_up_for_allocation(totalsize)
        return llmemory.cast_adr_to_ptr(result + size_gc_header,
                                        llmemory.GCREF)
Example #31
    def malloc_varsize_clear(self, typeid, length, size, itemsize,
                             offset_to_length):
        # Only use the nursery if there are not too many items.
        if not raw_malloc_usage(itemsize):
            too_many_items = False
        else:
            # The following line is usually constant-folded because both
            # min_nursery_size and itemsize are constants (the latter
            # due to inlining).
            maxlength_for_minimal_nursery = (self.min_nursery_size // 4 //
                                             raw_malloc_usage(itemsize))
            
            # The actual maximum length for our nursery depends on how
            # many times our nursery is bigger than the minimal size.
            # The computation is done in this roundabout way so that
            # the only remaining computation is the following
            # shift.
            maxlength = maxlength_for_minimal_nursery << self.nursery_scale
            too_many_items = length > maxlength

        if (too_many_items or
            (raw_malloc_usage(size) > self.lb_young_var_basesize and
             raw_malloc_usage(size) > self.largest_young_var_basesize)):
            # ^^^ we do two size comparisons; the first one appears redundant,
            #     but it can be constant-folded if 'size' is a constant; then
            #     it almost always folds down to False, which kills the
            #     second comparison as well.
            return SemiSpaceGC.malloc_varsize_clear(self, typeid, length, size,
                                                    itemsize, offset_to_length)
        # with the above checks we know now that totalsize cannot be more
        # than about half of the nursery size; in particular, the + and *
        # cannot overflow
        size_gc_header = self.gcheaderbuilder.size_gc_header
        totalsize = size_gc_header + size + itemsize * length
        result = self.nursery_free
        if raw_malloc_usage(totalsize) > self.nursery_top - result:
            result = self.collect_nursery()
        llarena.arena_reserve(result, totalsize)
        # GCFLAG_NO_YOUNG_PTRS is never set on young objs
        self.init_gc_object(result, typeid, flags=0)
        (result + size_gc_header + offset_to_length).signed[0] = length
        self.nursery_free = result + llarena.round_up_for_allocation(totalsize)
        return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
Example #32
 def malloc_fixedsize_clear(self, typeid16, size,
                            has_finalizer=False,
                            is_finalizer_light=False,
                            contains_weakptr=False):
     size_gc_header = self.gcheaderbuilder.size_gc_header
     totalsize = size_gc_header + size
     result = self.free
     if raw_malloc_usage(totalsize) > self.top_of_space - result:
         result = self.obtain_free_space(totalsize)
     llarena.arena_reserve(result, totalsize)
     self.init_gc_object(result, typeid16)
     self.free = result + totalsize
     #if is_finalizer_light:
     #    self.objects_with_light_finalizers.append(result + size_gc_header)
     #else:
     if has_finalizer:
         self.objects_with_finalizers.append(result + size_gc_header)
     if contains_weakptr:
         self.objects_with_weakrefs.append(result + size_gc_header)
     return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
Example #33
 def malloc(self, size):
     """Allocate a block from a page in an arena."""
     nsize = llmemory.raw_malloc_usage(size)
     ll_assert(nsize > 0, "malloc: size is null or negative")
     ll_assert(nsize <= self.small_request_threshold, "malloc: size too big")
     ll_assert((nsize & (WORD-1)) == 0, "malloc: size is not aligned")
     self.total_memory_used += r_uint(nsize)
     #
     # Get the page to use from the size
     size_class = nsize >> WORD_POWER_2
     page = self.page_for_size[size_class]
     if page == PAGE_NULL:
         page = self.allocate_new_page(size_class)
     #
     # The result is simply 'page.freeblock'
     result = page.freeblock
     if page.nfree > 0:
         #
         # The 'result' was part of the chained list; read the next.
         page.nfree -= 1
         freeblock = result.address[0]
         llarena.arena_reset(result,
                             llmemory.sizeof(llmemory.Address),
                             0)
         #
     else:
         # The 'result' is part of the uninitialized blocks.
         freeblock = result + nsize
     #
     page.freeblock = freeblock
     #
     pageaddr = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
     if freeblock - pageaddr > self.page_size - nsize:
         # This was the last free block, so unlink the page from the
         # chained list and put it in the 'full_page_for_size' list.
         self.page_for_size[size_class] = page.nextpage
         page.nextpage = self.full_page_for_size[size_class]
         self.full_page_for_size[size_class] = page
     #
     llarena.arena_reserve(result, _dummy_size(size))
     return result
Example #34
 def malloc(self, size):
     """Allocate a block from a page in an arena."""
     nsize = llmemory.raw_malloc_usage(size)
     ll_assert(nsize > 0, "malloc: size is null or negative")
     ll_assert(nsize <= self.small_request_threshold,
               "malloc: size too big")
     ll_assert((nsize & (WORD - 1)) == 0, "malloc: size is not aligned")
     self.total_memory_used += r_uint(nsize)
     #
     # Get the page to use from the size
     size_class = nsize >> WORD_POWER_2
     page = self.page_for_size[size_class]
     if page == PAGE_NULL:
         page = self.allocate_new_page(size_class)
     #
     # The result is simply 'page.freeblock'
     result = page.freeblock
     if page.nfree > 0:
         #
         # The 'result' was part of the chained list; read the next.
         page.nfree -= 1
         freeblock = result.address[0]
         llarena.arena_reset(result, llmemory.sizeof(llmemory.Address), 0)
         #
     else:
         # The 'result' is part of the uninitialized blocks.
         freeblock = result + nsize
     #
     page.freeblock = freeblock
     #
     pageaddr = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
     if freeblock - pageaddr > self.page_size - nsize:
         # This was the last free block, so unlink the page from the
         # chained list and put it in the 'full_page_for_size' list.
         self.page_for_size[size_class] = page.nextpage
         page.nextpage = self.full_page_for_size[size_class]
         self.full_page_for_size[size_class] = page
     #
     llarena.arena_reserve(result, _dummy_size(size))
     return result
Example #35
 def link(pageaddr, size_class, size_block, nblocks, nusedblocks, step=1):
     assert step in (1, 2)
     llarena.arena_reserve(pageaddr, llmemory.sizeof(PAGE_HEADER))
     page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR)
     if step == 1:
         page.nfree = 0
         nuninitialized = nblocks - nusedblocks
     else:
         page.nfree = nusedblocks
         nuninitialized = nblocks - 2*nusedblocks
     page.freeblock = pageaddr + hdrsize + nusedblocks * size_block
     if nusedblocks < nblocks:
         chainedlists = ac.page_for_size
     else:
         chainedlists = ac.full_page_for_size
     page.nextpage = chainedlists[size_class]
     page.arena = ac.current_arena
     chainedlists[size_class] = page
     if fill_with_objects:
         for i in range(0, nusedblocks*step, step):
             objaddr = pageaddr + hdrsize + i * size_block
             llarena.arena_reserve(objaddr, _dummy_size(size_block))
         if step == 2:
             prev = 'page.freeblock'
             for i in range(1, nusedblocks*step, step):
                 holeaddr = pageaddr + hdrsize + i * size_block
                 llarena.arena_reserve(holeaddr,
                                       llmemory.sizeof(llmemory.Address))
                 exec '%s = holeaddr' % prev in globals(), locals()
                 prevhole = holeaddr
                 prev = 'prevhole.address[0]'
             endaddr = pageaddr + hdrsize + 2*nusedblocks * size_block
             exec '%s = endaddr' % prev in globals(), locals()
     assert ac._nuninitialized(page, size_class) == nuninitialized
Example #36
 def link(pageaddr, size_class, size_block, nblocks, nusedblocks, step=1):
     assert step in (1, 2)
     llarena.arena_reserve(pageaddr, llmemory.sizeof(PAGE_HEADER))
     page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR)
     if step == 1:
         page.nfree = 0
         nuninitialized = nblocks - nusedblocks
     else:
         page.nfree = nusedblocks
         nuninitialized = nblocks - 2 * nusedblocks
     page.freeblock = pageaddr + hdrsize + nusedblocks * size_block
     if nusedblocks < nblocks:
         chainedlists = ac.page_for_size
     else:
         chainedlists = ac.full_page_for_size
     page.nextpage = chainedlists[size_class]
     page.arena = ac.current_arena
     chainedlists[size_class] = page
     if fill_with_objects:
         for i in range(0, nusedblocks * step, step):
             objaddr = pageaddr + hdrsize + i * size_block
             llarena.arena_reserve(objaddr, _dummy_size(size_block))
         if step == 2:
             prev = 'page.freeblock'
             for i in range(1, nusedblocks * step, step):
                 holeaddr = pageaddr + hdrsize + i * size_block
                 llarena.arena_reserve(holeaddr,
                                       llmemory.sizeof(llmemory.Address))
                 exec('%s = holeaddr' % prev, globals(), locals())
                 prevhole = holeaddr
                 prev = 'prevhole.address[0]'
             endaddr = pageaddr + hdrsize + 2 * nusedblocks * size_block
             exec('%s = endaddr' % prev, globals(), locals())
     assert ac._nuninitialized(page, size_class) == nuninitialized
Example #37
def test_replace_object_with_stub():
    from rpython.memory.gcheader import GCHeaderBuilder
    HDR = lltype.Struct('HDR', ('x', lltype.Signed))
    S = lltype.GcStruct('S', ('y', lltype.Signed), ('z', lltype.Signed))
    STUB = lltype.GcStruct('STUB', ('t', lltype.Char))
    gcheaderbuilder = GCHeaderBuilder(HDR)
    size_gc_header = gcheaderbuilder.size_gc_header
    ssize = llmemory.raw_malloc_usage(llmemory.sizeof(S))

    a = arena_malloc(13*ssize, True)
    hdraddr = a + 3*ssize
    arena_reserve(hdraddr, size_gc_header + llmemory.sizeof(S))
    hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(HDR))
    hdr.x = 42
    obj = llmemory.cast_adr_to_ptr(hdraddr + size_gc_header, lltype.Ptr(S))
    obj.y = -5
    obj.z = -6

    hdraddr = llmemory.cast_ptr_to_adr(obj) - size_gc_header
    arena_reset(hdraddr, size_gc_header + llmemory.sizeof(S), False)
    arena_reserve(hdraddr, size_gc_header + llmemory.sizeof(STUB))

    # check that it is possible to reach the newly reserved HDR+STUB
    # via the header of the old 'obj' pointer, both via the existing
    # 'hdraddr':
    hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(HDR))
    hdr.x = 46
    stub = llmemory.cast_adr_to_ptr(hdraddr + size_gc_header, lltype.Ptr(STUB))
    stub.t = '!'

    # and via a (now-invalid) pointer to the old 'obj': (this is needed
    # because during a garbage collection there are still pointers to
    # the old 'obj' around to be fixed)
    hdraddr = llmemory.cast_ptr_to_adr(obj) - size_gc_header
    hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(HDR))
    assert hdr.x == 46
    stub = llmemory.cast_adr_to_ptr(hdraddr + size_gc_header,
                                    lltype.Ptr(STUB))
    assert stub.t == '!'
Example #38
    def malloc_varsize_clear(self, typeid, length, size, itemsize,
                             offset_to_length):
        size_gc_header = self.gcheaderbuilder.size_gc_header
        nonvarsize = size_gc_header + size

        # Compute the maximal length that makes the object still
        # below 'nonlarge_max'.  All the following logic is usually
        # constant-folded because self.nonlarge_max, size and itemsize
        # are all constants (the arguments are constant due to
        # inlining) and self.has_gcptr_in_varsize() is constant-folded.
        if self.has_gcptr_in_varsize(typeid):
            nonlarge_max = self.nonlarge_gcptrs_max
        else:
            nonlarge_max = self.nonlarge_max

        if not raw_malloc_usage(itemsize):
            too_many_items = raw_malloc_usage(nonvarsize) > nonlarge_max
        else:
            maxlength = nonlarge_max - raw_malloc_usage(nonvarsize)
            maxlength = maxlength // raw_malloc_usage(itemsize)
            too_many_items = length > maxlength

        if not too_many_items:
            # With the above checks we know now that totalsize cannot be more
            # than 'nonlarge_max'; in particular, the + and * cannot overflow.
            # Let's try to fit the object in the nursery.
            totalsize = nonvarsize + itemsize * length
            result = self.nursery_free
            if raw_malloc_usage(totalsize) <= self.nursery_top - result:
                llarena.arena_reserve(result, totalsize)
                # GCFLAG_NO_YOUNG_PTRS is never set on young objs
                self.init_gc_object(result, typeid, flags=0)
                (result + size_gc_header + offset_to_length).signed[0] = length
                self.nursery_free = result + llarena.round_up_for_allocation(
                    totalsize)
                return llmemory.cast_adr_to_ptr(result+size_gc_header,
                                                llmemory.GCREF)
        return self.malloc_varsize_slowpath(typeid, length)
Example #39
def test_replace_object_with_stub():
    from rpython.memory.gcheader import GCHeaderBuilder
    HDR = lltype.Struct('HDR', ('x', lltype.Signed))
    S = lltype.GcStruct('S', ('y', lltype.Signed), ('z', lltype.Signed))
    STUB = lltype.GcStruct('STUB', ('t', lltype.Char))
    gcheaderbuilder = GCHeaderBuilder(HDR)
    size_gc_header = gcheaderbuilder.size_gc_header
    ssize = llmemory.raw_malloc_usage(llmemory.sizeof(S))

    a = arena_malloc(13 * ssize, True)
    hdraddr = a + 3 * ssize
    arena_reserve(hdraddr, size_gc_header + llmemory.sizeof(S))
    hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(HDR))
    hdr.x = 42
    obj = llmemory.cast_adr_to_ptr(hdraddr + size_gc_header, lltype.Ptr(S))
    obj.y = -5
    obj.z = -6

    hdraddr = llmemory.cast_ptr_to_adr(obj) - size_gc_header
    arena_reset(hdraddr, size_gc_header + llmemory.sizeof(S), False)
    arena_reserve(hdraddr, size_gc_header + llmemory.sizeof(STUB))

    # check that it is possible to reach the newly reserved HDR+STUB
    # via the header of the old 'obj' pointer, both via the existing
    # 'hdraddr':
    hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(HDR))
    hdr.x = 46
    stub = llmemory.cast_adr_to_ptr(hdraddr + size_gc_header, lltype.Ptr(STUB))
    stub.t = '!'

    # and via a (now-invalid) pointer to the old 'obj': (this is needed
    # because during a garbage collection there are still pointers to
    # the old 'obj' around to be fixed)
    hdraddr = llmemory.cast_ptr_to_adr(obj) - size_gc_header
    hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(HDR))
    assert hdr.x == 46
    stub = llmemory.cast_adr_to_ptr(hdraddr + size_gc_header, lltype.Ptr(STUB))
    assert stub.t == '!'
Example #40
    def malloc_varsize_clear(self, typeid, length, size, itemsize,
                             offset_to_length):
        size_gc_header = self.gcheaderbuilder.size_gc_header
        nonvarsize = size_gc_header + size

        # Compute the maximal length that makes the object still
        # below 'nonlarge_max'.  All the following logic is usually
        # constant-folded because self.nonlarge_max, size and itemsize
        # are all constants (the arguments are constant due to
        # inlining) and self.has_gcptr_in_varsize() is constant-folded.
        if self.has_gcptr_in_varsize(typeid):
            nonlarge_max = self.nonlarge_gcptrs_max
        else:
            nonlarge_max = self.nonlarge_max

        if not raw_malloc_usage(itemsize):
            too_many_items = raw_malloc_usage(nonvarsize) > nonlarge_max
        else:
            maxlength = nonlarge_max - raw_malloc_usage(nonvarsize)
            maxlength = maxlength // raw_malloc_usage(itemsize)
            too_many_items = length > maxlength

        if not too_many_items:
            # With the above checks we know now that totalsize cannot be more
            # than 'nonlarge_max'; in particular, the + and * cannot overflow.
            # Let's try to fit the object in the nursery.
            totalsize = nonvarsize + itemsize * length
            result = self.nursery_free
            if raw_malloc_usage(totalsize) <= self.nursery_top - result:
                llarena.arena_reserve(result, totalsize)
                # GCFLAG_NO_YOUNG_PTRS is never set on young objs
                self.init_gc_object(result, typeid, flags=0)
                (result + size_gc_header + offset_to_length).signed[0] = length
                self.nursery_free = result + llarena.round_up_for_allocation(
                    totalsize)
                return llmemory.cast_adr_to_ptr(result + size_gc_header,
                                                llmemory.GCREF)
        return self.malloc_varsize_slowpath(typeid, length)
Example #41
def arena_collection_for_test(pagesize, pagelayout, fill_with_objects=False):
    assert " " not in pagelayout.rstrip(" ")
    nb_pages = len(pagelayout)
    arenasize = pagesize * (nb_pages + 1) - 1
    ac = ArenaCollection(arenasize, pagesize, 9*WORD)
    #
    def link(pageaddr, size_class, size_block, nblocks, nusedblocks, step=1):
        assert step in (1, 2)
        llarena.arena_reserve(pageaddr, llmemory.sizeof(PAGE_HEADER))
        page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR)
        if step == 1:
            page.nfree = 0
            nuninitialized = nblocks - nusedblocks
        else:
            page.nfree = nusedblocks
            nuninitialized = nblocks - 2*nusedblocks
        page.freeblock = pageaddr + hdrsize + nusedblocks * size_block
        if nusedblocks < nblocks:
            chainedlists = ac.page_for_size
        else:
            chainedlists = ac.full_page_for_size
        page.nextpage = chainedlists[size_class]
        page.arena = ac.current_arena
        chainedlists[size_class] = page
        if fill_with_objects:
            for i in range(0, nusedblocks*step, step):
                objaddr = pageaddr + hdrsize + i * size_block
                llarena.arena_reserve(objaddr, _dummy_size(size_block))
            if step == 2:
                prev = 'page.freeblock'
                for i in range(1, nusedblocks*step, step):
                    holeaddr = pageaddr + hdrsize + i * size_block
                    llarena.arena_reserve(holeaddr,
                                          llmemory.sizeof(llmemory.Address))
                    exec '%s = holeaddr' % prev in globals(), locals()
                    prevhole = holeaddr
                    prev = 'prevhole.address[0]'
                endaddr = pageaddr + hdrsize + 2*nusedblocks * size_block
                exec '%s = endaddr' % prev in globals(), locals()
        assert ac._nuninitialized(page, size_class) == nuninitialized
    #
    ac.allocate_new_arena()
    num_initialized_pages = len(pagelayout.rstrip(" "))
    ac._startpageaddr = ac.current_arena.freepages
    if pagelayout.endswith(" "):
        ac.current_arena.freepages += pagesize * num_initialized_pages
    else:
        ac.current_arena.freepages = NULL
    ac.num_uninitialized_pages -= num_initialized_pages
    #
    for i in reversed(range(num_initialized_pages)):
        pageaddr = pagenum(ac, i)
        c = pagelayout[i]
        if '1' <= c <= '9':   # a partially used page (1 block free)
            size_class = int(c)
            size_block = WORD * size_class
            nblocks = (pagesize - hdrsize) // size_block
            link(pageaddr, size_class, size_block, nblocks, nblocks-1)
        elif c == '.':    # a free, but initialized, page
            llarena.arena_reserve(pageaddr, llmemory.sizeof(llmemory.Address))
            pageaddr.address[0] = ac.current_arena.freepages
            ac.current_arena.freepages = pageaddr
            ac.current_arena.nfreepages += 1
        elif c == '#':    # a random full page, in the list 'full_pages'
            size_class = fill_with_objects or 1
            size_block = WORD * size_class
            nblocks = (pagesize - hdrsize) // size_block
            link(pageaddr, size_class, size_block, nblocks, nblocks)
        elif c == '/':    # a page 1/3 allocated, 1/3 freed, 1/3 uninit objs
            size_class = fill_with_objects or 1
            size_block = WORD * size_class
            nblocks = (pagesize - hdrsize) // size_block
            link(pageaddr, size_class, size_block, nblocks, nblocks // 3,
                 step=2)
    #
    ac.allocate_new_arena = lambda: should_not_allocate_new_arenas
    return ac
Example #42
def test_arena_new_view():
    a = arena_malloc(50, False)
    arena_reserve(a, precomputed_size)
    # we can now allocate the same space in new view
    b = arena_new_view(a)
    arena_reserve(b, precomputed_size)
Example #43
def test_arena():
    S = lltype.Struct('S', ('x', lltype.Signed))
    SPTR = lltype.Ptr(S)
    ssize = llmemory.raw_malloc_usage(llmemory.sizeof(S))
    myarenasize = 2*ssize+1
    a = arena_malloc(myarenasize, False)
    assert a != llmemory.NULL
    assert a + 3 != llmemory.NULL

    arena_reserve(a, llmemory.sizeof(S))
    s1_ptr1 = cast_adr_to_ptr(a, SPTR)
    s1_ptr1.x = 1
    s1_ptr2 = cast_adr_to_ptr(a, SPTR)
    assert s1_ptr2.x == 1
    assert s1_ptr1 == s1_ptr2

    py.test.raises(ArenaError, arena_reserve, a + ssize + 1,  # misaligned
                   llmemory.sizeof(S))
    arena_reserve(a + ssize + 1, llmemory.sizeof(S), check_alignment=False)
    s2_ptr1 = cast_adr_to_ptr(a + ssize + 1, SPTR)
    py.test.raises(lltype.UninitializedMemoryAccess, 's2_ptr1.x')
    s2_ptr1.x = 2
    s2_ptr2 = cast_adr_to_ptr(a + ssize + 1, SPTR)
    assert s2_ptr2.x == 2
    assert s2_ptr1 == s2_ptr2
    assert s1_ptr1 != s2_ptr1
    assert not (s2_ptr2 == s1_ptr2)
    assert s1_ptr1 == cast_adr_to_ptr(a, SPTR)

    S2 = lltype.Struct('S2', ('y',lltype.Char))
    S2PTR = lltype.Ptr(S2)
    py.test.raises(lltype.InvalidCast, cast_adr_to_ptr, a, S2PTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a+1, SPTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a+ssize, SPTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a+2*ssize, SPTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a+2*ssize+1, SPTR)
    py.test.raises(ArenaError, arena_reserve, a+1, llmemory.sizeof(S),
                   False)
    py.test.raises(ArenaError, arena_reserve, a+ssize, llmemory.sizeof(S),
                   False)
    py.test.raises(ArenaError, arena_reserve, a+2*ssize, llmemory.sizeof(S),
                   False)
    py.test.raises(ArenaError, arena_reserve, a+2*ssize+1, llmemory.sizeof(S),
                   False)

    arena_reset(a, myarenasize, True)
    py.test.raises(ArenaError, cast_adr_to_ptr, a, SPTR)
    arena_reserve(a, llmemory.sizeof(S))
    s1_ptr1 = cast_adr_to_ptr(a, SPTR)
    assert s1_ptr1.x == 0
    s1_ptr1.x = 5

    arena_reserve(a + ssize, llmemory.sizeof(S2), check_alignment=False)
    s2_ptr1 = cast_adr_to_ptr(a + ssize, S2PTR)
    assert s2_ptr1.y == '\x00'
    s2_ptr1.y = 'X'

    assert cast_adr_to_ptr(a + 0, SPTR).x == 5
    assert cast_adr_to_ptr((a + ssize + 1) - 1, S2PTR).y == 'X'

    assert (a + 4) - (a + 1) == 3
Example #44
 def walk_page(self, page, block_size, ok_to_free_func):
     """Walk over all objects in a page, and ask ok_to_free_func()."""
     #
     # 'freeblock' is the next free block
     freeblock = page.freeblock
     #
     # 'prevfreeblockat' is the address of where 'freeblock' was read from.
     prevfreeblockat = lltype.direct_fieldptr(page, 'freeblock')
     prevfreeblockat = llmemory.cast_ptr_to_adr(prevfreeblockat)
     #
     obj = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
     obj += self.hdrsize
     surviving = 0    # initially
     skip_free_blocks = page.nfree
     #
     while True:
         #
         if obj == freeblock:
             #
             if skip_free_blocks == 0:
                 #
                 # 'obj' points to the first uninitialized block,
                 # or to the end of the page if there are none.
                 break
             #
             # 'obj' points to a free block.  It means that
             # 'prevfreeblockat.address[0]' does not need to be updated.
             # Just read the next free block from 'obj.address[0]'.
             skip_free_blocks -= 1
             prevfreeblockat = obj
             freeblock = obj.address[0]
             #
         else:
             # 'obj' points to a valid object.
             ll_assert(freeblock > obj,
                       "freeblocks are linked out of order")
             #
             if ok_to_free_func(obj):
                 #
                 # The object should die.
                 llarena.arena_reset(obj, _dummy_size(block_size), 0)
                 llarena.arena_reserve(obj,
                                       llmemory.sizeof(llmemory.Address))
                 # Insert 'obj' in the linked list of free blocks.
                 prevfreeblockat.address[0] = obj
                 prevfreeblockat = obj
                 obj.address[0] = freeblock
                 #
                 # Update the number of free objects in the page.
                 page.nfree += 1
                 #
             else:
                 # The object survives.
                 surviving += 1
         #
         obj += block_size
     #
     # Update the global total size of objects.
     self.total_memory_used += r_uint(surviving * block_size)
     #
     # Return the number of surviving objects.
     return surviving
Example #45
 def reserve(i):
     b = a + i * llmemory.raw_malloc_usage(precomputed_size)
     arena_reserve(b, precomputed_size)
     return b
Example #46
def test_arena():
    S = lltype.Struct('S', ('x', lltype.Signed))
    SPTR = lltype.Ptr(S)
    ssize = llmemory.raw_malloc_usage(llmemory.sizeof(S))
    myarenasize = 2 * ssize + 1
    a = arena_malloc(myarenasize, False)
    assert a != llmemory.NULL
    assert a + 3 != llmemory.NULL

    arena_reserve(a, llmemory.sizeof(S))
    s1_ptr1 = cast_adr_to_ptr(a, SPTR)
    s1_ptr1.x = 1
    s1_ptr2 = cast_adr_to_ptr(a, SPTR)
    assert s1_ptr2.x == 1
    assert s1_ptr1 == s1_ptr2

    py.test.raises(
        ArenaError,
        arena_reserve,
        a + ssize + 1,  # misaligned
        llmemory.sizeof(S))
    arena_reserve(a + ssize + 1, llmemory.sizeof(S), check_alignment=False)
    s2_ptr1 = cast_adr_to_ptr(a + ssize + 1, SPTR)
    py.test.raises(lltype.UninitializedMemoryAccess, 's2_ptr1.x')
    s2_ptr1.x = 2
    s2_ptr2 = cast_adr_to_ptr(a + ssize + 1, SPTR)
    assert s2_ptr2.x == 2
    assert s2_ptr1 == s2_ptr2
    assert s1_ptr1 != s2_ptr1
    assert not (s2_ptr2 == s1_ptr2)
    assert s1_ptr1 == cast_adr_to_ptr(a, SPTR)

    S2 = lltype.Struct('S2', ('y', lltype.Char))
    S2PTR = lltype.Ptr(S2)
    py.test.raises(lltype.InvalidCast, cast_adr_to_ptr, a, S2PTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a + 1, SPTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a + ssize, SPTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a + 2 * ssize, SPTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a + 2 * ssize + 1, SPTR)
    py.test.raises(ArenaError, arena_reserve, a + 1, llmemory.sizeof(S), False)
    py.test.raises(ArenaError, arena_reserve, a + ssize, llmemory.sizeof(S),
                   False)
    py.test.raises(ArenaError, arena_reserve, a + 2 * ssize,
                   llmemory.sizeof(S), False)
    py.test.raises(ArenaError, arena_reserve, a + 2 * ssize + 1,
                   llmemory.sizeof(S), False)

    arena_reset(a, myarenasize, True)
    py.test.raises(ArenaError, cast_adr_to_ptr, a, SPTR)
    arena_reserve(a, llmemory.sizeof(S))
    s1_ptr1 = cast_adr_to_ptr(a, SPTR)
    assert s1_ptr1.x == 0
    s1_ptr1.x = 5

    arena_reserve(a + ssize, llmemory.sizeof(S2), check_alignment=False)
    s2_ptr1 = cast_adr_to_ptr(a + ssize, S2PTR)
    assert s2_ptr1.y == '\x00'
    s2_ptr1.y = 'X'

    assert cast_adr_to_ptr(a + 0, SPTR).x == 5
    assert cast_adr_to_ptr((a + ssize + 1) - 1, S2PTR).y == 'X'

    assert (a + 4) - (a + 1) == 3
Example #47
def test_arena_new_view():
    a = arena_malloc(50, False)
    arena_reserve(a, precomputed_size)
    # we can now allocate the same space in new view
    b = arena_new_view(a)
    arena_reserve(b, precomputed_size)
Example #48
 def walk_page(self, page, block_size, ok_to_free_func):
     """Walk over all objects in a page, and ask ok_to_free_func()."""
     #
     # 'freeblock' is the next free block
     freeblock = page.freeblock
     #
     # 'prevfreeblockat' is the address of where 'freeblock' was read from.
     prevfreeblockat = lltype.direct_fieldptr(page, 'freeblock')
     prevfreeblockat = llmemory.cast_ptr_to_adr(prevfreeblockat)
     #
     obj = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
     obj += self.hdrsize
     surviving = 0  # initially
     skip_free_blocks = page.nfree
     #
     while True:
         #
         if obj == freeblock:
             #
             if skip_free_blocks == 0:
                 #
                 # 'obj' points to the first uninitialized block,
                 # or to the end of the page if there are none.
                 break
             #
             # 'obj' points to a free block.  It means that
             # 'prevfreeblockat.address[0]' does not need to be updated.
             # Just read the next free block from 'obj.address[0]'.
             skip_free_blocks -= 1
             prevfreeblockat = obj
             freeblock = obj.address[0]
             #
         else:
             # 'obj' points to a valid object.
             ll_assert(freeblock > obj,
                       "freeblocks are linked out of order")
             #
             if ok_to_free_func(obj):
                 #
                 # The object should die.
                 llarena.arena_reset(obj, _dummy_size(block_size), 0)
                 llarena.arena_reserve(obj,
                                       llmemory.sizeof(llmemory.Address))
                 # Insert 'obj' in the linked list of free blocks.
                 prevfreeblockat.address[0] = obj
                 prevfreeblockat = obj
                 obj.address[0] = freeblock
                 #
                 # Update the number of free objects in the page.
                 page.nfree += 1
                 #
             else:
                 # The object survives.
                 surviving += 1
         #
         obj += block_size
     #
     # Update the global total size of objects.
     self.total_memory_used += r_uint(surviving * block_size)
     #
     # Return the number of surviving objects.
     return surviving
Example #49
 def reserve(i):
     b = a + i * llmemory.raw_malloc_usage(precomputed_size)
     arena_reserve(b, precomputed_size)
     return b
Example #50
def arena_collection_for_test(pagesize, pagelayout, fill_with_objects=False):
    assert " " not in pagelayout.rstrip(" ")
    nb_pages = len(pagelayout)
    arenasize = pagesize * (nb_pages + 1) - 1
    ac = ArenaCollection(arenasize, pagesize, 9 * WORD)

    #
    def link(pageaddr, size_class, size_block, nblocks, nusedblocks, step=1):
        assert step in (1, 2)
        llarena.arena_reserve(pageaddr, llmemory.sizeof(PAGE_HEADER))
        page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR)
        if step == 1:
            page.nfree = 0
            nuninitialized = nblocks - nusedblocks
        else:
            page.nfree = nusedblocks
            nuninitialized = nblocks - 2 * nusedblocks
        page.freeblock = pageaddr + hdrsize + nusedblocks * size_block
        if nusedblocks < nblocks:
            chainedlists = ac.page_for_size
        else:
            chainedlists = ac.full_page_for_size
        page.nextpage = chainedlists[size_class]
        page.arena = ac.current_arena
        chainedlists[size_class] = page
        if fill_with_objects:
            for i in range(0, nusedblocks * step, step):
                objaddr = pageaddr + hdrsize + i * size_block
                llarena.arena_reserve(objaddr, _dummy_size(size_block))
            if step == 2:
                prev = 'page.freeblock'
                for i in range(1, nusedblocks * step, step):
                    holeaddr = pageaddr + hdrsize + i * size_block
                    llarena.arena_reserve(holeaddr,
                                          llmemory.sizeof(llmemory.Address))
                    exec('%s = holeaddr' % prev, globals(), locals())
                    prevhole = holeaddr
                    prev = 'prevhole.address[0]'
                endaddr = pageaddr + hdrsize + 2 * nusedblocks * size_block
                exec('%s = endaddr' % prev, globals(), locals())
        assert ac._nuninitialized(page, size_class) == nuninitialized

    #
    ac.allocate_new_arena()
    num_initialized_pages = len(pagelayout.rstrip(" "))
    ac._startpageaddr = ac.current_arena.freepages
    if pagelayout.endswith(" "):
        ac.current_arena.freepages += pagesize * num_initialized_pages
    else:
        ac.current_arena.freepages = NULL
    ac.num_uninitialized_pages -= num_initialized_pages
    #
    for i in reversed(range(num_initialized_pages)):
        pageaddr = pagenum(ac, i)
        c = pagelayout[i]
        if '1' <= c <= '9':  # a partially used page (1 block free)
            size_class = int(c)
            size_block = WORD * size_class
            nblocks = (pagesize - hdrsize) // size_block
            link(pageaddr, size_class, size_block, nblocks, nblocks - 1)
        elif c == '.':  # a free, but initialized, page
            llarena.arena_reserve(pageaddr, llmemory.sizeof(llmemory.Address))
            pageaddr.address[0] = ac.current_arena.freepages
            ac.current_arena.freepages = pageaddr
            ac.current_arena.nfreepages += 1
        elif c == '#':  # a random full page, in the list 'full_pages'
            size_class = fill_with_objects or 1
            size_block = WORD * size_class
            nblocks = (pagesize - hdrsize) // size_block
            link(pageaddr, size_class, size_block, nblocks, nblocks)
        elif c == '/':  # a page 1/3 allocated, 1/3 freed, 1/3 uninit objs
            size_class = fill_with_objects or 1
            size_block = WORD * size_class
            nblocks = (pagesize - hdrsize) // size_block
            link(pageaddr,
                 size_class,
                 size_block,
                 nblocks,
                 nblocks // 3,
                 step=2)
    #
    ac.allocate_new_arena = lambda: should_not_allocate_new_arenas
    return ac
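
Taken together, the examples above follow one llarena idiom: arena_malloc() a raw arena, arena_reserve() a slot of a known size, and only then cast the reserved address to a typed pointer before touching memory. Below is a minimal, self-contained sketch of that pattern, not taken from any single example above; it assumes the RPython toolchain is importable, and the struct name POINT is illustrative only.

from rpython.rtyper.lltypesystem import lltype, llmemory
from rpython.rtyper.lltypesystem.llarena import (
    arena_malloc, arena_reserve, arena_reset, arena_free)

# Illustrative raw (non-GC) struct; any lltype.Struct works the same way.
POINT = lltype.Struct('POINT', ('x', lltype.Signed), ('y', lltype.Signed))
PPTR = lltype.Ptr(POINT)

a = arena_malloc(64, False)                # raw arena, not zero-filled
arena_reserve(a, llmemory.sizeof(POINT))   # declare a POINT at offset 0
p = llmemory.cast_adr_to_ptr(a, PPTR)      # typed view of the reserved slot
p.x = 1                                    # write before reading: the arena
p.y = 2                                    # was allocated uninitialized
assert p.x == 1 and p.y == 2
arena_reset(a, 64, True)                   # zero and forget the contents
arena_free(a)                              # release the whole arena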