Example #1
    def double_space_size(self):
        self.red_zone = 0
        old_fromspace = self.fromspace
        newsize = self.space_size * 2
        newspace = llarena.arena_malloc(newsize, True)
        if not newspace:
            return False  # out of memory
        llarena.arena_free(old_fromspace)
        self.fromspace = newspace
        # now self.tospace contains the existing objects and
        # self.fromspace is the freshly allocated bigger space

        self.semispace_collect(size_changing=True)
        self.top_of_space = self.tospace + newsize
        # now self.tospace is the freshly allocated bigger space,
        # and self.fromspace is the old smaller space, now empty
        llarena.arena_free(self.fromspace)

        newspace = llarena.arena_malloc(newsize, True)
        if not newspace:
            # Complex failure case: we have in self.tospace a big chunk
            # of memory, and the two smaller original spaces are already gone.
            # Unsure if it's worth the effort, but we can artificially
            # split self.tospace in two again...
            self.max_space_size = self.space_size  # don't try to grow again,
            #              because doing arena_free(self.fromspace) would crash
            self.fromspace = self.tospace + self.space_size
            self.top_of_space = self.fromspace
            ll_assert(self.free <= self.top_of_space,
                      "unexpected growth of GC space usage during collect")
            return False  # out of memory

        self.fromspace = newspace
        self.space_size = newsize
        return True  # success
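
For context, the usual caller of double_space_size() is a bump-pointer allocator that has run out of room in tospace. Below is a minimal sketch of such a caller; try_allocate() and totalsize are illustrative names, not part of the PyPy source:

    def try_allocate(self, totalsize):
        # Hypothetical bump-pointer allocation out of tospace.
        if self.free + totalsize > self.top_of_space:
            # No room left: try to double both semispaces.  Growing also
            # runs a collection, copying live objects into the new space.
            if not self.double_space_size():
                raise MemoryError
            if self.free + totalsize > self.top_of_space:
                raise MemoryError   # still too big even after growing
        result = self.free
        self.free += totalsize
        return result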
Example #2
def test_address_order():
    a = arena_malloc(24, False)
    assert eq(a, a)
    assert lt(a, a + 1)
    assert lt(a + 5, a + 20)

    b = arena_malloc(24, False)
    if a > b:
        a, b = b, a
    assert lt(a, b)
    assert lt(a + 19, b)
    assert lt(a, b + 19)

    c = b + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(c, precomputed_size)
    assert lt(b, c)
    assert lt(a, c)
    assert lt(llmemory.NULL, c)
    d = c + llmemory.offsetof(SX, 'x')
    assert lt(c, d)
    assert lt(b, d)
    assert lt(a, d)
    assert lt(llmemory.NULL, d)
    e = c + precomputed_size
    assert lt(d, e)
    assert lt(c, e)
    assert lt(b, e)
    assert lt(a, e)
    assert lt(llmemory.NULL, e)
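
eq() and lt() are helpers from the surrounding test module. Presumably they are thin wrappers over address comparison, along the lines of this sketch (not the verbatim helpers):

def eq(x, y):
    # llmemory addresses support ==/!=, even across arenas
    return x == y

def lt(x, y):
    # and a total order, which lets the test compare addresses
    # taken from two independently malloced arenas
    return x < y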
Example #3
    def setup(self):
        #self.total_collection_time = 0.0
        self.total_collection_count = 0

        self.space_size = self.param_space_size
        self.max_space_size = self.param_max_space_size
        self.red_zone = 0

        #self.program_start_time = time.time()
        self.tospace = llarena.arena_malloc(self.space_size, True)
        ll_assert(bool(self.tospace), "couldn't allocate tospace")
        self.top_of_space = self.tospace + self.space_size
        self.fromspace = llarena.arena_malloc(self.space_size, True)
        ll_assert(bool(self.fromspace), "couldn't allocate fromspace")
        self.free = self.tospace
        MovingGCBase.setup(self)
        self.objects_with_finalizers = self.AddressDeque()
        self.objects_with_light_finalizers = self.AddressStack()
        self.objects_with_weakrefs = self.AddressStack()
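
After setup(), the invariant is: self.free points at the next allocatable byte of tospace and may advance up to self.top_of_space, while fromspace is an equally sized, empty copy target for the next semispace_collect(). A hypothetical helper makes the invariant explicit (remaining_room() is not part of the PyPy source):

    def remaining_room(self):
        # Bytes still allocatable in tospace before a collect is needed.
        return self.top_of_space - self.free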
Example #4
 def malloc(self, size):
     nsize = raw_malloc_usage(size)
     ll_assert(nsize > 0, "malloc: size is null or negative")
     ll_assert(nsize <= self.small_request_threshold, "malloc: size too big")
     ll_assert((nsize & (WORD-1)) == 0, "malloc: size is not aligned")
     #
     result = llarena.arena_malloc(nsize, False)
     llarena.arena_reserve(result, size)
     self.all_objects.append((result, nsize))
     self.total_memory_used += nsize
     return result
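
The third ll_assert requires word-aligned sizes, so callers are expected to round their requests up to a multiple of WORD beforehand. A minimal sketch of that rounding, assuming WORD is a power of two (e.g. 8 on a 64-bit build):

WORD = 8   # assumption: 64-bit machine word

def round_up_to_word(nbytes):
    # round up to the next multiple of WORD, satisfying the
    # "malloc: size is not aligned" assertion above
    return (nbytes + WORD - 1) & ~(WORD - 1)

assert round_up_to_word(1) == 8
assert round_up_to_word(8) == 8
assert round_up_to_word(9) == 16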
Example #5
def test_shrink_obj():
    from rpython.memory.gcheader import GCHeaderBuilder
    HDR = lltype.Struct('HDR', ('h', lltype.Signed))
    gcheaderbuilder = GCHeaderBuilder(HDR)
    size_gc_header = gcheaderbuilder.size_gc_header
    S = lltype.GcStruct('S', ('x', lltype.Signed),
                             ('a', lltype.Array(lltype.Signed)))
    myarenasize = 200
    a = arena_malloc(myarenasize, False)
    arena_reserve(a, size_gc_header + llmemory.sizeof(S, 10))
    arena_shrink_obj(a, size_gc_header + llmemory.sizeof(S, 5))
    arena_reset(a, size_gc_header + llmemory.sizeof(S, 5), False)
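
arena_shrink_obj() declares that the object reserved at a now occupies only the smaller size (S with 5 array items instead of 10), which is why the arena_reset() over the smaller size is legal afterwards. For scale, the two footprints can be printed directly; a hedged illustration (exact numbers depend on the platform word size):

    # sizeof(S, n) covers the Signed field 'x', the array length word
    # and n Signed items, so going from 10 to 5 items gives back 5 words.
    for n in (10, 5):
        print llmemory.raw_malloc_usage(llmemory.sizeof(S, n))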
Example #6
def test_address_eq_as_int():
    a = arena_malloc(50, False)
    arena_reserve(a, precomputed_size)
    p = llmemory.cast_adr_to_ptr(a, SPTR)
    a1 = llmemory.cast_ptr_to_adr(p)
    assert a == a1
    assert not (a != a1)
    assert (a + 1) != a1
    assert not ((a + 1) == a1)
    py.test.skip("cast_adr_to_int() is hard to get consistent")
    assert llmemory.cast_adr_to_int(a) == llmemory.cast_adr_to_int(a1)
    assert llmemory.cast_adr_to_int(a + 1) == llmemory.cast_adr_to_int(a1) + 1
Example #7
def test_partial_arena_reset():
    a = arena_malloc(72, False)

    def reserve(i):
        b = a + i * llmemory.raw_malloc_usage(precomputed_size)
        arena_reserve(b, precomputed_size)
        return b

    blist = []
    plist = []
    for i in range(4):
        b = reserve(i)
        (b + llmemory.offsetof(SX, 'x')).signed[0] = 100 + i
        blist.append(b)
        plist.append(llmemory.cast_adr_to_ptr(b, SPTR))
    # clear blist[1] and blist[2] but not blist[0] nor blist[3]
    arena_reset(blist[1],
                llmemory.raw_malloc_usage(precomputed_size) * 2, False)
    py.test.raises(RuntimeError, "plist[1].x")  # marked as freed
    py.test.raises(RuntimeError, "plist[2].x")  # marked as freed
    # re-reserve object at index 1 and 2
    blist[1] = reserve(1)
    blist[2] = reserve(2)
    # check via object pointers
    assert plist[0].x == 100
    assert plist[3].x == 103
    py.test.raises(RuntimeError, "plist[1].x")  # marked as freed
    py.test.raises(RuntimeError, "plist[2].x")  # marked as freed
    # but we can still cast the old ptrs to addresses, which compare equal
    # to the new ones we got
    assert llmemory.cast_ptr_to_adr(plist[1]) == blist[1]
    assert llmemory.cast_ptr_to_adr(plist[2]) == blist[2]
    # check via addresses
    assert (blist[0] + llmemory.offsetof(SX, 'x')).signed[0] == 100
    assert (blist[3] + llmemory.offsetof(SX, 'x')).signed[0] == 103
    py.test.raises(lltype.UninitializedMemoryAccess,
                   "(blist[1] + llmemory.offsetof(SX, 'x')).signed[0]")
    py.test.raises(lltype.UninitializedMemoryAccess,
                   "(blist[2] + llmemory.offsetof(SX, 'x')).signed[0]")
    # clear and zero-fill the area over blist[0] and blist[1]
    arena_reset(blist[0],
                llmemory.raw_malloc_usage(precomputed_size) * 2, True)
    # re-reserve and check it's zero
    blist[0] = reserve(0)
    blist[1] = reserve(1)
    assert (blist[0] + llmemory.offsetof(SX, 'x')).signed[0] == 0
    assert (blist[1] + llmemory.offsetof(SX, 'x')).signed[0] == 0
    assert (blist[3] + llmemory.offsetof(SX, 'x')).signed[0] == 103
    py.test.raises(lltype.UninitializedMemoryAccess,
                   "(blist[2] + llmemory.offsetof(SX, 'x')).signed[0]")
Example #8
def test_arena_protect():
    a = arena_malloc(100, False)
    S = lltype.Struct('S', ('x', lltype.Signed))
    arena_reserve(a, llmemory.sizeof(S))
    p = llmemory.cast_adr_to_ptr(a, lltype.Ptr(S))
    p.x = 123
    assert p.x == 123
    arena_protect(a, 100, True)
    py.test.raises(ArenaError, arena_reserve, a + 48, llmemory.sizeof(S))
    py.test.raises(RuntimeError, "p.x")
    py.test.raises(RuntimeError, "p.x = 124")
    arena_protect(a, 100, False)
    assert p.x == 123
    p.x = 125
    assert p.x == 125
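
arena_protect() toggles access to the whole range, much like mprotect(): while protected, even arena_reserve() on the emulated arena raises ArenaError, and plain reads and writes raise RuntimeError (or segfault once translated). The usage pattern is symmetric; a sketch:

    arena_protect(a, 100, True)    # arena now inaccessible
    # ... code that must not touch the arena ...
    arena_protect(a, 100, False)   # accessible again, contents preserved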
Example #9
def test_look_inside_object():
    # this code is also used in translation tests below
    myarenasize = 50
    a = arena_malloc(myarenasize, False)
    b = a + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(b, precomputed_size)
    (b + llmemory.offsetof(SX, 'x')).signed[0] = 123
    assert llmemory.cast_adr_to_ptr(b, SPTR).x == 123
    llmemory.cast_adr_to_ptr(b, SPTR).x += 1
    assert (b + llmemory.offsetof(SX, 'x')).signed[0] == 124
    arena_reset(a, myarenasize, True)
    arena_reserve(b, round_up_for_allocation(llmemory.sizeof(SX)))
    assert llmemory.cast_adr_to_ptr(b, SPTR).x == 0
    arena_free(a)
    return 42
Example #10
 def test_sort(self):
     AddressStack = get_address_stack(chunk_size=15)
     lla = llarena.arena_malloc(10, 2)
     addrs = [lla + i for i in range(10)]
     for _ in range(13):
         ll = AddressStack()
         addr_copy = addrs[:]
         random.shuffle(addr_copy)
         for i in addr_copy:
             ll.append(i)
         ll.sort()
         expected = range(10)
         for i in expected:
             a = ll.pop()
             assert a == addrs[i]
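
The property under test: after sort(), successive pop() calls return the addresses in increasing order. A pure-Python analogue of the same property, with integers standing in for addresses:

import random

addrs = range(10)
stack = list(addrs)
random.shuffle(stack)
stack.sort(reverse=True)           # smallest element ends up on top
assert [stack.pop() for _ in addrs] == list(addrs)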
Example #11
 def fn(argv):
     testrun = int(argv[1])
     a = arena_malloc(65536, False)
     arena_reserve(a, llmemory.sizeof(S))
     p = llmemory.cast_adr_to_ptr(a + 23432, lltype.Ptr(S))
     p.x = 123
     assert p.x == 123
     arena_protect(a, 65536, True)
     result = 0
     if testrun == 1:
         print p.x  # segfault
     if testrun == 2:
         p.x = 124  # segfault
     arena_protect(a, 65536, False)
     p.x += 10
     print p.x
     return 0
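
This is the translated twin of the arena_protect() test above: once compiled there is no ArenaError emulation, so testruns 1 and 2 are expected to die with a real segfault, while testrun 0 runs to completion. A hedged summary (the surrounding test harness is not shown here):

     # Expected behaviour per testrun (assumption about the harness):
     #   testrun == 0: no access while protected; prints 133 (123 + 10)
     #   testrun == 1: read of p.x on a protected page  -> SIGSEGV
     #   testrun == 2: write to p.x on a protected page -> SIGSEGV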
Example #12
 def allocate_new_arena(self):
     """Loads in self.current_arena the arena to allocate from next."""
     #
     # Pick an arena from 'arenas_lists[i]', with i as small as possible
     # but > 0.  Use caching with 'min_empty_nfreepages', which guarantees
     # that 'arenas_lists[1:min_empty_nfreepages]' are all empty.
     i = self.min_empty_nfreepages
     while i < self.max_pages_per_arena:
         #
         if self.arenas_lists[i] != ARENA_NULL:
             #
             # Found it.
             self.current_arena = self.arenas_lists[i]
             self.arenas_lists[i] = self.current_arena.nextarena
             return
         #
         i += 1
         self.min_empty_nfreepages = i
     #
     # No more arena with any free page.  We must allocate a new arena.
     if not we_are_translated():
         for a in self._all_arenas():
             assert a.nfreepages == 0
     #
     # 'arena_base' points to the start of malloced memory; it might not
     # be a page-aligned address
     arena_base = llarena.arena_malloc(self.arena_size, False)
     if not arena_base:
         raise MemoryError("couldn't allocate the next arena")
     arena_end = arena_base + self.arena_size
     #
     # 'firstpage' points to the first unused page
     firstpage = start_of_page(arena_base + self.page_size - 1,
                               self.page_size)
     # 'npages' is the number of full pages just allocated
     npages = (arena_end - firstpage) // self.page_size
     #
     # Allocate an ARENA object and initialize it
     arena = lltype.malloc(ARENA, flavor='raw', track_allocation=False)
     arena.base = arena_base
     arena.nfreepages = 0        # they are all uninitialized pages
     arena.totalpages = npages
     arena.freepages = firstpage
     self.num_uninitialized_pages = npages
     self.current_arena = arena
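
The page bookkeeping discards the fragment before firstpage and the tail after the last full page. A plain-integer check of the same arithmetic, with illustrative values page_size = 4096 and arena_size = 65536:

arena_base = 1000                    # deliberately not page-aligned
arena_end = arena_base + 65536
firstpage = (arena_base + 4096 - 1) // 4096 * 4096   # start_of_page()
npages = (arena_end - firstpage) // 4096
assert firstpage == 4096
assert npages == 15                  # 16 pages' worth minus fragments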
Example #13
    def allocate_new_arena(self):
        """Loads in self.current_arena the arena to allocate from next."""
        #
        if self._pick_next_arena():
            return
        #
        # Maybe we are incrementally collecting, in which case an arena
        # could have more free pages thrown into it than arenas_lists[]
        # accounts for.  Rehash and retry.
        self._rehash_arenas_lists()
        if self._pick_next_arena():
            return
        #
        # No more arena with any free page.  We must allocate a new arena.
        if not we_are_translated():
            for a in self._all_arenas():
                assert a.nfreepages == 0
        #
        # 'arena_base' points to the start of malloced memory; it might not
        # be a page-aligned address
        arena_base = llarena.arena_malloc(self.arena_size, False)
        self.total_memory_alloced += self.arena_size
        self.peak_memory_alloced = max(self.total_memory_alloced,
                                       self.peak_memory_alloced)

        if not arena_base:
            out_of_memory("out of memory: couldn't allocate the next arena")
        arena_end = arena_base + self.arena_size
        #
        # 'firstpage' points to the first unused page
        firstpage = start_of_page(arena_base + self.page_size - 1,
                                  self.page_size)
        # 'npages' is the number of full pages just allocated
        npages = (arena_end - firstpage) // self.page_size
        #
        # Allocate an ARENA object and initialize it
        arena = lltype.malloc(ARENA, flavor='raw', track_allocation=False)
        arena.base = arena_base
        arena.nfreepages = 0  # they are all uninitialized pages
        arena.totalpages = npages
        arena.freepages = firstpage
        self.num_uninitialized_pages = npages
        self.current_arena = arena
        self.arenas_count += 1
Example #14
def test_replace_object_with_stub():
    from rpython.memory.gcheader import GCHeaderBuilder
    HDR = lltype.Struct('HDR', ('x', lltype.Signed))
    S = lltype.GcStruct('S', ('y', lltype.Signed), ('z', lltype.Signed))
    STUB = lltype.GcStruct('STUB', ('t', lltype.Char))
    gcheaderbuilder = GCHeaderBuilder(HDR)
    size_gc_header = gcheaderbuilder.size_gc_header
    ssize = llmemory.raw_malloc_usage(llmemory.sizeof(S))

    a = arena_malloc(13*ssize, True)
    hdraddr = a + 3*ssize
    arena_reserve(hdraddr, size_gc_header + llmemory.sizeof(S))
    hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(HDR))
    hdr.x = 42
    obj = llmemory.cast_adr_to_ptr(hdraddr + size_gc_header, lltype.Ptr(S))
    obj.y = -5
    obj.z = -6

    hdraddr = llmemory.cast_ptr_to_adr(obj) - size_gc_header
    arena_reset(hdraddr, size_gc_header + llmemory.sizeof(S), False)
    arena_reserve(hdraddr, size_gc_header + llmemory.sizeof(STUB))

    # check that it is possible to reach the newly reserved HDR+STUB
    # via the header of the old 'obj' pointer, both via the existing
    # 'hdraddr':
    hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(HDR))
    hdr.x = 46
    stub = llmemory.cast_adr_to_ptr(hdraddr + size_gc_header, lltype.Ptr(STUB))
    stub.t = '!'

    # and via a (now-invalid) pointer to the old 'obj': (this is needed
    # because during a garbage collection there are still pointers to
    # the old 'obj' around to be fixed)
    hdraddr = llmemory.cast_ptr_to_adr(obj) - size_gc_header
    hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(HDR))
    assert hdr.x == 46
    stub = llmemory.cast_adr_to_ptr(hdraddr + size_gc_header,
                                    lltype.Ptr(STUB))
    assert stub.t == '!'
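
The stub replacement is only sound because STUB is no bigger than the S it replaces (the GC header is the same on both sides); otherwise the arena_reserve() after the arena_reset() would spill past the originally reserved space. A sanity check of that precondition:

    assert (llmemory.raw_malloc_usage(llmemory.sizeof(STUB))
            <= llmemory.raw_malloc_usage(llmemory.sizeof(S)))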
Example #15
 def allocate_new_arena(self):
     """Loads in self.current_arena the arena to allocate from next."""
     #
     if self._pick_next_arena():
         return
     #
     # Maybe we are incrementally collecting, in which case an arena
     # could have more free pages thrown into it than arenas_lists[]
     # accounts for.  Rehash and retry.
     self._rehash_arenas_lists()
     if self._pick_next_arena():
         return
     #
     # No more arena with any free page.  We must allocate a new arena.
     if not we_are_translated():
         for a in self._all_arenas():
             assert a.nfreepages == 0
     #
     # 'arena_base' points to the start of malloced memory; it might not
     # be a page-aligned address
     arena_base = llarena.arena_malloc(self.arena_size, False)
     if not arena_base:
         out_of_memory("out of memory: couldn't allocate the next arena")
     arena_end = arena_base + self.arena_size
     #
     # 'firstpage' points to the first unused page
     firstpage = start_of_page(arena_base + self.page_size - 1,
                               self.page_size)
     # 'npages' is the number of full pages just allocated
     npages = (arena_end - firstpage) // self.page_size
     #
     # Allocate an ARENA object and initialize it
     arena = lltype.malloc(ARENA, flavor='raw', track_allocation=False)
     arena.base = arena_base
     arena.nfreepages = 0        # they are all uninitialized pages
     arena.totalpages = npages
     arena.freepages = firstpage
     self.num_uninitialized_pages = npages
     self.current_arena = arena
Example #16
 def f():
     a = llarena.arena_malloc(800, False)
     llarena.arena_reset(a, 800, 2)
     llarena.arena_free(a)
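
Note the third argument to arena_reset() here is 2, not a plain bool: llarena accepts small integers selecting a zeroing strategy. A hedged summary, quoted from memory of the arena_reset() docstring (verify against your PyPy checkout):

# arena_reset(arena_addr, size, zero) -- 'zero' selects a strategy:
#   0: don't fill the area with zeroes
#   1: clear, optimized for a very large area of memory
#   2: clear, optimized for a smaller range of memory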
Example #17
def test_arena_new_view():
    a = arena_malloc(50, False)
    arena_reserve(a, precomputed_size)
    # we can now allocate the same space in new view
    b = arena_new_view(a)
    arena_reserve(b, precomputed_size)
Example #18
def test_arena():
    S = lltype.Struct('S', ('x', lltype.Signed))
    SPTR = lltype.Ptr(S)
    ssize = llmemory.raw_malloc_usage(llmemory.sizeof(S))
    myarenasize = 2 * ssize + 1
    a = arena_malloc(myarenasize, False)
    assert a != llmemory.NULL
    assert a + 3 != llmemory.NULL

    arena_reserve(a, llmemory.sizeof(S))
    s1_ptr1 = cast_adr_to_ptr(a, SPTR)
    s1_ptr1.x = 1
    s1_ptr2 = cast_adr_to_ptr(a, SPTR)
    assert s1_ptr2.x == 1
    assert s1_ptr1 == s1_ptr2

    py.test.raises(
        ArenaError,
        arena_reserve,
        a + ssize + 1,  # misaligned
        llmemory.sizeof(S))
    arena_reserve(a + ssize + 1, llmemory.sizeof(S), check_alignment=False)
    s2_ptr1 = cast_adr_to_ptr(a + ssize + 1, SPTR)
    py.test.raises(lltype.UninitializedMemoryAccess, 's2_ptr1.x')
    s2_ptr1.x = 2
    s2_ptr2 = cast_adr_to_ptr(a + ssize + 1, SPTR)
    assert s2_ptr2.x == 2
    assert s2_ptr1 == s2_ptr2
    assert s1_ptr1 != s2_ptr1
    assert not (s2_ptr2 == s1_ptr2)
    assert s1_ptr1 == cast_adr_to_ptr(a, SPTR)

    S2 = lltype.Struct('S2', ('y', lltype.Char))
    S2PTR = lltype.Ptr(S2)
    py.test.raises(lltype.InvalidCast, cast_adr_to_ptr, a, S2PTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a + 1, SPTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a + ssize, SPTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a + 2 * ssize, SPTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a + 2 * ssize + 1, SPTR)
    py.test.raises(ArenaError, arena_reserve, a + 1, llmemory.sizeof(S), False)
    py.test.raises(ArenaError, arena_reserve, a + ssize, llmemory.sizeof(S),
                   False)
    py.test.raises(ArenaError, arena_reserve, a + 2 * ssize,
                   llmemory.sizeof(S), False)
    py.test.raises(ArenaError, arena_reserve, a + 2 * ssize + 1,
                   llmemory.sizeof(S), False)

    arena_reset(a, myarenasize, True)
    py.test.raises(ArenaError, cast_adr_to_ptr, a, SPTR)
    arena_reserve(a, llmemory.sizeof(S))
    s1_ptr1 = cast_adr_to_ptr(a, SPTR)
    assert s1_ptr1.x == 0
    s1_ptr1.x = 5

    arena_reserve(a + ssize, llmemory.sizeof(S2), check_alignment=False)
    s2_ptr1 = cast_adr_to_ptr(a + ssize, S2PTR)
    assert s2_ptr1.y == '\x00'
    s2_ptr1.y = 'X'

    assert cast_adr_to_ptr(a + 0, SPTR).x == 5
    assert cast_adr_to_ptr((a + ssize + 1) - 1, S2PTR).y == 'X'

    assert (a + 4) - (a + 1) == 3