Example #1
 def collect_nursery(self):
     if self.nursery_size > self.top_of_space - self.free:
         # the semispace is running out, do a full collect
         self.obtain_free_space(self.nursery_size)
         ll_assert(self.nursery_size <= self.top_of_space - self.free,
                   "obtain_free_space failed to do its job")
     if self.nursery:
         if DEBUG_PRINT:
             llop.debug_print(lltype.Void, "minor collect")
         # a nursery-only collection
         scan = beginning = self.free
         self.collect_oldrefs_to_nursery()
         self.collect_roots_in_nursery()
         scan = self.scan_objects_just_copied_out_of_nursery(scan)
         # at this point, all static and old objects have got their
         # GCFLAG_NO_YOUNG_PTRS set again by trace_and_drag_out_of_nursery
         if self.young_objects_with_weakrefs.non_empty():
             self.invalidate_young_weakrefs()
         self.notify_objects_just_moved()
         # mark the nursery as free and fill it with zeroes again
         llarena.arena_reset(self.nursery, self.nursery_size, True)
         if DEBUG_PRINT:
             llop.debug_print(lltype.Void, "percent survived:",
                              float(scan - beginning) / self.nursery_size)
     else:
         # no nursery - this occurs after a full collect, triggered either
         # just above or by some previous non-nursery-based allocation.
         # Grab a piece of the current space for the nursery.
         self.nursery = self.free
         self.nursery_top = self.nursery + self.nursery_size
         self.free = self.nursery_top
     self.nursery_free = self.nursery
     return self.nursery_free
Example #2
 def collect_nursery(self):
     if self.nursery_size > self.top_of_space - self.free:
         # the semispace is running out, do a full collect
         self.obtain_free_space(self.nursery_size)
         ll_assert(self.nursery_size <= self.top_of_space - self.free,
                      "obtain_free_space failed to do its job")
     if self.nursery:
         if DEBUG_PRINT:
             llop.debug_print(lltype.Void, "minor collect")
         # a nursery-only collection
         scan = beginning = self.free
         self.collect_oldrefs_to_nursery()
         self.collect_roots_in_nursery()
         scan = self.scan_objects_just_copied_out_of_nursery(scan)
         # at this point, all static and old objects have got their
         # GCFLAG_NO_YOUNG_PTRS set again by trace_and_drag_out_of_nursery
         if self.young_objects_with_weakrefs.non_empty():
             self.invalidate_young_weakrefs()
         self.notify_objects_just_moved()
         # mark the nursery as free and fill it with zeroes again
         llarena.arena_reset(self.nursery, self.nursery_size, True)
         if DEBUG_PRINT:
             llop.debug_print(lltype.Void, "percent survived:", float(scan - beginning) / self.nursery_size)
     else:
         # no nursery - this occurs after a full collect, triggered either
         # just above or by some previous non-nursery-based allocation.
         # Grab a piece of the current space for the nursery.
         self.nursery = self.free
         self.nursery_top = self.nursery + self.nursery_size
         self.free = self.nursery_top
     self.nursery_free = self.nursery
     return self.nursery_free
Example #3
 def markcompactcollect(self, needed=0):
     start_time = self.debug_collect_start()
     self.debug_check_consistency()
     self.to_see = self.AddressStack()
     self.mark_roots_recursively()
     if (self.objects_with_finalizers.non_empty() or
         self.run_finalizers.non_empty()):
         self.mark_objects_with_finalizers()
         self._trace_and_mark()
     self.to_see.delete()
     num_of_alive_objs = self.compute_alive_objects()
     size_of_alive_objs = self.totalsize_of_objs
     totalsize = self.new_space_size(size_of_alive_objs, needed +
                                     num_of_alive_objs * BYTES_PER_TID)
     tid_backup_size = (llmemory.sizeof(self.TID_BACKUP, 0) +
                        llmemory.sizeof(TID_TYPE) * num_of_alive_objs)
     used_space_now = self.next_collect_after + raw_malloc_usage(tid_backup_size)
     if totalsize >= self.space_size or used_space_now >= self.space_size:
         toaddr = self.double_space_size(totalsize)
         llarena.arena_reserve(toaddr + size_of_alive_objs, tid_backup_size)
         self.tid_backup = llmemory.cast_adr_to_ptr(
             toaddr + size_of_alive_objs,
             lltype.Ptr(self.TID_BACKUP))
         resizing = True
     else:
         toaddr = llarena.arena_new_view(self.space)
         llarena.arena_reserve(self.top_of_space, tid_backup_size)
         self.tid_backup = llmemory.cast_adr_to_ptr(
             self.top_of_space,
             lltype.Ptr(self.TID_BACKUP))
         resizing = False
     self.next_collect_after = totalsize
     weakref_offsets = self.collect_weakref_offsets()
     finaladdr = self.update_forward_pointers(toaddr, num_of_alive_objs)
     if (self.run_finalizers.non_empty() or
         self.objects_with_finalizers.non_empty()):
         self.update_run_finalizers()
     if self.objects_with_weakrefs.non_empty():
         self.invalidate_weakrefs(weakref_offsets)
     self.update_objects_with_id()
     self.compact(resizing)
     if not resizing:
         size = toaddr + self.space_size - finaladdr
         llarena.arena_reset(finaladdr, size, True)
     else:
         if we_are_translated():
             # because we free stuff already in raw_memmove, we
             # would get double free here. Let's free it anyway
             llarena.arena_free(self.space)
         llarena.arena_reset(toaddr + size_of_alive_objs, tid_backup_size,
                             True)
     self.space        = toaddr
     self.free         = finaladdr
     self.top_of_space = toaddr + self.next_collect_after
     self.debug_check_consistency()
     self.tid_backup = lltype.nullptr(self.TID_BACKUP)
     if self.run_finalizers.non_empty():
         self.execute_finalizers()
     self.debug_collect_finish(start_time)
Example #4
 def markcompactcollect(self, needed=0):
     start_time = self.debug_collect_start()
     self.debug_check_consistency()
     self.to_see = self.AddressStack()
     self.mark_roots_recursively()
     if (self.objects_with_finalizers.non_empty()
             or self.run_finalizers.non_empty()):
         self.mark_objects_with_finalizers()
         self._trace_and_mark()
     self.to_see.delete()
     num_of_alive_objs = self.compute_alive_objects()
     size_of_alive_objs = self.totalsize_of_objs
     totalsize = self.new_space_size(
         size_of_alive_objs, needed + num_of_alive_objs * BYTES_PER_TID)
     tid_backup_size = (llmemory.sizeof(self.TID_BACKUP, 0) +
                        llmemory.sizeof(TID_TYPE) * num_of_alive_objs)
     used_space_now = self.next_collect_after + raw_malloc_usage(
         tid_backup_size)
     if totalsize >= self.space_size or used_space_now >= self.space_size:
         toaddr = self.double_space_size(totalsize)
         llarena.arena_reserve(toaddr + size_of_alive_objs, tid_backup_size)
         self.tid_backup = llmemory.cast_adr_to_ptr(
             toaddr + size_of_alive_objs, lltype.Ptr(self.TID_BACKUP))
         resizing = True
     else:
         toaddr = llarena.arena_new_view(self.space)
         llarena.arena_reserve(self.top_of_space, tid_backup_size)
         self.tid_backup = llmemory.cast_adr_to_ptr(
             self.top_of_space, lltype.Ptr(self.TID_BACKUP))
         resizing = False
     self.next_collect_after = totalsize
     weakref_offsets = self.collect_weakref_offsets()
     finaladdr = self.update_forward_pointers(toaddr, num_of_alive_objs)
     if (self.run_finalizers.non_empty()
             or self.objects_with_finalizers.non_empty()):
         self.update_run_finalizers()
     if self.objects_with_weakrefs.non_empty():
         self.invalidate_weakrefs(weakref_offsets)
     self.update_objects_with_id()
     self.compact(resizing)
     if not resizing:
         size = toaddr + self.space_size - finaladdr
         llarena.arena_reset(finaladdr, size, True)
     else:
         if we_are_translated():
             # because we free stuff already in raw_memmove, we
             # would get double free here. Let's free it anyway
             llarena.arena_free(self.space)
         llarena.arena_reset(toaddr + size_of_alive_objs, tid_backup_size,
                             True)
     self.space = toaddr
     self.free = finaladdr
     self.top_of_space = toaddr + self.next_collect_after
     self.debug_check_consistency()
     self.tid_backup = lltype.nullptr(self.TID_BACKUP)
     if self.run_finalizers.non_empty():
         self.execute_finalizers()
     self.debug_collect_finish(start_time)
Example #5
    def semispace_collect(self, size_changing=False):
        debug_start("gc-collect")
        debug_print()
        debug_print(".----------- Full collection ------------------")
        start_usage = self.free - self.tospace
        debug_print("| used before collection:          ", start_usage, "bytes")
        # start_time = time.time()
        # llop.debug_print(lltype.Void, 'semispace_collect', int(size_changing))

        # Switch the spaces.  We copy everything over to the empty space
        # (self.fromspace at the beginning of the collection), and clear the old
        # one (self.tospace at the beginning).  Their purposes will be reversed
        # for the next collection.
        tospace = self.fromspace
        fromspace = self.tospace
        self.fromspace = fromspace
        self.tospace = tospace
        self.top_of_space = tospace + self.space_size
        scan = self.free = tospace
        self.starting_full_collect()
        self.collect_roots()
        if self.run_finalizers.non_empty():
            self.update_run_finalizers()
        scan = self.scan_copied(scan)
        if self.objects_with_finalizers.non_empty():
            scan = self.deal_with_objects_with_finalizers(scan)
        if self.objects_with_weakrefs.non_empty():
            self.invalidate_weakrefs()
        self.update_objects_with_id()
        self.finished_full_collect()
        self.debug_check_consistency()
        if not size_changing:
            llarena.arena_reset(fromspace, self.space_size, True)
            self.record_red_zone()
            self.execute_finalizers()
        # llop.debug_print(lltype.Void, 'collected', self.space_size, size_changing, self.top_of_space - self.free)
        if have_debug_prints():
            # end_time = time.time()
            # elapsed_time = end_time - start_time
            # self.total_collection_time += elapsed_time
            self.total_collection_count += 1
            # total_program_time = end_time - self.program_start_time
            end_usage = self.free - self.tospace
            debug_print("| used after collection:           ", end_usage, "bytes")
            debug_print("| freed:                           ", start_usage - end_usage, "bytes")
            debug_print("| size of each semispace:          ", self.space_size, "bytes")
            debug_print("| fraction of semispace now used:  ", end_usage * 100.0 / self.space_size, "%")
            # ct = self.total_collection_time
            cc = self.total_collection_count
            debug_print("| number of semispace_collects:    ", cc)
            # debug_print("|                         i.e.:    ",
            #            cc / total_program_time, "per second")
            # debug_print("| total time in semispace_collect: ",
            #            ct, "seconds")
            # debug_print("|                            i.e.: ",
            #            ct * 100.0 / total_program_time, "%")
            debug_print("`----------------------------------------------")
        debug_stop("gc-collect")
Example #6
def test_shrink_obj():
    from pypy.rpython.memory.gcheader import GCHeaderBuilder
    HDR = lltype.Struct('HDR', ('h', lltype.Signed))
    gcheaderbuilder = GCHeaderBuilder(HDR)
    size_gc_header = gcheaderbuilder.size_gc_header
    S = lltype.GcStruct('S', ('x', lltype.Signed),
                             ('a', lltype.Array(lltype.Signed)))
    myarenasize = 200
    a = arena_malloc(myarenasize, False)
    arena_reserve(a, size_gc_header + llmemory.sizeof(S, 10))
    arena_shrink_obj(a, size_gc_header + llmemory.sizeof(S, 5))
    arena_reset(a, size_gc_header + llmemory.sizeof(S, 5), False)
Example #7
def test_shrink_obj():
    from pypy.rpython.memory.gcheader import GCHeaderBuilder
    HDR = lltype.Struct('HDR', ('h', lltype.Signed))
    gcheaderbuilder = GCHeaderBuilder(HDR)
    size_gc_header = gcheaderbuilder.size_gc_header
    S = lltype.GcStruct('S', ('x', lltype.Signed),
                        ('a', lltype.Array(lltype.Signed)))
    myarenasize = 200
    a = arena_malloc(myarenasize, False)
    arena_reserve(a, size_gc_header + llmemory.sizeof(S, 10))
    arena_shrink_obj(a, size_gc_header + llmemory.sizeof(S, 5))
    arena_reset(a, size_gc_header + llmemory.sizeof(S, 5), False)
Example #8
    def markcompactcollect(self, requested_size=0):
        self.debug_collect_start(requested_size)
        self.debug_check_consistency()
        #
        # Mark alive objects
        #
        self.to_see = self.AddressDeque()
        self.trace_from_roots()
        self.to_see.delete()
        #
        # Prepare new views on the same memory
        #
        toaddr = llarena.arena_new_view(self.space)
        maxnum = self.space_size - (self.free - self.space)
        maxnum /= BYTES_PER_TID
        llarena.arena_reserve(self.free, llmemory.sizeof(TID_BACKUP, maxnum))
        self.tid_backup = llmemory.cast_adr_to_ptr(self.free,
                                                   lltype.Ptr(TID_BACKUP))
        #
        # Walk all objects and assign forward pointers in the same order,
        # also updating all references
        #
        self.update_forward_pointers(toaddr, maxnum)
        if (self.run_finalizers.non_empty()
                or self.objects_with_finalizers.non_empty()):
            self.update_run_finalizers()

        self.update_objects_with_id()
        self.compact()
        #
        self.tid_backup = lltype.nullptr(TID_BACKUP)
        self.free = self.finaladdr
        self.next_collect_after = self.next_collection(self.finaladdr - toaddr,
                                                       self.num_alive_objs,
                                                       requested_size)
        #
        if not translated_to_c():
            remaining_size = (toaddr + self.space_size) - self.finaladdr
            llarena.arena_reset(self.finaladdr, remaining_size, False)
            llarena.arena_free(self.space)
            self.space = toaddr
        #
        self.debug_check_consistency()
        self.debug_collect_finish()
        if self.next_collect_after < 0:
            raise MemoryError
        #
        if self.run_finalizers.non_empty():
            self.execute_finalizers()
            return True  # executed some finalizers
        else:
            return False  # no finalizer executed
Example #9
    def markcompactcollect(self, requested_size=0):
        self.debug_collect_start(requested_size)
        self.debug_check_consistency()
        #
        # Mark alive objects
        #
        self.to_see = self.AddressDeque()
        self.trace_from_roots()
        self.to_see.delete()
        #
        # Prepare new views on the same memory
        #
        toaddr = llarena.arena_new_view(self.space)
        maxnum = self.space_size - (self.free - self.space)
        maxnum /= BYTES_PER_TID
        llarena.arena_reserve(self.free, llmemory.sizeof(TID_BACKUP, maxnum))
        self.tid_backup = llmemory.cast_adr_to_ptr(self.free,
                                                   lltype.Ptr(TID_BACKUP))
        #
        # Walk all objects and assign forward pointers in the same order,
        # also updating all references
        #
        self.update_forward_pointers(toaddr, maxnum)
        if (self.run_finalizers.non_empty() or
            self.objects_with_finalizers.non_empty()):
            self.update_run_finalizers()

        self.update_objects_with_id()
        self.compact()
        #
        self.tid_backup = lltype.nullptr(TID_BACKUP)
        self.free = self.finaladdr
        self.next_collect_after = self.next_collection(self.finaladdr - toaddr,
                                                       self.num_alive_objs,
                                                       requested_size)
        #
        if not translated_to_c():
            remaining_size = (toaddr + self.space_size) - self.finaladdr
            llarena.arena_reset(self.finaladdr, remaining_size, False)
            llarena.arena_free(self.space)
            self.space = toaddr
        #
        self.debug_check_consistency()
        self.debug_collect_finish()
        if self.next_collect_after < 0:
            raise MemoryError
        #
        if self.run_finalizers.non_empty():
            self.execute_finalizers()
            return True      # executed some finalizers
        else:
            return False     # no finalizer executed
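The 'maxnum' computation in the two versions above is just the number of TID backups that fit into the slack between 'free' and the end of the space. A worked example with made-up numbers (BYTES_PER_TID here is an illustrative stand-in, not the real constant):

    space_size = 4096
    used = 3000                    # == free - space
    BYTES_PER_TID = 8              # illustrative stand-in value
    maxnum = (space_size - used) // BYTES_PER_TID
    assert maxnum == 137           # 1096 bytes of slack -> 137 backup slots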
Example #10
 def free_page(self, page):
     """Free a whole page."""
     #
     # Insert the freed page in the arena's 'freepages' list.
     # If nfreepages == totalpages, then it will be freed at the
     # end of mass_free().
     arena = page.arena
     arena.nfreepages += 1
     pageaddr = llmemory.cast_ptr_to_adr(page)
     pageaddr = llarena.getfakearenaaddress(pageaddr)
     llarena.arena_reset(pageaddr, self.page_size, 0)
     llarena.arena_reserve(pageaddr, llmemory.sizeof(llmemory.Address))
     pageaddr.address[0] = arena.freepages
     arena.freepages = pageaddr
Example #11
 def allocate_new_page(self, size_class):
     """Allocate and return a new page for the given size_class."""
     #
     # Allocate a new arena if needed.
     if self.current_arena == ARENA_NULL:
         self.allocate_new_arena()
     #
     # The result is simply 'current_arena.freepages'.
     arena = self.current_arena
     result = arena.freepages
     if arena.nfreepages > 0:
         #
         # The 'result' was part of the chained list; read the next.
         arena.nfreepages -= 1
         freepages = result.address[0]
         llarena.arena_reset(result,
                             llmemory.sizeof(llmemory.Address),
                             0)
         #
     else:
         # The 'result' is part of the uninitialized pages.
         ll_assert(self.num_uninitialized_pages > 0,
                   "fully allocated arena found in self.current_arena")
         self.num_uninitialized_pages -= 1
         if self.num_uninitialized_pages > 0:
             freepages = result + self.page_size
         else:
             freepages = NULL
     #
     arena.freepages = freepages
     if freepages == NULL:
         # This was the last page, so put the arena away into
         # arenas_lists[0].
         ll_assert(arena.nfreepages == 0, 
                   "freepages == NULL but nfreepages > 0")
         arena.nextarena = self.arenas_lists[0]
         self.arenas_lists[0] = arena
         self.current_arena = ARENA_NULL
     #
     # Initialize the fields of the resulting page
     llarena.arena_reserve(result, llmemory.sizeof(PAGE_HEADER))
     page = llmemory.cast_adr_to_ptr(result, PAGE_PTR)
     page.arena = arena
     page.nfree = 0
     page.freeblock = result + self.hdrsize
     page.nextpage = PAGE_NULL
     ll_assert(self.page_for_size[size_class] == PAGE_NULL,
               "allocate_new_page() called but a page is already waiting")
     self.page_for_size[size_class] = page
     return page
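The 'freepages' chain popped by allocate_new_page above is the same list that free_page (Example #10) pushes onto: a freed page's first word is overwritten with the address of the previous list head, so the list is threaded through the pages themselves. A plain-Python model of the idiom (a sketch, not the RPython code; indices stand in for page addresses):

    class ArenaModel(object):
        def __init__(self, npages):
            self.words = [None] * npages  # first word of each page
            self.freepages = None         # head of the threaded free list
            self.nfreepages = 0
            self.next_fresh = 0           # first never-initialized page

        def free_page(self, i):               # cf. free_page() above
            self.words[i] = self.freepages    # pageaddr.address[0] = freepages
            self.freepages = i                # arena.freepages = pageaddr
            self.nfreepages += 1

        def allocate_page(self):              # cf. allocate_new_page() above
            if self.nfreepages > 0:
                result = self.freepages
                self.freepages = self.words[result]   # read the next link
                self.nfreepages -= 1
            else:
                result = self.next_fresh      # carve off a fresh page
                self.next_fresh += 1
            return result

    arena = ArenaModel(8)
    assert arena.allocate_page() == 0    # fresh page
    arena.free_page(0)
    assert arena.allocate_page() == 0    # reused via the free list
    assert arena.allocate_page() == 1    # fresh again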
Example #12
def test_partial_arena_reset():
    a = arena_malloc(72, False)

    def reserve(i):
        b = a + i * llmemory.raw_malloc_usage(precomputed_size)
        arena_reserve(b, precomputed_size)
        return b

    blist = []
    plist = []
    for i in range(4):
        b = reserve(i)
        (b + llmemory.offsetof(SX, 'x')).signed[0] = 100 + i
        blist.append(b)
        plist.append(llmemory.cast_adr_to_ptr(b, SPTR))
    # clear blist[1] and blist[2] but not blist[0] nor blist[3]
    arena_reset(blist[1],
                llmemory.raw_malloc_usage(precomputed_size) * 2, False)
    py.test.raises(RuntimeError, "plist[1].x")  # marked as freed
    py.test.raises(RuntimeError, "plist[2].x")  # marked as freed
    # re-reserve object at index 1 and 2
    blist[1] = reserve(1)
    blist[2] = reserve(2)
    # check via object pointers
    assert plist[0].x == 100
    assert plist[3].x == 103
    py.test.raises(RuntimeError, "plist[1].x")  # marked as freed
    py.test.raises(RuntimeError, "plist[2].x")  # marked as freed
    # but we can still cast the old ptrs to addresses, which compare equal
    # to the new ones we got
    assert llmemory.cast_ptr_to_adr(plist[1]) == blist[1]
    assert llmemory.cast_ptr_to_adr(plist[2]) == blist[2]
    # check via addresses
    assert (blist[0] + llmemory.offsetof(SX, 'x')).signed[0] == 100
    assert (blist[3] + llmemory.offsetof(SX, 'x')).signed[0] == 103
    py.test.raises(lltype.UninitializedMemoryAccess,
                   "(blist[1] + llmemory.offsetof(SX, 'x')).signed[0]")
    py.test.raises(lltype.UninitializedMemoryAccess,
                   "(blist[2] + llmemory.offsetof(SX, 'x')).signed[0]")
    # clear and zero-fill the area over blist[0] and blist[1]
    arena_reset(blist[0],
                llmemory.raw_malloc_usage(precomputed_size) * 2, True)
    # re-reserve and check it's zero
    blist[0] = reserve(0)
    blist[1] = reserve(1)
    assert (blist[0] + llmemory.offsetof(SX, 'x')).signed[0] == 0
    assert (blist[1] + llmemory.offsetof(SX, 'x')).signed[0] == 0
    assert (blist[3] + llmemory.offsetof(SX, 'x')).signed[0] == 103
    py.test.raises(lltype.UninitializedMemoryAccess,
                   "(blist[2] + llmemory.offsetof(SX, 'x')).signed[0]")
Example #13
def test_look_inside_object():
    # this code is also used in translation tests below
    myarenasize = 50
    a = arena_malloc(myarenasize, False)
    b = a + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(b, precomputed_size)
    (b + llmemory.offsetof(SX, 'x')).signed[0] = 123
    assert llmemory.cast_adr_to_ptr(b, SPTR).x == 123
    llmemory.cast_adr_to_ptr(b, SPTR).x += 1
    assert (b + llmemory.offsetof(SX, 'x')).signed[0] == 124
    arena_reset(a, myarenasize, True)
    arena_reserve(b, round_up_for_allocation(llmemory.sizeof(SX)))
    assert llmemory.cast_adr_to_ptr(b, SPTR).x == 0
    arena_free(a)
    return 42
Example #14
 def allocate_new_page(self, size_class):
     """Allocate and return a new page for the given size_class."""
     #
     # Allocate a new arena if needed.
     if self.current_arena == ARENA_NULL:
         self.allocate_new_arena()
     #
     # The result is simply 'current_arena.freepages'.
     arena = self.current_arena
     result = arena.freepages
     if arena.nfreepages > 0:
         #
         # The 'result' was part of the chained list; read the next.
         arena.nfreepages -= 1
         freepages = result.address[0]
         llarena.arena_reset(result, llmemory.sizeof(llmemory.Address), 0)
         #
     else:
         # The 'result' is part of the uninitialized pages.
         ll_assert(self.num_uninitialized_pages > 0,
                   "fully allocated arena found in self.current_arena")
         self.num_uninitialized_pages -= 1
         if self.num_uninitialized_pages > 0:
             freepages = result + self.page_size
         else:
             freepages = NULL
     #
     arena.freepages = freepages
     if freepages == NULL:
         # This was the last page, so put the arena away into
         # arenas_lists[0].
         ll_assert(arena.nfreepages == 0,
                   "freepages == NULL but nfreepages > 0")
         arena.nextarena = self.arenas_lists[0]
         self.arenas_lists[0] = arena
         self.current_arena = ARENA_NULL
     #
     # Initialize the fields of the resulting page
     llarena.arena_reserve(result, llmemory.sizeof(PAGE_HEADER))
     page = llmemory.cast_adr_to_ptr(result, PAGE_PTR)
     page.arena = arena
     page.nfree = 0
     page.freeblock = result + self.hdrsize
     page.nextpage = PAGE_NULL
     ll_assert(self.page_for_size[size_class] == PAGE_NULL,
               "allocate_new_page() called but a page is already waiting")
     self.page_for_size[size_class] = page
     return page
Example #15
def test_partial_arena_reset():
    a = arena_malloc(50, False)
    def reserve(i):
        b = a + i * llmemory.raw_malloc_usage(precomputed_size)
        arena_reserve(b, precomputed_size)
        return b
    blist = []
    plist = []
    for i in range(4):
        b = reserve(i)
        (b + llmemory.offsetof(SX, 'x')).signed[0] = 100 + i
        blist.append(b)
        plist.append(llmemory.cast_adr_to_ptr(b, SPTR))
    # clear blist[1] and blist[2] but not blist[0] nor blist[3]
    arena_reset(blist[1], llmemory.raw_malloc_usage(precomputed_size)*2, False)
    py.test.raises(RuntimeError, "plist[1].x")     # marked as freed
    py.test.raises(RuntimeError, "plist[2].x")     # marked as freed
    # re-reserve object at index 1 and 2
    blist[1] = reserve(1)
    blist[2] = reserve(2)
    # check via object pointers
    assert plist[0].x == 100
    assert plist[3].x == 103
    py.test.raises(RuntimeError, "plist[1].x")     # marked as freed
    py.test.raises(RuntimeError, "plist[2].x")     # marked as freed
    # but we can still cast the old ptrs to addresses, which compare equal
    # to the new ones we got
    assert llmemory.cast_ptr_to_adr(plist[1]) == blist[1]
    assert llmemory.cast_ptr_to_adr(plist[2]) == blist[2]
    # check via addresses
    assert (blist[0] + llmemory.offsetof(SX, 'x')).signed[0] == 100
    assert (blist[3] + llmemory.offsetof(SX, 'x')).signed[0] == 103
    py.test.raises(lltype.UninitializedMemoryAccess,
          "(blist[1] + llmemory.offsetof(SX, 'x')).signed[0]")
    py.test.raises(lltype.UninitializedMemoryAccess,
          "(blist[2] + llmemory.offsetof(SX, 'x')).signed[0]")
    # clear and zero-fill the area over blist[0] and blist[1]
    arena_reset(blist[0], llmemory.raw_malloc_usage(precomputed_size)*2, True)
    # re-reserve and check it's zero
    blist[0] = reserve(0)
    blist[1] = reserve(1)
    assert (blist[0] + llmemory.offsetof(SX, 'x')).signed[0] == 0
    assert (blist[1] + llmemory.offsetof(SX, 'x')).signed[0] == 0
    assert (blist[3] + llmemory.offsetof(SX, 'x')).signed[0] == 103
    py.test.raises(lltype.UninitializedMemoryAccess,
          "(blist[2] + llmemory.offsetof(SX, 'x')).signed[0]")
Example #16
 def set_forwarding_address(self, obj, newobj, objsize):
     # To mark an object as forwarded, we set the GCFLAG_FORWARDED and
     # overwrite the object with a FORWARDSTUB.  Doing so is a bit
     # long-winded on llarena, but it all melts down to two memory
     # writes after translation to C.
     size_gc_header = self.size_gc_header()
     stubsize = llmemory.sizeof(self.FORWARDSTUB)
     tid = self.header(obj).tid
     ll_assert(tid & GCFLAG_EXTERNAL == 0,  "unexpected GCFLAG_EXTERNAL")
     ll_assert(tid & GCFLAG_FORWARDED == 0, "unexpected GCFLAG_FORWARDED")
     # replace the object at 'obj' with a FORWARDSTUB.
     hdraddr = obj - size_gc_header
     llarena.arena_reset(hdraddr, size_gc_header + objsize, False)
     llarena.arena_reserve(hdraddr, size_gc_header + stubsize)
     hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(self.HDR))
     hdr.tid = tid | GCFLAG_FORWARDED
     stub = llmemory.cast_adr_to_ptr(obj, self.FORWARDSTUBPTR)
     stub.forw = newobj
Example #17
 def set_forwarding_address(self, obj, newobj, objsize):
     # To mark an object as forwarded, we set the GCFLAG_FORWARDED and
     # overwrite the object with a FORWARDSTUB.  Doing so is a bit
     # long-winded on llarena, but it all melts down to two memory
     # writes after translation to C.
     size_gc_header = self.size_gc_header()
     stubsize = llmemory.sizeof(self.FORWARDSTUB)
     tid = self.header(obj).tid
     ll_assert(tid & GCFLAG_EXTERNAL == 0, "unexpected GCFLAG_EXTERNAL")
     ll_assert(tid & GCFLAG_FORWARDED == 0, "unexpected GCFLAG_FORWARDED")
     # replace the object at 'obj' with a FORWARDSTUB.
     hdraddr = obj - size_gc_header
     llarena.arena_reset(hdraddr, size_gc_header + objsize, False)
     llarena.arena_reserve(hdraddr, size_gc_header + stubsize)
     hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(self.HDR))
     hdr.tid = tid | GCFLAG_FORWARDED
     stub = llmemory.cast_adr_to_ptr(obj, self.FORWARDSTUBPTR)
     stub.forw = newobj
Example #18
 def malloc(self, size):
     """Allocate a block from a page in an arena."""
     nsize = llmemory.raw_malloc_usage(size)
     ll_assert(nsize > 0, "malloc: size is null or negative")
     ll_assert(nsize <= self.small_request_threshold,"malloc: size too big")
     ll_assert((nsize & (WORD-1)) == 0, "malloc: size is not aligned")
     self.total_memory_used += r_uint(nsize)
     #
     # Get the page to use from the size
     size_class = nsize >> WORD_POWER_2
     page = self.page_for_size[size_class]
     if page == PAGE_NULL:
         page = self.allocate_new_page(size_class)
     #
     # The result is simply 'page.freeblock'
     result = page.freeblock
     if page.nfree > 0:
         #
         # The 'result' was part of the chained list; read the next.
         page.nfree -= 1
         freeblock = result.address[0]
         llarena.arena_reset(result,
                             llmemory.sizeof(llmemory.Address),
                             0)
         #
     else:
         # The 'result' is part of the uninitialized blocks.
         freeblock = result + nsize
     #
     page.freeblock = freeblock
     #
     pageaddr = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
     if freeblock - pageaddr > self.page_size - nsize:
         # This was the last free block, so unlink the page from the
         # chained list and put it in the 'full_page_for_size' list.
         self.page_for_size[size_class] = page.nextpage
         page.nextpage = self.full_page_for_size[size_class]
         self.full_page_for_size[size_class] = page
     #
     llarena.arena_reserve(result, _dummy_size(size))
     return result
Example #19
 def malloc(self, size):
     """Allocate a block from a page in an arena."""
     nsize = llmemory.raw_malloc_usage(size)
     ll_assert(nsize > 0, "malloc: size is null or negative")
     ll_assert(nsize <= self.small_request_threshold,
               "malloc: size too big")
     ll_assert((nsize & (WORD - 1)) == 0, "malloc: size is not aligned")
     self.total_memory_used += r_uint(nsize)
     #
     # Get the page to use from the size
     size_class = nsize >> WORD_POWER_2
     page = self.page_for_size[size_class]
     if page == PAGE_NULL:
         page = self.allocate_new_page(size_class)
     #
     # The result is simply 'page.freeblock'
     result = page.freeblock
     if page.nfree > 0:
         #
         # The 'result' was part of the chained list; read the next.
         page.nfree -= 1
         freeblock = result.address[0]
         llarena.arena_reset(result, llmemory.sizeof(llmemory.Address), 0)
         #
     else:
         # The 'result' is part of the uninitialized blocks.
         freeblock = result + nsize
     #
     page.freeblock = freeblock
     #
     pageaddr = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
     if freeblock - pageaddr > self.page_size - nsize:
         # This was the last free block, so unlink the page from the
         # chained list and put it in the 'full_page_for_size' list.
         self.page_for_size[size_class] = page.nextpage
         page.nextpage = self.full_page_for_size[size_class]
         self.full_page_for_size[size_class] = page
     #
     llarena.arena_reserve(result, _dummy_size(size))
     return result
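Both versions of malloc above map a WORD-aligned request size to a small-integer size class with a plain shift. For instance, on a 64-bit build (an assumption here: WORD = 8, so WORD_POWER_2 = 3):

    WORD = 8                  # assumption: 64-bit build
    WORD_POWER_2 = 3          # log2(WORD)
    for nsize in (8, 16, 48):
        assert nsize & (WORD - 1) == 0   # the alignment ll_assert above
    assert [n >> WORD_POWER_2 for n in (8, 16, 48)] == [1, 2, 6]
    # i.e. 8 bytes -> class 1, 16 bytes -> class 2, 48 bytes -> class 6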
Example #20
 def compact(self, resizing):
     fromaddr = self.space
     size_gc_header = self.gcheaderbuilder.size_gc_header
     start = fromaddr
     end = fromaddr
     num = 0
     while fromaddr < self.free:
         obj = fromaddr + size_gc_header
         hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR))
         objsize = self.get_size_from_backup(obj, num)
         totalsize = size_gc_header + objsize
         if not self.surviving(obj): 
             # this object dies. Following line is a noop in C,
             # we clear it to make debugging easier
             llarena.arena_reset(fromaddr, totalsize, False)
         else:
             ll_assert(self.is_forwarded(obj), "not forwarded, surviving obj")
             forward_ptr = hdr.forward_ptr
             if resizing:
                 end = fromaddr
             val = (self.get_typeid_from_backup(num) << 16) + 1
             hdr.forward_ptr = llmemory.cast_int_to_adr(val)
             if fromaddr != forward_ptr:
                 #llop.debug_print(lltype.Void, "Copying from to",
                 #                 fromaddr, forward_ptr, totalsize)
                 llmemory.raw_memmove(fromaddr, forward_ptr, totalsize)
             if resizing and end - start > GC_CLEARANCE:
                 diff = end - start
                 #llop.debug_print(lltype.Void, "Cleaning", start, diff)
                 diff = (diff / GC_CLEARANCE) * GC_CLEARANCE
                 #llop.debug_print(lltype.Void, "Cleaning", start, diff)
                 end = start + diff
                 if we_are_translated():
                     # XXX wuaaaaa.... those objects are freed incorrectly
                     #                 here in case of test_gc
                     llarena.arena_reset(start, diff, True)
                 start += diff
         num += 1
         fromaddr += totalsize
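The 'diff' arithmetic above rounds the region to clear down to a whole multiple of GC_CLEARANCE, so 'start' only ever advances by whole clearance units. With an illustrative constant (not the real value):

    GC_CLEARANCE = 32 * 1024      # illustrative value, not the real constant
    diff = 100000                 # == end - start
    diff = (diff // GC_CLEARANCE) * GC_CLEARANCE
    assert diff == 98304          # 3 * 32768: whole units only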
Example #21
def test_replace_object_with_stub():
    from pypy.rpython.memory.gcheader import GCHeaderBuilder
    HDR = lltype.Struct('HDR', ('x', lltype.Signed))
    S = lltype.GcStruct('S', ('y', lltype.Signed), ('z', lltype.Signed))
    STUB = lltype.GcStruct('STUB', ('t', lltype.Char))
    gcheaderbuilder = GCHeaderBuilder(HDR)
    size_gc_header = gcheaderbuilder.size_gc_header
    ssize = llmemory.raw_malloc_usage(llmemory.sizeof(S))

    a = arena_malloc(13*ssize, True)
    hdraddr = a + 3*ssize
    arena_reserve(hdraddr, size_gc_header + llmemory.sizeof(S))
    hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(HDR))
    hdr.x = 42
    obj = llmemory.cast_adr_to_ptr(hdraddr + size_gc_header, lltype.Ptr(S))
    obj.y = -5
    obj.z = -6

    hdraddr = llmemory.cast_ptr_to_adr(obj) - size_gc_header
    arena_reset(hdraddr, size_gc_header + llmemory.sizeof(S), False)
    arena_reserve(hdraddr, size_gc_header + llmemory.sizeof(STUB))

    # check that it is possible to reach the newly reserved HDR+STUB
    # via the header of the old 'obj' pointer, both via the existing
    # 'hdraddr':
    hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(HDR))
    hdr.x = 46
    stub = llmemory.cast_adr_to_ptr(hdraddr + size_gc_header, lltype.Ptr(STUB))
    stub.t = '!'

    # and via a (now-invalid) pointer to the old 'obj': (this is needed
    # because during a garbage collection there are still pointers to
    # the old 'obj' around to be fixed)
    hdraddr = llmemory.cast_ptr_to_adr(obj) - size_gc_header
    hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(HDR))
    assert hdr.x == 46
    stub = llmemory.cast_adr_to_ptr(hdraddr + size_gc_header,
                                    lltype.Ptr(STUB))
    assert stub.t == '!'
Example #22
def test_replace_object_with_stub():
    from pypy.rpython.memory.gcheader import GCHeaderBuilder
    HDR = lltype.Struct('HDR', ('x', lltype.Signed))
    S = lltype.GcStruct('S', ('y', lltype.Signed), ('z', lltype.Signed))
    STUB = lltype.GcStruct('STUB', ('t', lltype.Char))
    gcheaderbuilder = GCHeaderBuilder(HDR)
    size_gc_header = gcheaderbuilder.size_gc_header
    ssize = llmemory.raw_malloc_usage(llmemory.sizeof(S))

    a = arena_malloc(13 * ssize, True)
    hdraddr = a + 3 * ssize
    arena_reserve(hdraddr, size_gc_header + llmemory.sizeof(S))
    hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(HDR))
    hdr.x = 42
    obj = llmemory.cast_adr_to_ptr(hdraddr + size_gc_header, lltype.Ptr(S))
    obj.y = -5
    obj.z = -6

    hdraddr = llmemory.cast_ptr_to_adr(obj) - size_gc_header
    arena_reset(hdraddr, size_gc_header + llmemory.sizeof(S), False)
    arena_reserve(hdraddr, size_gc_header + llmemory.sizeof(STUB))

    # check that it is possible to reach the newly reserved HDR+STUB
    # via the header of the old 'obj' pointer, both via the existing
    # 'hdraddr':
    hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(HDR))
    hdr.x = 46
    stub = llmemory.cast_adr_to_ptr(hdraddr + size_gc_header, lltype.Ptr(STUB))
    stub.t = '!'

    # and via a (now-invalid) pointer to the old 'obj': (this is needed
    # because during a garbage collection there are still pointers to
    # the old 'obj' around to be fixed)
    hdraddr = llmemory.cast_ptr_to_adr(obj) - size_gc_header
    hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(HDR))
    assert hdr.x == 46
    stub = llmemory.cast_adr_to_ptr(hdraddr + size_gc_header, lltype.Ptr(STUB))
    assert stub.t == '!'
Example #23
 def collect_nursery(self):
     if self.nursery_size > self.top_of_space - self.free:
         # the semispace is running out, do a full collect
         self.obtain_free_space(self.nursery_size)
         ll_assert(self.nursery_size <= self.top_of_space - self.free,
                      "obtain_free_space failed to do its job")
     if self.nursery:
         debug_start("gc-minor")
         debug_print("--- minor collect ---")
         debug_print("nursery:", self.nursery, "to", self.nursery_top)
         # a nursery-only collection
         scan = beginning = self.free
         self.collect_oldrefs_to_nursery()
         self.collect_roots_in_nursery()
         scan = self.scan_objects_just_copied_out_of_nursery(scan)
         # at this point, all static and old objects have got their
         # GCFLAG_NO_YOUNG_PTRS set again by trace_and_drag_out_of_nursery
         if self.young_objects_with_weakrefs.non_empty():
             self.invalidate_young_weakrefs()
         if self.young_objects_with_id.length() > 0:
             self.update_young_objects_with_id()
         # mark the nursery as free and fill it with zeroes again
         llarena.arena_reset(self.nursery, self.nursery_size, 2)
         debug_print("survived (fraction of the size):",
                     float(scan - beginning) / self.nursery_size)
         debug_stop("gc-minor")
         #self.debug_check_consistency()   # -- quite expensive
     else:
         # no nursery - this occurs after a full collect, triggered either
         # just above or by some previous non-nursery-based allocation.
         # Grab a piece of the current space for the nursery.
         self.nursery = self.free
         self.nursery_top = self.nursery + self.nursery_size
         self.free = self.nursery_top
     self.nursery_free = self.nursery
     # at this point we know that the nursery is empty
     self.change_nursery_hash_base()
     return self.nursery_free
Example #24
 def compact(self, resizing):
     fromaddr = self.space
     size_gc_header = self.gcheaderbuilder.size_gc_header
     start = fromaddr
     end = fromaddr
     num = 0
     while fromaddr < self.free:
         obj = fromaddr + size_gc_header
         objsize = self.get_size_from_backup(obj, num)
         totalsize = size_gc_header + objsize
         if not self.surviving(obj):
             # this object dies. Following line is a noop in C,
             # we clear it to make debugging easier
             llarena.arena_reset(fromaddr, totalsize, False)
         else:
             if resizing:
                 end = fromaddr
             forward_obj = self.get_header_forwarded_addr(obj)
             self.restore_normal_header(obj, num)
             if obj != forward_obj:
                 #llop.debug_print(lltype.Void, "Copying from to",
                 #                 fromaddr, forward_ptr, totalsize)
                 llmemory.raw_memmove(fromaddr,
                                      forward_obj - size_gc_header,
                                      totalsize)
             if resizing and end - start > GC_CLEARANCE:
                 diff = end - start
                 #llop.debug_print(lltype.Void, "Cleaning", start, diff)
                 diff = (diff / GC_CLEARANCE) * GC_CLEARANCE
                 #llop.debug_print(lltype.Void, "Cleaning", start, diff)
                 end = start + diff
                 if we_are_translated():
                     # XXX wuaaaaa.... those objects are freed incorrectly
                     #                 here in case of test_gc
                     llarena.arena_reset(start, diff, True)
                 start += diff
         num += 1
         fromaddr += totalsize
Example #25
 def collect_nursery(self):
     if self.nursery_size > self.top_of_space - self.free:
         # the semispace is running out, do a full collect
         self.obtain_free_space(self.nursery_size)
         ll_assert(self.nursery_size <= self.top_of_space - self.free,
                   "obtain_free_space failed to do its job")
     if self.nursery:
         debug_start("gc-minor")
         debug_print("--- minor collect ---")
         debug_print("nursery:", self.nursery, "to", self.nursery_top)
         # a nursery-only collection
         scan = beginning = self.free
         self.collect_oldrefs_to_nursery()
         self.collect_roots_in_nursery()
         scan = self.scan_objects_just_copied_out_of_nursery(scan)
         # at this point, all static and old objects have got their
         # GCFLAG_NO_YOUNG_PTRS set again by trace_and_drag_out_of_nursery
         if self.young_objects_with_weakrefs.non_empty():
             self.invalidate_young_weakrefs()
         if self.young_objects_with_id.length() > 0:
             self.update_young_objects_with_id()
         # mark the nursery as free and fill it with zeroes again
         llarena.arena_reset(self.nursery, self.nursery_size, 2)
         debug_print("survived (fraction of the size):",
                     float(scan - beginning) / self.nursery_size)
         debug_stop("gc-minor")
         #self.debug_check_consistency()   # -- quite expensive
     else:
         # no nursery - this occurs after a full collect, triggered either
         # just above or by some previous non-nursery-based allocation.
         # Grab a piece of the current space for the nursery.
         self.nursery = self.free
         self.nursery_top = self.nursery + self.nursery_size
         self.free = self.nursery_top
     self.nursery_free = self.nursery
     # at this point we know that the nursery is empty
     self.change_nursery_hash_base()
     return self.nursery_free
Example #26
 def walk_page(self, page, block_size, ok_to_free_func):
     """Walk over all objects in a page, and ask ok_to_free_func()."""
     #
     # 'freeblock' is the next free block
     freeblock = page.freeblock
     #
     # 'prevfreeblockat' is the address of where 'freeblock' was read from.
     prevfreeblockat = lltype.direct_fieldptr(page, 'freeblock')
     prevfreeblockat = llmemory.cast_ptr_to_adr(prevfreeblockat)
     #
     obj = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
     obj += self.hdrsize
     surviving = 0  # initially
     skip_free_blocks = page.nfree
     #
     while True:
         #
         if obj == freeblock:
             #
             if skip_free_blocks == 0:
                 #
                 # 'obj' points to the first uninitialized block,
                 # or to the end of the page if there are none.
                 break
             #
             # 'obj' points to a free block.  It means that
             # 'prevfreeblockat.address[0]' does not need to be updated.
             # Just read the next free block from 'obj.address[0]'.
             skip_free_blocks -= 1
             prevfreeblockat = obj
             freeblock = obj.address[0]
             #
         else:
             # 'obj' points to a valid object.
             ll_assert(freeblock > obj,
                       "freeblocks are linked out of order")
             #
             if ok_to_free_func(obj):
                 #
                 # The object should die.
                 llarena.arena_reset(obj, _dummy_size(block_size), 0)
                 llarena.arena_reserve(obj,
                                       llmemory.sizeof(llmemory.Address))
                 # Insert 'obj' in the linked list of free blocks.
                 prevfreeblockat.address[0] = obj
                 prevfreeblockat = obj
                 obj.address[0] = freeblock
                 #
                 # Update the number of free objects in the page.
                 page.nfree += 1
                 #
             else:
                 # The object survives.
                 surviving += 1
         #
         obj += block_size
     #
     # Update the global total size of objects.
     self.total_memory_used += r_uint(surviving * block_size)
     #
     # Return the number of surviving objects.
     return surviving
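Because walk_page sweeps the page in address order and splices each dead block into the free list at the point where the sweep stands, the rebuilt list stays sorted by address; that is the invariant the ll_assert(freeblock > obj, ...) above checks. A tiny model of the ordering (plain Python, not the RPython code):

    def sweep(blocks, ok_to_free):
        # blocks: {addr: 'obj' or 'free'}; returns the free list, in order.
        freelist = []
        for addr in sorted(blocks):
            if blocks[addr] == 'obj' and ok_to_free(addr):
                blocks[addr] = 'free'     # the object dies; reuse its block
            if blocks[addr] == 'free':
                freelist.append(addr)     # spliced at the sweep position
        return freelist

    blocks = {0: 'obj', 8: 'free', 16: 'obj', 24: 'obj'}
    assert sweep(blocks, lambda addr: addr == 16) == [8, 16]   # still sorted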
Example #27
 def semispace_collect(self, size_changing=False):
     if self.config.gcconfig.debugprint:
         llop.debug_print(lltype.Void)
         llop.debug_print(lltype.Void,
                          ".----------- Full collection ------------------")
         start_usage = self.free - self.tospace
         llop.debug_print(lltype.Void,
                          "| used before collection:          ",
                          start_usage, "bytes")
         start_time = time.time()
     else:
         start_time = 0 # Help the flow space
         start_usage = 0 # Help the flow space
     #llop.debug_print(lltype.Void, 'semispace_collect', int(size_changing))
     tospace = self.fromspace
     fromspace = self.tospace
     self.fromspace = fromspace
     self.tospace = tospace
     self.top_of_space = tospace + self.space_size
     scan = self.free = tospace
     self.starting_full_collect()
     self.collect_roots()
     if self.run_finalizers.non_empty():
         self.update_run_finalizers()
     scan = self.scan_copied(scan)
     if self.objects_with_finalizers.non_empty():
         scan = self.deal_with_objects_with_finalizers(scan)
     if self.objects_with_weakrefs.non_empty():
         self.invalidate_weakrefs()
     self.update_objects_with_id()
     self.finished_full_collect()
     self.debug_check_consistency()
     if not size_changing:
         llarena.arena_reset(fromspace, self.space_size, True)
         self.record_red_zone()
         self.execute_finalizers()
     #llop.debug_print(lltype.Void, 'collected', self.space_size, size_changing, self.top_of_space - self.free)
     if self.config.gcconfig.debugprint:
         end_time = time.time()
         elapsed_time = end_time - start_time
         self.total_collection_time += elapsed_time
         self.total_collection_count += 1
         total_program_time = end_time - self.program_start_time
         end_usage = self.free - self.tospace
         llop.debug_print(lltype.Void,
                          "| used after collection:           ",
                          end_usage, "bytes")
         llop.debug_print(lltype.Void,
                          "| freed:                           ",
                          start_usage - end_usage, "bytes")
         llop.debug_print(lltype.Void,
                          "| size of each semispace:          ",
                          self.space_size, "bytes")
         llop.debug_print(lltype.Void,
                          "| fraction of semispace now used:  ",
                          end_usage * 100.0 / self.space_size, "%")
         ct = self.total_collection_time
         cc = self.total_collection_count
         llop.debug_print(lltype.Void,
                          "| number of semispace_collects:    ",
                          cc)
         llop.debug_print(lltype.Void,
                          "|                         i.e.:    ",
                          cc / total_program_time, "per second")
         llop.debug_print(lltype.Void,
                          "| total time in semispace_collect: ",
                          ct, "seconds")
         llop.debug_print(lltype.Void,
                          "|                            i.e.: ",
                          ct * 100.0 / total_program_time, "%")
         llop.debug_print(lltype.Void,
                          "`----------------------------------------------")
Example #28
def test_arena():
    S = lltype.Struct('S', ('x',lltype.Signed))
    SPTR = lltype.Ptr(S)
    ssize = llmemory.raw_malloc_usage(llmemory.sizeof(S))
    myarenasize = 2*ssize+1
    a = arena_malloc(myarenasize, False)
    assert a != llmemory.NULL
    assert a + 3 != llmemory.NULL

    arena_reserve(a, llmemory.sizeof(S))
    s1_ptr1 = cast_adr_to_ptr(a, SPTR)
    s1_ptr1.x = 1
    s1_ptr2 = cast_adr_to_ptr(a, SPTR)
    assert s1_ptr2.x == 1
    assert s1_ptr1 == s1_ptr2

    py.test.raises(ArenaError, arena_reserve, a + ssize + 1,  # misaligned
                   llmemory.sizeof(S))
    arena_reserve(a + ssize + 1, llmemory.sizeof(S), check_alignment=False)
    s2_ptr1 = cast_adr_to_ptr(a + ssize + 1, SPTR)
    py.test.raises(lltype.UninitializedMemoryAccess, 's2_ptr1.x')
    s2_ptr1.x = 2
    s2_ptr2 = cast_adr_to_ptr(a + ssize + 1, SPTR)
    assert s2_ptr2.x == 2
    assert s2_ptr1 == s2_ptr2
    assert s1_ptr1 != s2_ptr1
    assert not (s2_ptr2 == s1_ptr2)
    assert s1_ptr1 == cast_adr_to_ptr(a, SPTR)

    S2 = lltype.Struct('S2', ('y',lltype.Char))
    S2PTR = lltype.Ptr(S2)
    py.test.raises(lltype.InvalidCast, cast_adr_to_ptr, a, S2PTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a+1, SPTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a+ssize, SPTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a+2*ssize, SPTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a+2*ssize+1, SPTR)
    py.test.raises(ArenaError, arena_reserve, a+1, llmemory.sizeof(S),
                   False)
    py.test.raises(ArenaError, arena_reserve, a+ssize, llmemory.sizeof(S),
                   False)
    py.test.raises(ArenaError, arena_reserve, a+2*ssize, llmemory.sizeof(S),
                   False)
    py.test.raises(ArenaError, arena_reserve, a+2*ssize+1, llmemory.sizeof(S),
                   False)

    arena_reset(a, myarenasize, True)
    py.test.raises(ArenaError, cast_adr_to_ptr, a, SPTR)
    arena_reserve(a, llmemory.sizeof(S))
    s1_ptr1 = cast_adr_to_ptr(a, SPTR)
    assert s1_ptr1.x == 0
    s1_ptr1.x = 5

    arena_reserve(a + ssize, llmemory.sizeof(S2), check_alignment=False)
    s2_ptr1 = cast_adr_to_ptr(a + ssize, S2PTR)
    assert s2_ptr1.y == '\x00'
    s2_ptr1.y = 'X'

    assert cast_adr_to_ptr(a + 0, SPTR).x == 5
    assert cast_adr_to_ptr((a + ssize + 1) - 1, S2PTR).y == 'X'

    assert (a + 4) - (a + 1) == 3
Example #29
 def walk_page(self, page, block_size, ok_to_free_func):
     """Walk over all objects in a page, and ask ok_to_free_func()."""
     #
     # 'freeblock' is the next free block
     freeblock = page.freeblock
     #
     # 'prevfreeblockat' is the address of where 'freeblock' was read from.
     prevfreeblockat = lltype.direct_fieldptr(page, 'freeblock')
     prevfreeblockat = llmemory.cast_ptr_to_adr(prevfreeblockat)
     #
     obj = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
     obj += self.hdrsize
     surviving = 0    # initially
     skip_free_blocks = page.nfree
     #
     while True:
         #
         if obj == freeblock:
             #
             if skip_free_blocks == 0:
                 #
                 # 'obj' points to the first uninitialized block,
                 # or to the end of the page if there are none.
                 break
             #
             # 'obj' points to a free block.  It means that
             # 'prevfreeblockat.address[0]' does not need to be updated.
             # Just read the next free block from 'obj.address[0]'.
             skip_free_blocks -= 1
             prevfreeblockat = obj
             freeblock = obj.address[0]
             #
         else:
             # 'obj' points to a valid object.
             ll_assert(freeblock > obj,
                       "freeblocks are linked out of order")
             #
             if ok_to_free_func(obj):
                 #
                 # The object should die.
                 llarena.arena_reset(obj, _dummy_size(block_size), 0)
                 llarena.arena_reserve(obj,
                                       llmemory.sizeof(llmemory.Address))
                 # Insert 'obj' in the linked list of free blocks.
                 prevfreeblockat.address[0] = obj
                 prevfreeblockat = obj
                 obj.address[0] = freeblock
                 #
                 # Update the number of free objects in the page.
                 page.nfree += 1
                 #
             else:
                 # The object survives.
                 surviving += 1
         #
         obj += block_size
     #
     # Update the global total size of objects.
     self.total_memory_used += r_uint(surviving * block_size)
     #
     # Return the number of surviving objects.
     return surviving
Example #30
 def semispace_collect(self, size_changing=False):
     if DEBUG_PRINT:
         import time
         llop.debug_print(lltype.Void)
         llop.debug_print(
             lltype.Void, ".----------- Full collection ------------------")
         start_usage = self.free - self.tospace
         llop.debug_print(lltype.Void,
                          "| used before collection:          ",
                          start_usage, "bytes")
         start_time = time.time()
     #llop.debug_print(lltype.Void, 'semispace_collect', int(size_changing))
     tospace = self.fromspace
     fromspace = self.tospace
     self.fromspace = fromspace
     self.tospace = tospace
     self.top_of_space = tospace + self.space_size
     scan = self.free = tospace
     self.starting_full_collect()
     self.collect_roots()
     if self.run_finalizers.non_empty():
         self.update_run_finalizers()
     scan = self.scan_copied(scan)
     if self.objects_with_finalizers.non_empty():
         scan = self.deal_with_objects_with_finalizers(scan)
     if self.objects_with_weakrefs.non_empty():
         self.invalidate_weakrefs()
     self.update_objects_with_id()
     self.finished_full_collect()
     self.debug_check_consistency()
     if not size_changing:
         llarena.arena_reset(fromspace, self.space_size, True)
         self.record_red_zone()
         self.execute_finalizers()
     #llop.debug_print(lltype.Void, 'collected', self.space_size, size_changing, self.top_of_space - self.free)
     if DEBUG_PRINT:
         end_time = time.time()
         elapsed_time = end_time - start_time
         self.total_collection_time += elapsed_time
         self.total_collection_count += 1
         total_program_time = end_time - self.program_start_time
         end_usage = self.free - self.tospace
         llop.debug_print(lltype.Void,
                          "| used after collection:           ", end_usage,
                          "bytes")
         llop.debug_print(lltype.Void,
                          "| freed:                           ",
                          start_usage - end_usage, "bytes")
         llop.debug_print(lltype.Void,
                          "| size of each semispace:          ",
                          self.space_size, "bytes")
         llop.debug_print(lltype.Void,
                          "| fraction of semispace now used:  ",
                          end_usage * 100.0 / self.space_size, "%")
         ct = self.total_collection_time
         cc = self.total_collection_count
         llop.debug_print(lltype.Void,
                          "| number of semispace_collects:    ", cc)
         llop.debug_print(lltype.Void,
                          "|                         i.e.:    ",
                          cc / total_program_time, "per second")
         llop.debug_print(lltype.Void,
                          "| total time in semispace_collect: ", ct,
                          "seconds")
         llop.debug_print(lltype.Void,
                          "|                            i.e.: ",
                          ct * 100.0 / total_program_time, "%")
         llop.debug_print(
             lltype.Void, "`----------------------------------------------")
Example #34
 def f():
     a = llarena.arena_malloc(800, False)   # an 800-byte arena, not zero-filled
     llarena.arena_reset(a, 800, 2)         # clear it; the third argument selects the clearing mode
     llarena.arena_free(a)                  # release the arena's memory
Example #35
    def semispace_collect(self, size_changing=False):
        if self.config.gcconfig.debugprint:
            llop.debug_print(lltype.Void)
            llop.debug_print(
                lltype.Void, ".----------- Full collection ------------------")
            start_usage = self.free - self.tospace
            llop.debug_print(lltype.Void,
                             "| used before collection:          ",
                             start_usage, "bytes")
            start_time = time.time()
        else:
            start_time = 0  # Help the flow space
            start_usage = 0  # Help the flow space
        #llop.debug_print(lltype.Void, 'semispace_collect', int(size_changing))

        # Switch the spaces.  We copy everything over to the empty space
        # (self.fromspace at the beginning of the collection), and clear the old
        # one (self.tospace at the beginning).  Their purposes will be reversed
        # for the next collection.
        tospace = self.fromspace
        fromspace = self.tospace
        self.fromspace = fromspace
        self.tospace = tospace
        self.top_of_space = tospace + self.space_size
        scan = self.free = tospace
        self.starting_full_collect()
        self.collect_roots()
        if self.run_finalizers.non_empty():
            self.update_run_finalizers()
        scan = self.scan_copied(scan)
        if self.objects_with_finalizers.non_empty():
            scan = self.deal_with_objects_with_finalizers(scan)
        if self.objects_with_weakrefs.non_empty():
            self.invalidate_weakrefs()
        self.update_objects_with_id()
        self.finished_full_collect()
        self.debug_check_consistency()
        if not size_changing:
            llarena.arena_reset(fromspace, self.space_size, True)
            self.record_red_zone()
            self.execute_finalizers()
        #llop.debug_print(lltype.Void, 'collected', self.space_size, size_changing, self.top_of_space - self.free)
        if self.config.gcconfig.debugprint:
            end_time = time.time()
            elapsed_time = end_time - start_time
            self.total_collection_time += elapsed_time
            self.total_collection_count += 1
            total_program_time = end_time - self.program_start_time
            end_usage = self.free - self.tospace
            llop.debug_print(lltype.Void,
                             "| used after collection:           ", end_usage,
                             "bytes")
            llop.debug_print(lltype.Void,
                             "| freed:                           ",
                             start_usage - end_usage, "bytes")
            llop.debug_print(lltype.Void,
                             "| size of each semispace:          ",
                             self.space_size, "bytes")
            llop.debug_print(lltype.Void,
                             "| fraction of semispace now used:  ",
                             end_usage * 100.0 / self.space_size, "%")
            ct = self.total_collection_time
            cc = self.total_collection_count
            llop.debug_print(lltype.Void,
                             "| number of semispace_collects:    ", cc)
            llop.debug_print(lltype.Void,
                             "|                         i.e.:    ",
                             cc / total_program_time, "per second")
            llop.debug_print(lltype.Void,
                             "| total time in semispace_collect: ", ct,
                             "seconds")
            llop.debug_print(lltype.Void,
                             "|                            i.e.: ",
                             ct * 100.0 / total_program_time, "%")
            llop.debug_print(
                lltype.Void, "`----------------------------------------------")
Example #36
def test_arena():
    S = lltype.Struct('S', ('x', lltype.Signed))
    SPTR = lltype.Ptr(S)
    ssize = llmemory.raw_malloc_usage(llmemory.sizeof(S))
    myarenasize = 2 * ssize + 1
    a = arena_malloc(myarenasize, False)
    assert a != llmemory.NULL
    assert a + 3 != llmemory.NULL

    arena_reserve(a, llmemory.sizeof(S))
    s1_ptr1 = cast_adr_to_ptr(a, SPTR)
    s1_ptr1.x = 1
    s1_ptr2 = cast_adr_to_ptr(a, SPTR)
    assert s1_ptr2.x == 1
    assert s1_ptr1 == s1_ptr2

    py.test.raises(
        ArenaError,
        arena_reserve,
        a + ssize + 1,  # misaligned
        llmemory.sizeof(S))
    arena_reserve(a + ssize + 1, llmemory.sizeof(S), check_alignment=False)
    s2_ptr1 = cast_adr_to_ptr(a + ssize + 1, SPTR)
    py.test.raises(lltype.UninitializedMemoryAccess, 's2_ptr1.x')
    s2_ptr1.x = 2
    s2_ptr2 = cast_adr_to_ptr(a + ssize + 1, SPTR)
    assert s2_ptr2.x == 2
    assert s2_ptr1 == s2_ptr2
    assert s1_ptr1 != s2_ptr1
    assert not (s2_ptr2 == s1_ptr2)
    assert s1_ptr1 == cast_adr_to_ptr(a, SPTR)

    S2 = lltype.Struct('S2', ('y', lltype.Char))
    S2PTR = lltype.Ptr(S2)
    py.test.raises(lltype.InvalidCast, cast_adr_to_ptr, a, S2PTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a + 1, SPTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a + ssize, SPTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a + 2 * ssize, SPTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a + 2 * ssize + 1, SPTR)
    py.test.raises(ArenaError, arena_reserve, a + 1, llmemory.sizeof(S), False)
    py.test.raises(ArenaError, arena_reserve, a + ssize, llmemory.sizeof(S),
                   False)
    py.test.raises(ArenaError, arena_reserve, a + 2 * ssize,
                   llmemory.sizeof(S), False)
    py.test.raises(ArenaError, arena_reserve, a + 2 * ssize + 1,
                   llmemory.sizeof(S), False)

    arena_reset(a, myarenasize, True)
    py.test.raises(ArenaError, cast_adr_to_ptr, a, SPTR)
    arena_reserve(a, llmemory.sizeof(S))
    s1_ptr1 = cast_adr_to_ptr(a, SPTR)
    assert s1_ptr1.x == 0
    s1_ptr1.x = 5

    arena_reserve(a + ssize, llmemory.sizeof(S2), check_alignment=False)
    s2_ptr1 = cast_adr_to_ptr(a + ssize, S2PTR)
    assert s2_ptr1.y == '\x00'
    s2_ptr1.y = 'X'

    assert cast_adr_to_ptr(a + 0, SPTR).x == 5
    assert cast_adr_to_ptr((a + ssize + 1) - 1, S2PTR).y == 'X'

    assert (a + 4) - (a + 1) == 3
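
test_arena() above pins down the arena contract: memory must be reserved
before an address can be cast to a pointer, misaligned reservations are
rejected unless check_alignment=False, and arena_reset() invalidates every
earlier reservation. A toy Python model of that contract follows, for
illustration only; the class, its methods, and the %8 alignment rule are
all made up here and are not the llarena API.

class ToyArenaError(Exception):
    pass

class ToyArena:
    def __init__(self, size):
        self.size = size
        self.reserved = {}            # offset -> storage for that reservation

    def reserve(self, offset, nbytes, check_alignment=True):
        if offset + nbytes > self.size:
            raise ToyArenaError("reserving past the end of the arena")
        if check_alignment and offset % 8 != 0:
            raise ToyArenaError("misaligned reservation")
        self.reserved[offset] = {}    # fresh, zero-initialized storage
        return offset

    def access(self, offset):         # stands in for cast_adr_to_ptr()
        if offset not in self.reserved:
            raise ToyArenaError("address was not reserved")
        return self.reserved[offset]

    def reset(self):                  # stands in for arena_reset()
        self.reserved.clear()         # every earlier reservation is gone

a = ToyArena(64)
a.reserve(0, 8)
a.access(0)['x'] = 5
a.reset()
# a.access(0) now raises ToyArenaError, like cast_adr_to_ptr after arena_reset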