Example #1
    def double_space_size(self):
        self.red_zone = 0
        old_fromspace = self.fromspace
        newsize = self.space_size * 2
        newspace = llarena.arena_malloc(newsize, True)
        if not newspace:
            return False  # out of memory
        llarena.arena_free(old_fromspace)
        self.fromspace = newspace
        # now self.tospace contains the existing objects and
        # self.fromspace is the freshly allocated bigger space

        self.semispace_collect(size_changing=True)
        self.top_of_space = self.tospace + newsize
        # now self.tospace is the freshly allocated bigger space,
        # and self.fromspace is the old smaller space, now empty
        llarena.arena_free(self.fromspace)

        newspace = llarena.arena_malloc(newsize, True)
        if not newspace:
            # Complex failure case: we have in self.tospace a big chunk
            # of memory, and the two smaller original spaces are already gone.
            # Unsure if it's worth these efforts, but we can artificially
            # split self.tospace in two again...
            self.max_space_size = self.space_size  # don't try to grow again,
            #              because doing arena_free(self.fromspace) would crash
            self.fromspace = self.tospace + self.space_size
            self.top_of_space = self.fromspace
            ll_assert(self.free <= self.top_of_space,
                      "unexpected growth of GC space usage during collect")
            return False  # out of memory

        self.fromspace = newspace
        self.space_size = newsize
        return True  # success
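
double_space_size() grows the semispace heap in two steps: allocate a bigger arena and collect into it, then replace the other half as well; if the second allocation fails, it splits the one big arena back into two halves by plain address arithmetic and caps max_space_size, because only the address originally returned by arena_malloc may be passed back to arena_free. The following is a minimal sketch of that allocate/split/free lifecycle, not the GC itself; the import path (rpython.rtyper.lltypesystem in recent PyPy checkouts, pypy.rpython.lltypesystem in older ones) and the concrete sizes are assumptions.

from rpython.rtyper.lltypesystem import llarena

space_size = 4096
# One block of twice the semispace size; the second argument of arena_malloc
# is assumed to request zero-filled memory (the examples above pass True).
space = llarena.arena_malloc(space_size * 2, True)
if space:
    tospace = space                      # first half
    fromspace = space + space_size       # second half: plain address arithmetic
    top_of_space = fromspace
    # Only 'space' itself may be handed back; arena_free(fromspace) would
    # crash, which is why the failure path above refuses to grow further.
    llarena.arena_free(space)
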
Example #2
    def double_space_size(self):
        self.red_zone = 0
        old_fromspace = self.fromspace
        newsize = self.space_size * 2
        newspace = llarena.arena_malloc(newsize, True)
        if not newspace:
            return False    # out of memory
        llarena.arena_free(old_fromspace)
        self.fromspace = newspace
        # now self.tospace contains the existing objects and
        # self.fromspace is the freshly allocated bigger space

        self.semispace_collect(size_changing=True)
        self.top_of_space = self.tospace + newsize
        # now self.tospace is the freshly allocated bigger space,
        # and self.fromspace is the old smaller space, now empty
        llarena.arena_free(self.fromspace)

        newspace = llarena.arena_malloc(newsize, True)
        if not newspace:
            # Complex failure case: we have in self.tospace a big chunk
            # of memory, and the two smaller original spaces are already gone.
            # Unsure if it's worth these efforts, but we can artificially
            # split self.tospace in two again...
            self.max_space_size = self.space_size    # don't try to grow again,
            #              because doing arena_free(self.fromspace) would crash
            self.fromspace = self.tospace + self.space_size
            self.top_of_space = self.fromspace
            ll_assert(self.free <= self.top_of_space,
                         "unexpected growth of GC space usage during collect")
            return False     # out of memory

        self.fromspace = newspace
        self.space_size = newsize
        return True    # success
Example #3
 def markcompactcollect(self, needed=0):
     start_time = self.debug_collect_start()
     self.debug_check_consistency()
     self.to_see = self.AddressStack()
     self.mark_roots_recursively()
     if (self.objects_with_finalizers.non_empty() or
         self.run_finalizers.non_empty()):
         self.mark_objects_with_finalizers()
         self._trace_and_mark()
     self.to_see.delete()
     num_of_alive_objs = self.compute_alive_objects()
     size_of_alive_objs = self.totalsize_of_objs
     totalsize = self.new_space_size(size_of_alive_objs, needed +
                                     num_of_alive_objs * BYTES_PER_TID)
     tid_backup_size = (llmemory.sizeof(self.TID_BACKUP, 0) +
                        llmemory.sizeof(TID_TYPE) * num_of_alive_objs)
     used_space_now = self.next_collect_after + raw_malloc_usage(tid_backup_size)
     if totalsize >= self.space_size or used_space_now >= self.space_size:
         toaddr = self.double_space_size(totalsize)
         llarena.arena_reserve(toaddr + size_of_alive_objs, tid_backup_size)
         self.tid_backup = llmemory.cast_adr_to_ptr(
             toaddr + size_of_alive_objs,
             lltype.Ptr(self.TID_BACKUP))
         resizing = True
     else:
         toaddr = llarena.arena_new_view(self.space)
         llarena.arena_reserve(self.top_of_space, tid_backup_size)
         self.tid_backup = llmemory.cast_adr_to_ptr(
             self.top_of_space,
             lltype.Ptr(self.TID_BACKUP))
         resizing = False
     self.next_collect_after = totalsize
     weakref_offsets = self.collect_weakref_offsets()
     finaladdr = self.update_forward_pointers(toaddr, num_of_alive_objs)
     if (self.run_finalizers.non_empty() or
         self.objects_with_finalizers.non_empty()):
         self.update_run_finalizers()
     if self.objects_with_weakrefs.non_empty():
         self.invalidate_weakrefs(weakref_offsets)
     self.update_objects_with_id()
     self.compact(resizing)
     if not resizing:
         size = toaddr + self.space_size - finaladdr
         llarena.arena_reset(finaladdr, size, True)
     else:
         if we_are_translated():
             # because we free stuff already in raw_memmove, we
             # would get double free here. Let's free it anyway
             llarena.arena_free(self.space)
         llarena.arena_reset(toaddr + size_of_alive_objs, tid_backup_size,
                             True)
     self.space        = toaddr
     self.free         = finaladdr
     self.top_of_space = toaddr + self.next_collect_after
     self.debug_check_consistency()
     self.tid_backup = lltype.nullptr(self.TID_BACKUP)
     if self.run_finalizers.non_empty():
         self.execute_finalizers()
     self.debug_collect_finish(start_time)
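
One detail worth isolating from the collection above is how tid_backup is set up: a raw range inside the destination space is claimed with arena_reserve and then viewed as a typed array through cast_adr_to_ptr. The sketch below shows only that pattern; BACKUP, its length and the arena size are illustrative stand-ins rather than the GC's real TID_BACKUP layout, and the import path is assumed as before.

from rpython.rtyper.lltypesystem import llarena, llmemory, lltype

# Hypothetical stand-in for the GC's TID_BACKUP array type.
BACKUP = lltype.Array(lltype.Signed, hints={'nolength': True})
n = 8
a = llarena.arena_malloc(1024, False)
llarena.arena_reserve(a, llmemory.sizeof(BACKUP, n))       # claim raw bytes
backup = llmemory.cast_adr_to_ptr(a, lltype.Ptr(BACKUP))   # view them as an array
backup[0] = 42
llarena.arena_free(a)
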
Example #4
 def markcompactcollect(self, needed=0):
     start_time = self.debug_collect_start()
     self.debug_check_consistency()
     self.to_see = self.AddressStack()
     self.mark_roots_recursively()
     if (self.objects_with_finalizers.non_empty()
             or self.run_finalizers.non_empty()):
         self.mark_objects_with_finalizers()
         self._trace_and_mark()
     self.to_see.delete()
     num_of_alive_objs = self.compute_alive_objects()
     size_of_alive_objs = self.totalsize_of_objs
     totalsize = self.new_space_size(
         size_of_alive_objs, needed + num_of_alive_objs * BYTES_PER_TID)
     tid_backup_size = (llmemory.sizeof(self.TID_BACKUP, 0) +
                        llmemory.sizeof(TID_TYPE) * num_of_alive_objs)
     used_space_now = self.next_collect_after + raw_malloc_usage(
         tid_backup_size)
     if totalsize >= self.space_size or used_space_now >= self.space_size:
         toaddr = self.double_space_size(totalsize)
         llarena.arena_reserve(toaddr + size_of_alive_objs, tid_backup_size)
         self.tid_backup = llmemory.cast_adr_to_ptr(
             toaddr + size_of_alive_objs, lltype.Ptr(self.TID_BACKUP))
         resizing = True
     else:
         toaddr = llarena.arena_new_view(self.space)
         llarena.arena_reserve(self.top_of_space, tid_backup_size)
         self.tid_backup = llmemory.cast_adr_to_ptr(
             self.top_of_space, lltype.Ptr(self.TID_BACKUP))
         resizing = False
     self.next_collect_after = totalsize
     weakref_offsets = self.collect_weakref_offsets()
     finaladdr = self.update_forward_pointers(toaddr, num_of_alive_objs)
     if (self.run_finalizers.non_empty()
             or self.objects_with_finalizers.non_empty()):
         self.update_run_finalizers()
     if self.objects_with_weakrefs.non_empty():
         self.invalidate_weakrefs(weakref_offsets)
     self.update_objects_with_id()
     self.compact(resizing)
     if not resizing:
         size = toaddr + self.space_size - finaladdr
         llarena.arena_reset(finaladdr, size, True)
     else:
         if we_are_translated():
             # because we free stuff already in raw_memmove, we
             # would get double free here. Let's free it anyway
             llarena.arena_free(self.space)
         llarena.arena_reset(toaddr + size_of_alive_objs, tid_backup_size,
                             True)
     self.space = toaddr
     self.free = finaladdr
     self.top_of_space = toaddr + self.next_collect_after
     self.debug_check_consistency()
     self.tid_backup = lltype.nullptr(self.TID_BACKUP)
     if self.run_finalizers.non_empty():
         self.execute_finalizers()
     self.debug_collect_finish(start_time)
Example #5
 def mass_free(self, ok_to_free_func):
     """For each object, if ok_to_free_func(obj) returns True, then free
     the object.
     """
     self.total_memory_used = r_uint(0)
     #
     # For each size class:
     size_class = self.small_request_threshold >> WORD_POWER_2
     while size_class >= 1:
         #
         # Walk the pages in 'page_for_size[size_class]' and
         # 'full_page_for_size[size_class]' and free some objects.
         # Pages completely freed are added to 'page.arena.freepages',
         # and become available for reuse by any size class.  Pages
         # not completely freed are re-chained either in
         # 'full_page_for_size[]' or 'page_for_size[]'.
         self.mass_free_in_pages(size_class, ok_to_free_func)
         #
         size_class -= 1
     #
     # Rehash arenas into the correct arenas_lists[i].  If
     # 'self.current_arena' contains an arena too, it remains there.
     (self.old_arenas_lists, self.arenas_lists) = (self.arenas_lists,
                                                   self.old_arenas_lists)
     #
     i = 0
     while i < self.max_pages_per_arena:
         self.arenas_lists[i] = ARENA_NULL
         i += 1
     #
     i = 0
     while i < self.max_pages_per_arena:
         arena = self.old_arenas_lists[i]
         while arena != ARENA_NULL:
             nextarena = arena.nextarena
             #
             if arena.nfreepages == arena.totalpages:
                 #
                 # The whole arena is empty.  Free it.
                 llarena.arena_free(arena.base)
                 lltype.free(arena, flavor='raw', track_allocation=False)
                 #
             else:
                 # Insert 'arena' in the correct arenas_lists[n]
                 n = arena.nfreepages
                 ll_assert(
                     n < self.max_pages_per_arena,
                     "totalpages != nfreepages >= max_pages_per_arena")
                 arena.nextarena = self.arenas_lists[n]
                 self.arenas_lists[n] = arena
             #
             arena = nextarena
         i += 1
     #
     self.min_empty_nfreepages = 1
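
The second half of mass_free re-buckets the surviving arenas: each one is pushed onto arenas_lists[n], where n is its current number of free pages, while arenas whose pages are all free are released. The pure-Python sketch below models just that re-chaining with a made-up ArenaStub and made-up page counts, to make the per-bucket linked lists explicit; it has no RPython dependencies.

class ArenaStub(object):
    """Made-up stand-in carrying just the fields the re-chaining uses."""
    def __init__(self, nfreepages, totalpages):
        self.nfreepages = nfreepages
        self.totalpages = totalpages
        self.nextarena = None

max_pages_per_arena = 4
arenas_lists = [None] * max_pages_per_arena      # one singly linked list per bucket

for arena in [ArenaStub(0, 3), ArenaStub(2, 3), ArenaStub(3, 3)]:
    if arena.nfreepages == arena.totalpages:
        continue                                 # fully empty: would be freed instead
    n = arena.nfreepages
    arena.nextarena = arenas_lists[n]            # push onto the bucket's list
    arenas_lists[n] = arena

assert arenas_lists[0].nfreepages == 0
assert arenas_lists[2].nfreepages == 2
assert arenas_lists[3] is None                   # the empty arena was dropped
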
Example #6
 def mass_free(self, ok_to_free_func):
     """For each object, if ok_to_free_func(obj) returns True, then free
     the object.
     """
     self.total_memory_used = r_uint(0)
     #
     # For each size class:
     size_class = self.small_request_threshold >> WORD_POWER_2
     while size_class >= 1:
         #
         # Walk the pages in 'page_for_size[size_class]' and
         # 'full_page_for_size[size_class]' and free some objects.
         # Pages completely freed are added to 'page.arena.freepages',
         # and become available for reuse by any size class.  Pages
         # not completely freed are re-chained either in
         # 'full_page_for_size[]' or 'page_for_size[]'.
         self.mass_free_in_pages(size_class, ok_to_free_func)
         #
         size_class -= 1
     #
     # Rehash arenas into the correct arenas_lists[i].  If
     # 'self.current_arena' contains an arena too, it remains there.
     (self.old_arenas_lists, self.arenas_lists) = (
         self.arenas_lists, self.old_arenas_lists)
     #
     i = 0
     while i < self.max_pages_per_arena:
         self.arenas_lists[i] = ARENA_NULL
         i += 1
     #
     i = 0
     while i < self.max_pages_per_arena:
         arena = self.old_arenas_lists[i]
         while arena != ARENA_NULL:
             nextarena = arena.nextarena
             #
             if arena.nfreepages == arena.totalpages:
                 #
                 # The whole arena is empty.  Free it.
                 llarena.arena_free(arena.base)
                 lltype.free(arena, flavor='raw', track_allocation=False)
                 #
             else:
                 # Insert 'arena' in the correct arenas_lists[n]
                 n = arena.nfreepages
                 ll_assert(n < self.max_pages_per_arena,
                          "totalpages != nfreepages >= max_pages_per_arena")
                 arena.nextarena = self.arenas_lists[n]
                 self.arenas_lists[n] = arena
             #
             arena = nextarena
         i += 1
     #
     self.min_empty_nfreepages = 1
Example #7
    def markcompactcollect(self, requested_size=0):
        self.debug_collect_start(requested_size)
        self.debug_check_consistency()
        #
        # Mark alive objects
        #
        self.to_see = self.AddressDeque()
        self.trace_from_roots()
        self.to_see.delete()
        #
        # Prepare new views on the same memory
        #
        toaddr = llarena.arena_new_view(self.space)
        maxnum = self.space_size - (self.free - self.space)
        maxnum /= BYTES_PER_TID
        llarena.arena_reserve(self.free, llmemory.sizeof(TID_BACKUP, maxnum))
        self.tid_backup = llmemory.cast_adr_to_ptr(self.free,
                                                   lltype.Ptr(TID_BACKUP))
        #
        # Walk all objects and assign forward pointers in the same order,
        # also updating all references
        #
        self.update_forward_pointers(toaddr, maxnum)
        if (self.run_finalizers.non_empty()
                or self.objects_with_finalizers.non_empty()):
            self.update_run_finalizers()

        self.update_objects_with_id()
        self.compact()
        #
        self.tid_backup = lltype.nullptr(TID_BACKUP)
        self.free = self.finaladdr
        self.next_collect_after = self.next_collection(self.finaladdr - toaddr,
                                                       self.num_alive_objs,
                                                       requested_size)
        #
        if not translated_to_c():
            remaining_size = (toaddr + self.space_size) - self.finaladdr
            llarena.arena_reset(self.finaladdr, remaining_size, False)
            llarena.arena_free(self.space)
            self.space = toaddr
        #
        self.debug_check_consistency()
        self.debug_collect_finish()
        if self.next_collect_after < 0:
            raise MemoryError
        #
        if self.run_finalizers.non_empty():
            self.execute_finalizers()
            return True  # executed some finalizers
        else:
            return False  # no finalizer executed
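
Compared with Example #3, this version compacts through a view obtained from arena_new_view: forwarding locations are reserved through toaddr while the old objects still sit in self.space, and only an untranslated run frees the old space and adopts the view afterwards (the translated_to_c() guard). The sketch below shows just the view-then-reserve step; how the view relates to the original memory differs between translated and emulated runs, so the struct type used and the fact that 'view' is never freed here are illustrative assumptions.

from rpython.rtyper.lltypesystem import llarena, llmemory, lltype

SX = lltype.Struct('SX', ('x', lltype.Signed))   # hypothetical payload type

a = llarena.arena_malloc(64, False)
llarena.arena_reserve(a, llmemory.sizeof(SX))    # an "old" object at offset 0
view = llarena.arena_new_view(a)                 # fresh bookkeeping over the space
llarena.arena_reserve(view, llmemory.sizeof(SX)) # re-reserve at its new position
llarena.arena_free(a)                            # the original allocation is freed
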
Example #8
    def markcompactcollect(self, requested_size=0):
        self.debug_collect_start(requested_size)
        self.debug_check_consistency()
        #
        # Mark alive objects
        #
        self.to_see = self.AddressDeque()
        self.trace_from_roots()
        self.to_see.delete()
        #
        # Prepare new views on the same memory
        #
        toaddr = llarena.arena_new_view(self.space)
        maxnum = self.space_size - (self.free - self.space)
        maxnum /= BYTES_PER_TID
        llarena.arena_reserve(self.free, llmemory.sizeof(TID_BACKUP, maxnum))
        self.tid_backup = llmemory.cast_adr_to_ptr(self.free,
                                                   lltype.Ptr(TID_BACKUP))
        #
        # Walk all objects and assign forward pointers in the same order,
        # also updating all references
        #
        self.update_forward_pointers(toaddr, maxnum)
        if (self.run_finalizers.non_empty() or
            self.objects_with_finalizers.non_empty()):
            self.update_run_finalizers()

        self.update_objects_with_id()
        self.compact()
        #
        self.tid_backup = lltype.nullptr(TID_BACKUP)
        self.free = self.finaladdr
        self.next_collect_after = self.next_collection(self.finaladdr - toaddr,
                                                       self.num_alive_objs,
                                                       requested_size)
        #
        if not translated_to_c():
            remaining_size = (toaddr + self.space_size) - self.finaladdr
            llarena.arena_reset(self.finaladdr, remaining_size, False)
            llarena.arena_free(self.space)
            self.space = toaddr
        #
        self.debug_check_consistency()
        self.debug_collect_finish()
        if self.next_collect_after < 0:
            raise MemoryError
        #
        if self.run_finalizers.non_empty():
            self.execute_finalizers()
            return True      # executed some finalizers
        else:
            return False     # no finalizer executed
Example #9
def test_look_inside_object():
    # this code is also used in translation tests below
    myarenasize = 50
    a = arena_malloc(myarenasize, False)
    b = a + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(b, precomputed_size)
    (b + llmemory.offsetof(SX, 'x')).signed[0] = 123
    assert llmemory.cast_adr_to_ptr(b, SPTR).x == 123
    llmemory.cast_adr_to_ptr(b, SPTR).x += 1
    assert (b + llmemory.offsetof(SX, 'x')).signed[0] == 124
    arena_reset(a, myarenasize, True)
    arena_reserve(b, round_up_for_allocation(llmemory.sizeof(SX)))
    assert llmemory.cast_adr_to_ptr(b, SPTR).x == 0
    arena_free(a)
    return 42
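
test_look_inside_object exercises the reserve/poke/reset/re-reserve cycle but relies on names (SX, SPTR, precomputed_size) defined elsewhere in the test module. The hedged reconstruction below gives plausible definitions so the test reads as self-contained; the actual definitions in test_llarena.py may differ.

from rpython.rtyper.lltypesystem import llmemory, lltype
from rpython.rtyper.lltypesystem.llarena import (
    arena_malloc, arena_reserve, arena_reset, arena_free,
    round_up_for_allocation)

# Assumed definitions: a one-field raw struct is enough for the accesses
# made by the test above.
SX = lltype.Struct('SX', ('x', lltype.Signed))
SPTR = lltype.Ptr(SX)
precomputed_size = round_up_for_allocation(llmemory.sizeof(SX))
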
Example #10
 def _teardown(self):
     llop.debug_print(lltype.Void, "Teardown")
     llarena.arena_free(self.fromspace)
     llarena.arena_free(self.tospace)
Example #11
 def _teardown(self):
     debug_print("Teardown")
     llarena.arena_free(self.fromspace)
     llarena.arena_free(self.tospace)
Example #12
 def f():
     a = llarena.arena_malloc(800, False)
     llarena.arena_reset(a, 800, 2)
     llarena.arena_free(a)
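
Across these examples arena_reset is called with three different third arguments: True (Example #9, where the re-reserved object then reads back as 0, so the range was zero-filled), False (Example #7), and 2 here. The sketch below merely lines the three forms up; reading False as "reusable but not cleared" and 2 as another clearing variant is an assumption, not something these snippets prove.

from rpython.rtyper.lltypesystem import llarena

a = llarena.arena_malloc(800, False)
llarena.arena_reset(a, 800, False)   # assumed: mark reusable without zero-filling
llarena.arena_reset(a, 800, True)    # mark reusable and zero-fill (as Example #9 shows)
llarena.arena_reset(a, 800, 2)       # assumed: another clearing variant, as used above
llarena.arena_free(a)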