Example #1: raw_malloc / cast_adr_to_ptr struct access
 def f():
     adr = llmemory.raw_malloc(sizeofs)
     s = llmemory.cast_adr_to_ptr(adr, STRUCTPTR)
     s.y = 5  # does not crash
     result = (adr + offsety).signed[0] * 10 + int(offsety < sizeofs)
     llmemory.raw_free(adr)
     return result
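Note: the snippet relies on STRUCTPTR, sizeofs and offsety being defined by the surrounding test module. A minimal sketch of such a setup, assuming a plain two-field struct and llmemory's symbolic size/offset helpers (not the original test's exact definitions), might look like this:

 from pypy.rpython.lltypesystem import lltype, llmemory

 # Hypothetical setup (an assumption, not taken from the original test): a
 # simple two-word struct plus the symbolic size and field offset used above.
 S = lltype.Struct('S', ('x', lltype.Signed), ('y', lltype.Signed))
 STRUCTPTR = lltype.Ptr(S)               # pointer type for cast_adr_to_ptr
 sizeofs = llmemory.sizeof(S)            # symbolic size of the whole struct
 offsety = llmemory.offsetof(S, 'y')     # symbolic offset of field 'y'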
Example #2: AddressStack push/pop exercise
 def f():
     addr = raw_malloc(INT_SIZE*100)
     ll = AddressStack()
     ll.append(addr)
     ll.append(addr + INT_SIZE*1)
     ll.append(addr + INT_SIZE*2)
     a = ll.pop()
     res = (a - INT_SIZE*2 == addr)
     a = ll.pop()
     res = res and (a - INT_SIZE*1 == addr)
     res = res and ll.non_empty()
     a = ll.pop()
     res = res and a == addr
     res = res and not ll.non_empty()
     ll.append(addr)
     for i in range(300):
         ll.append(addr + INT_SIZE*i)
     for i in range(299, -1, -1):
         a = ll.pop()
         res = res and (a - INT_SIZE*i == addr)
     for i in range(300):
         ll.append(addr + INT_SIZE*i)
     for i in range(299, -1, -1):
         a = ll.pop()
         res = res and (a - INT_SIZE*i == addr)
     ll.delete()
     ll = AddressStack()
     ll.append(addr)
     ll.append(addr + INT_SIZE*1)
     ll.append(addr + INT_SIZE*2)
     ll.delete()
     raw_free(addr)
     return res
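Note: raw_malloc, raw_free, INT_SIZE and AddressStack are bound in the enclosing test module. A plausible setup, sketched under the assumption that the standard llmemory and memory.support helpers are used:

 from pypy.rpython.lltypesystem import lltype, llmemory
 from pypy.rpython.lltypesystem.llmemory import raw_malloc, raw_free, NULL
 from pypy.rpython.memory.support import get_address_stack

 # Assumed module-level bindings for the AddressStack examples.
 INT_SIZE = llmemory.sizeof(lltype.Signed)   # size of one machine word
 AddressStack = get_address_stack()          # chunked stack-of-addresses class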
Example #3: char access at a computed offset
 def f(offset, char):
     char = chr(char)
     addr = llmemory.raw_malloc(10000)
     same_offset = (addr + 2 * offset - offset) - addr
     addr.char[offset] = char
     result = (addr + same_offset).char[0]
     llmemory.raw_free(addr)
     return ord(result)
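Since (addr + 2 * offset - offset) - addr simplifies back to offset, the read at same_offset returns the character written earlier. A hedged usage sketch of the expected behaviour (such functions are normally driven through the RPython toolchain rather than called under plain CPython):

 # Expected round-trip for an in-bounds offset (illustrative only).
 assert f(10, ord('x')) == ord('x')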
Example #4: raw_memcopy between raw blocks
 def f():
     addr = llmemory.raw_malloc(100)
     addr.signed[0] = 12
     (addr + 10).signed[0] = 42
     (addr + 20).char[0] = "a"
     addr1 = llmemory.raw_malloc(100)
     llmemory.raw_memcopy(addr, addr1, 100)
     result = addr1.signed[0] == 12
     result = result and (addr1 + 10).signed[0] == 42
     result = result and (addr1 + 20).char[0] == "a"
     llmemory.raw_free(addr)
     llmemory.raw_free(addr1)
     return result
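llmemory.raw_memcopy(source, dest, size) copies size bytes from one raw block to another, so values stored at a given offset reappear at the same offset in the copy. A minimal sketch of the same pattern in isolation (again intended to run through the RPython toolchain, not plain CPython):

 def copy_one_word():
     src = llmemory.raw_malloc(16)
     dst = llmemory.raw_malloc(16)
     src.signed[0] = 7
     llmemory.raw_memcopy(src, dst, 16)   # byte-for-byte copy of the block
     ok = dst.signed[0] == 7
     llmemory.raw_free(src)
     llmemory.raw_free(dst)
     return ok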
Example #5: thread_die()
 def thread_die():
     """Called just before the final GIL release done by a dying
     thread.  After a thread_die(), no more gc operations should
     occur in this thread.
     """
     aid = get_aid()
     gcdata.thread_stacks.setitem(aid, llmemory.NULL)
     old = gcdata.root_stack_base
     if gcdata._fresh_rootstack == llmemory.NULL:
         gcdata._fresh_rootstack = old
     else:
         llmemory.raw_free(old)
     install_new_stack(gcdata.main_thread)
     # from time to time, rehash the dictionary to remove
     # old NULL entries
     gcdata.dead_threads_count += 1
     if (gcdata.dead_threads_count & 511) == 0:
         gcdata.thread_stacks = copy_without_null_values(
             gcdata.thread_stacks)
Example #6: test_big_access
 def test_big_access(self):
     AddressStack = get_address_stack()
     addrs = [raw_malloc(llmemory.sizeof(lltype.Signed))
              for i in range(3000)]
     ll = AddressStack()
     for i in range(3000):
         print i
         ll.append(addrs[i])
     for i in range(3000)[::-1]:
         a = ll.pop()
         assert a == addrs[i]
     for i in range(3000):
         print i
         ll.append(addrs[i])
     for i in range(3000)[::-1]:
         a = ll.pop()
         assert a == addrs[i]
     ll.delete()
     for addr in addrs:
         raw_free(addr)
Example #7: test_simple_access
 def test_simple_access(self):
     AddressStack = get_address_stack()
     addr0 = raw_malloc(llmemory.sizeof(lltype.Signed))
     addr1 = raw_malloc(llmemory.sizeof(lltype.Signed))
     addr2 = raw_malloc(llmemory.sizeof(lltype.Signed))
     ll = AddressStack()
     ll.append(addr0)
     ll.append(addr1)
     ll.append(addr2)
     assert ll.non_empty()
     a = ll.pop()
     assert a == addr2
     assert ll.non_empty()
     a = ll.pop()
     assert a == addr1
     assert ll.non_empty()
     a = ll.pop()
     assert a == addr0
     assert not ll.non_empty()
     ll.append(addr0)
     ll.delete()
     ll = AddressStack()
     ll.append(addr0)
     ll.append(addr1)
     ll.append(addr2)
     ll.append(NULL)
     a = ll.pop()
     assert a == NULL
     ll.delete()
     raw_free(addr2)
     raw_free(addr1)
     raw_free(addr0)
Example #8: MarkSweepGC collect()
    def collect(self, gen=0):
        # 1. mark from the roots, and also the objects that objects-with-del
        #    point to (using the list of malloced_objects_with_finalizer)
        # 2. walk the list of objects-without-del and free the ones not marked
        # 3. walk the list of objects-with-del and for the ones not marked:
        #    call __del__, move the object to the list of object-without-del
        import time
        from pypy.rpython.lltypesystem.lloperation import llop
        debug_start("gc-collect")
        start_time = time.time()
        self.collect_in_progress = True
        size_gc_header = self.gcheaderbuilder.size_gc_header
##        llop.debug_view(lltype.Void, self.malloced_objects, self.poolnodes,
##                        size_gc_header)

        # push the roots on the mark stack
        objects = self.AddressStack() # mark stack
        self._mark_stack = objects
        self.root_walker.walk_roots(
            MarkSweepGC._mark_root,  # stack roots
            MarkSweepGC._mark_root,  # static in prebuilt non-gc structures
            MarkSweepGC._mark_root)  # static in prebuilt gc objects

        # from this point onwards, no more mallocs should be possible
        old_malloced = self.bytes_malloced
        self.bytes_malloced = 0
        curr_heap_size = 0
        freed_size = 0

        # mark objects reachable by objects with a finalizer, but not those
        # themselves. add their size to curr_heap_size, since they always
        # survive the collection
        hdr = self.malloced_objects_with_finalizer
        while hdr:
            next = hdr.next
            typeid = hdr.typeid16
            gc_info = llmemory.cast_ptr_to_adr(hdr)
            obj = gc_info + size_gc_header
            if not hdr.mark:
                self.add_reachable_to_stack(obj, objects)
            addr = llmemory.cast_ptr_to_adr(hdr)
            size = self.fixed_size(typeid)
            if self.is_varsize(typeid):
                length = (obj + self.varsize_offset_to_length(typeid)).signed[0]
                size += self.varsize_item_sizes(typeid) * length
            estimate = raw_malloc_usage(size_gc_header + size)
            curr_heap_size += estimate
            hdr = next

        # mark things on the mark stack and put their descendants onto the
        # stack until the stack is empty
        while objects.non_empty():  #mark
            curr = objects.pop()
            gc_info = curr - size_gc_header
            hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
            if hdr.mark:
                continue
            self.add_reachable_to_stack(curr, objects)
            hdr.mark = True
        objects.delete()
        # also mark self.curpool
        if self.curpool:
            gc_info = llmemory.cast_ptr_to_adr(self.curpool) - size_gc_header
            hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
            hdr.mark = True
        # go through the list of objects containing weak pointers
        # and kill the links if they go to dead objects
        # if the object itself is not marked, free it
        hdr = self.objects_with_weak_pointers
        surviving = lltype.nullptr(self.HDR)
        while hdr:
            typeid = hdr.typeid16
            next = hdr.next
            addr = llmemory.cast_ptr_to_adr(hdr)
            size = self.fixed_size(typeid)
            estimate = raw_malloc_usage(size_gc_header + size)
            if hdr.mark:
                offset = self.weakpointer_offset(typeid)
                hdr.mark = False
                gc_info = llmemory.cast_ptr_to_adr(hdr)
                weakref_obj = gc_info + size_gc_header
                pointing_to = (weakref_obj + offset).address[0]
                if pointing_to:
                    gc_info_pointing_to = pointing_to - size_gc_header
                    hdr_pointing_to = llmemory.cast_adr_to_ptr(
                        gc_info_pointing_to, self.HDRPTR)
                    # pointed to object will die
                    # XXX what to do if the object has a finalizer which resurrects
                    # the object?
                    if not hdr_pointing_to.mark:
                        (weakref_obj + offset).address[0] = NULL
                hdr.next = surviving
                surviving = hdr
                curr_heap_size += estimate
            else:
                gc_info = llmemory.cast_ptr_to_adr(hdr)
                weakref_obj = gc_info + size_gc_header
                self.write_free_statistics(typeid, weakref_obj)
                freed_size += estimate
                raw_free(addr)
            hdr = next
        self.objects_with_weak_pointers = surviving
        # sweep: delete objects without del if they are not marked
        # unmark objects without del that are marked
        firstpoolnode = lltype.malloc(self.POOLNODE, flavor='raw')
        firstpoolnode.linkedlist = self.malloced_objects
        firstpoolnode.nextnode = self.poolnodes
        prevpoolnode = lltype.nullptr(self.POOLNODE)
        poolnode = firstpoolnode
        while poolnode:   #sweep
            ppnext = llmemory.cast_ptr_to_adr(poolnode)
            ppnext += llmemory.offsetof(self.POOLNODE, 'linkedlist')
            hdr = poolnode.linkedlist
            while hdr:  #sweep
                typeid = hdr.typeid16
                next = hdr.next
                addr = llmemory.cast_ptr_to_adr(hdr)
                size = self.fixed_size(typeid)
                if self.is_varsize(typeid):
                    length = (addr + size_gc_header + self.varsize_offset_to_length(typeid)).signed[0]
                    size += self.varsize_item_sizes(typeid) * length
                estimate = raw_malloc_usage(size_gc_header + size)
                if hdr.mark:
                    hdr.mark = False
                    ppnext.address[0] = addr
                    ppnext = llmemory.cast_ptr_to_adr(hdr)
                    ppnext += llmemory.offsetof(self.HDR, 'next')
                    curr_heap_size += estimate
                else:
                    gc_info = llmemory.cast_ptr_to_adr(hdr)
                    obj = gc_info + size_gc_header
                    self.write_free_statistics(typeid, obj)
                    freed_size += estimate
                    raw_free(addr)
                hdr = next
            ppnext.address[0] = llmemory.NULL
            next = poolnode.nextnode
            if not poolnode.linkedlist and prevpoolnode:
                # completely empty node
                prevpoolnode.nextnode = next
                lltype.free(poolnode, flavor='raw')
            else:
                prevpoolnode = poolnode
            poolnode = next
        self.malloced_objects = firstpoolnode.linkedlist
        self.poolnodes = firstpoolnode.nextnode
        lltype.free(firstpoolnode, flavor='raw')
        #llop.debug_view(lltype.Void, self.malloced_objects, self.malloced_objects_with_finalizer, size_gc_header)

        end_time = time.time()
        compute_time = start_time - self.prev_collect_end_time
        collect_time = end_time - start_time

        garbage_collected = old_malloced - (curr_heap_size - self.heap_usage)

        if (collect_time * curr_heap_size >
            0.02 * garbage_collected * compute_time): 
            self.bytes_malloced_threshold += self.bytes_malloced_threshold / 2
        if (collect_time * curr_heap_size <
            0.005 * garbage_collected * compute_time):
            self.bytes_malloced_threshold /= 2

        # Use at least as much memory as current live objects.
        if curr_heap_size > self.bytes_malloced_threshold:
            self.bytes_malloced_threshold = curr_heap_size

        # Cap at 1/4 GB
        self.bytes_malloced_threshold = min(self.bytes_malloced_threshold,
                                            256 * 1024 * 1024)
        self.total_collection_time += collect_time
        self.prev_collect_end_time = end_time
        debug_print("  malloced since previous collection:",
                    old_malloced, "bytes")
        debug_print("  heap usage at start of collection: ",
                    self.heap_usage + old_malloced, "bytes")
        debug_print("  freed:                             ",
                    freed_size, "bytes")
        debug_print("  new heap usage:                    ",
                    curr_heap_size, "bytes")
        debug_print("  total time spent collecting:       ",
                    self.total_collection_time, "seconds")
        debug_print("  collecting time:                   ",
                    collect_time)
        debug_print("  computing time:                    ",
                    collect_time)
        debug_print("  new threshold:                     ",
                    self.bytes_malloced_threshold)
##        llop.debug_view(lltype.Void, self.malloced_objects, self.poolnodes,
##                        size_gc_header)
        assert self.heap_usage + old_malloced == curr_heap_size + freed_size

        self.heap_usage = curr_heap_size
        hdr = self.malloced_objects_with_finalizer
        self.malloced_objects_with_finalizer = lltype.nullptr(self.HDR)
        last = lltype.nullptr(self.HDR)
        while hdr:
            next = hdr.next
            if hdr.mark:
                hdr.next = lltype.nullptr(self.HDR)
                if not self.malloced_objects_with_finalizer:
                    self.malloced_objects_with_finalizer = hdr
                else:
                    last.next = hdr
                hdr.mark = False
                last = hdr
            else:
                obj = llmemory.cast_ptr_to_adr(hdr) + size_gc_header
                finalizer = self.getfinalizer(hdr.typeid16)
                # make malloced_objects_with_finalizer consistent
                # for the sake of a possible collection caused by finalizer
                if not self.malloced_objects_with_finalizer:
                    self.malloced_objects_with_finalizer = next
                else:
                    last.next = next
                hdr.next = self.malloced_objects
                self.malloced_objects = hdr
                #llop.debug_view(lltype.Void, self.malloced_objects, self.malloced_objects_with_finalizer, size_gc_header)
                finalizer(obj)
                if not self.collect_in_progress: # another collection was caused?
                    debug_print("outer collect interrupted "
                                "by recursive collect")
                    debug_stop("gc-collect")
                    return
                if not last:
                    if self.malloced_objects_with_finalizer == next:
                        self.malloced_objects_with_finalizer = lltype.nullptr(self.HDR)
                    else:
                        # now it gets annoying: finalizer caused a malloc of something
                        # with a finalizer
                        last = self.malloced_objects_with_finalizer
                        while last.next != next:
                            last = last.next
                            last.next = lltype.nullptr(self.HDR)
                else:
                    last.next = lltype.nullptr(self.HDR)
            hdr = next
        self.collect_in_progress = False
        debug_stop("gc-collect")
Example #9: free1()
 def free1(p):
     llmemory.raw_free(p)
Example #10: op_raw_free()
 def op_raw_free(self, addr):
     checkadr(addr)
     llmemory.raw_free(addr)
Example #11: destroy(shadowstackref)
 def destroy(self, shadowstackref):
     llmemory.raw_free(shadowstackref.base)
     self._cleanup(shadowstackref)
Example #12: forget_current_state()
 def forget_current_state(self):
     if self.unused_full_stack:
         llmemory.raw_free(self.unused_full_stack)
     self.unused_full_stack = self.gcdata.root_stack_base
     self.gcdata.root_stack_top = llmemory.NULL  # to detect missing restore
Example #13: llimpl_arena_free()
def llimpl_arena_free(arena_addr):
    llmemory.raw_free(arena_addr)
Example #14: signed round-trip through raw memory
 def f(value):
     addr = llmemory.raw_malloc(16)
     addr.signed[0] = value
     res = addr.signed[0]
     llmemory.raw_free(addr)
     return res
Example #15: MarkSweepGC collect() (variant)
    def collect(self, gen=0):
        # 1. mark from the roots, and also the objects that objects-with-del
        #    point to (using the list of malloced_objects_with_finalizer)
        # 2. walk the list of objects-without-del and free the ones not marked
        # 3. walk the list of objects-with-del and for the ones not marked:
        #    call __del__, move the object to the list of object-without-del
        import time
        debug_start("gc-collect")
        start_time = time.time()
        self.collect_in_progress = True
        size_gc_header = self.gcheaderbuilder.size_gc_header
        ##        llop.debug_view(lltype.Void, self.malloced_objects, self.poolnodes,
        ##                        size_gc_header)

        # push the roots on the mark stack
        objects = self.AddressStack()  # mark stack
        self._mark_stack = objects
        self.root_walker.walk_roots(
            MarkSweepGC._mark_root,  # stack roots
            MarkSweepGC._mark_root,  # static in prebuilt non-gc structures
            MarkSweepGC._mark_root)  # static in prebuilt gc objects

        # from this point onwards, no more mallocs should be possible
        old_malloced = self.bytes_malloced
        self.bytes_malloced = 0
        curr_heap_size = 0
        freed_size = 0

        # mark objects reachable by objects with a finalizer, but not those
        # themselves. add their size to curr_heap_size, since they always
        # survive the collection
        hdr = self.malloced_objects_with_finalizer
        while hdr:
            next = hdr.next
            typeid = hdr.typeid16
            gc_info = llmemory.cast_ptr_to_adr(hdr)
            obj = gc_info + size_gc_header
            if not hdr.mark:
                self.add_reachable_to_stack(obj, objects)
            addr = llmemory.cast_ptr_to_adr(hdr)
            size = self.fixed_size(typeid)
            if self.is_varsize(typeid):
                length = (obj +
                          self.varsize_offset_to_length(typeid)).signed[0]
                size += self.varsize_item_sizes(typeid) * length
            estimate = raw_malloc_usage(size_gc_header + size)
            curr_heap_size += estimate
            hdr = next

        # mark things on the mark stack and put their descendants onto the
        # stack until the stack is empty
        while objects.non_empty():  #mark
            curr = objects.pop()
            gc_info = curr - size_gc_header
            hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
            if hdr.mark:
                continue
            self.add_reachable_to_stack(curr, objects)
            hdr.mark = True
        objects.delete()
        # also mark self.curpool
        if self.curpool:
            gc_info = llmemory.cast_ptr_to_adr(self.curpool) - size_gc_header
            hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
            hdr.mark = True
        # go through the list of objects containing weak pointers
        # and kill the links if they go to dead objects
        # if the object itself is not marked, free it
        hdr = self.objects_with_weak_pointers
        surviving = lltype.nullptr(self.HDR)
        while hdr:
            typeid = hdr.typeid16
            next = hdr.next
            addr = llmemory.cast_ptr_to_adr(hdr)
            size = self.fixed_size(typeid)
            estimate = raw_malloc_usage(size_gc_header + size)
            if hdr.mark:
                offset = self.weakpointer_offset(typeid)
                hdr.mark = False
                gc_info = llmemory.cast_ptr_to_adr(hdr)
                weakref_obj = gc_info + size_gc_header
                pointing_to = (weakref_obj + offset).address[0]
                if pointing_to:
                    gc_info_pointing_to = pointing_to - size_gc_header
                    hdr_pointing_to = llmemory.cast_adr_to_ptr(
                        gc_info_pointing_to, self.HDRPTR)
                    # pointed to object will die
                    # XXX what to do if the object has a finalizer which resurrects
                    # the object?
                    if not hdr_pointing_to.mark:
                        (weakref_obj + offset).address[0] = NULL
                hdr.next = surviving
                surviving = hdr
                curr_heap_size += estimate
            else:
                gc_info = llmemory.cast_ptr_to_adr(hdr)
                weakref_obj = gc_info + size_gc_header
                self.write_free_statistics(typeid, weakref_obj)
                freed_size += estimate
                raw_free(addr)
            hdr = next
        self.objects_with_weak_pointers = surviving
        # sweep: delete objects without del if they are not marked
        # unmark objects without del that are marked
        firstpoolnode = lltype.malloc(self.POOLNODE, flavor='raw')
        firstpoolnode.linkedlist = self.malloced_objects
        firstpoolnode.nextnode = self.poolnodes
        prevpoolnode = lltype.nullptr(self.POOLNODE)
        poolnode = firstpoolnode
        while poolnode:  #sweep
            ppnext = llmemory.cast_ptr_to_adr(poolnode)
            ppnext += llmemory.offsetof(self.POOLNODE, 'linkedlist')
            hdr = poolnode.linkedlist
            while hdr:  #sweep
                typeid = hdr.typeid16
                next = hdr.next
                addr = llmemory.cast_ptr_to_adr(hdr)
                size = self.fixed_size(typeid)
                if self.is_varsize(typeid):
                    length = (addr + size_gc_header +
                              self.varsize_offset_to_length(typeid)).signed[0]
                    size += self.varsize_item_sizes(typeid) * length
                estimate = raw_malloc_usage(size_gc_header + size)
                if hdr.mark:
                    hdr.mark = False
                    ppnext.address[0] = addr
                    ppnext = llmemory.cast_ptr_to_adr(hdr)
                    ppnext += llmemory.offsetof(self.HDR, 'next')
                    curr_heap_size += estimate
                else:
                    gc_info = llmemory.cast_ptr_to_adr(hdr)
                    obj = gc_info + size_gc_header
                    self.write_free_statistics(typeid, obj)
                    freed_size += estimate
                    raw_free(addr)
                hdr = next
            ppnext.address[0] = llmemory.NULL
            next = poolnode.nextnode
            if not poolnode.linkedlist and prevpoolnode:
                # completely empty node
                prevpoolnode.nextnode = next
                lltype.free(poolnode, flavor='raw')
            else:
                prevpoolnode = poolnode
            poolnode = next
        self.malloced_objects = firstpoolnode.linkedlist
        self.poolnodes = firstpoolnode.nextnode
        lltype.free(firstpoolnode, flavor='raw')
        #llop.debug_view(lltype.Void, self.malloced_objects, self.malloced_objects_with_finalizer, size_gc_header)

        end_time = time.time()
        compute_time = start_time - self.prev_collect_end_time
        collect_time = end_time - start_time

        garbage_collected = old_malloced - (curr_heap_size - self.heap_usage)

        if (collect_time * curr_heap_size >
                0.02 * garbage_collected * compute_time):
            self.bytes_malloced_threshold += self.bytes_malloced_threshold / 2
        if (collect_time * curr_heap_size <
                0.005 * garbage_collected * compute_time):
            self.bytes_malloced_threshold /= 2

        # Use at least as much memory as current live objects.
        if curr_heap_size > self.bytes_malloced_threshold:
            self.bytes_malloced_threshold = curr_heap_size

        # Cap at 1/4 GB
        self.bytes_malloced_threshold = min(self.bytes_malloced_threshold,
                                            256 * 1024 * 1024)
        self.total_collection_time += collect_time
        self.prev_collect_end_time = end_time
        debug_print("  malloced since previous collection:", old_malloced,
                    "bytes")
        debug_print("  heap usage at start of collection: ",
                    self.heap_usage + old_malloced, "bytes")
        debug_print("  freed:                             ", freed_size,
                    "bytes")
        debug_print("  new heap usage:                    ", curr_heap_size,
                    "bytes")
        debug_print("  total time spent collecting:       ",
                    self.total_collection_time, "seconds")
        debug_print("  collecting time:                   ", collect_time)
        debug_print("  computing time:                    ", collect_time)
        debug_print("  new threshold:                     ",
                    self.bytes_malloced_threshold)
        ##        llop.debug_view(lltype.Void, self.malloced_objects, self.poolnodes,
        ##                        size_gc_header)
        assert self.heap_usage + old_malloced == curr_heap_size + freed_size

        self.heap_usage = curr_heap_size
        hdr = self.malloced_objects_with_finalizer
        self.malloced_objects_with_finalizer = lltype.nullptr(self.HDR)
        last = lltype.nullptr(self.HDR)
        while hdr:
            next = hdr.next
            if hdr.mark:
                hdr.next = lltype.nullptr(self.HDR)
                if not self.malloced_objects_with_finalizer:
                    self.malloced_objects_with_finalizer = hdr
                else:
                    last.next = hdr
                hdr.mark = False
                last = hdr
            else:
                obj = llmemory.cast_ptr_to_adr(hdr) + size_gc_header
                finalizer = self.getfinalizer(hdr.typeid16)
                # make malloced_objects_with_finalizer consistent
                # for the sake of a possible collection caused by finalizer
                if not self.malloced_objects_with_finalizer:
                    self.malloced_objects_with_finalizer = next
                else:
                    last.next = next
                hdr.next = self.malloced_objects
                self.malloced_objects = hdr
                #llop.debug_view(lltype.Void, self.malloced_objects, self.malloced_objects_with_finalizer, size_gc_header)
                finalizer(obj, llmemory.NULL)
                if not self.collect_in_progress:  # another collection was caused?
                    debug_print("outer collect interrupted "
                                "by recursive collect")
                    debug_stop("gc-collect")
                    return
                if not last:
                    if self.malloced_objects_with_finalizer == next:
                        self.malloced_objects_with_finalizer = lltype.nullptr(
                            self.HDR)
                    else:
                        # now it gets annoying: finalizer caused a malloc of something
                        # with a finalizer
                        last = self.malloced_objects_with_finalizer
                        while last.next != next:
                            last = last.next
                            last.next = lltype.nullptr(self.HDR)
                else:
                    last.next = lltype.nullptr(self.HDR)
            hdr = next
        self.collect_in_progress = False
        debug_stop("gc-collect")
Example #16: __del__ freeing self.addr
 def __del__(self):
     llmemory.raw_free(self.addr)
Example #17: _free_if_not_current()
 def _free_if_not_current(aid, stacktop, _):
     if stacktop != llmemory.NULL and aid != gcdata.active_thread:
         end = stacktop - sizeofaddr
         base = end.address[0]
         llmemory.raw_free(base)
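Here sizeofaddr and gcdata come from the surrounding GC framework module: the saved stack base is stored one address below the recorded stack top, read back, and freed. The constant is conventionally defined as below (an assumption about the enclosing module, not shown in the excerpt):

 sizeofaddr = llmemory.sizeof(llmemory.Address)   # size of one address slot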
Example #18: sweep_rawmalloced_objects()
    def sweep_rawmalloced_objects(self, generation):
        # free all the rawmalloced objects of the specified generation
        # that have not been marked
        if generation == 2:
            objects = self.gen2_rawmalloced_objects
            # generation 2 sweep: if A points to an object B that
            # moves from gen2 to gen3, it's possible that A no longer points
            # to any gen2 object.  In this case, A remains a bit too long in
            # last_generation_root_objects, but this will be fixed by the
            # next collect_last_generation_roots().
        elif generation == 3:
            objects = self.gen3_rawmalloced_objects
            # generation 3 sweep: remove from last_generation_root_objects
            # all the objects that we are about to free
            gen3roots = self.last_generation_root_objects
            newgen3roots = self.AddressStack()
            while gen3roots.non_empty():
                obj = gen3roots.pop()
                if not (self.header(obj).tid & GCFLAG_UNVISITED):
                    newgen3roots.append(obj)
            gen3roots.delete()
            self.last_generation_root_objects = newgen3roots
        else:
            # mostly a hack: the generation number -2 is the part of the
            # generation 2 that lives in gen2_resizable_objects
            ll_assert(generation == -2, "bogus 'generation'")
            objects = self.gen2_resizable_objects

        surviving_objects = self.AddressStack()
        # Help the flow space
        alive_count = alive_size = dead_count = dead_size = 0
        while objects.non_empty():
            obj = objects.pop()
            tid = self.header(obj).tid
            if tid & GCFLAG_UNVISITED:
                if self.config.gcconfig.debugprint:
                    dead_count += 1
                    dead_size += raw_malloc_usage(self.get_size_incl_hash(obj))
                addr = obj - self.gcheaderbuilder.size_gc_header
                llmemory.raw_free(addr)
            else:
                if self.config.gcconfig.debugprint:
                    alive_count += 1
                    alive_size += raw_malloc_usage(
                        self.get_size_incl_hash(obj))
                if generation == 3:
                    surviving_objects.append(obj)
                elif generation == 2:
                    ll_assert((tid & GCFLAG_AGE_MASK) < GCFLAG_AGE_MAX,
                              "wrong age for generation 2 object")
                    tid += GCFLAG_AGE_ONE
                    if (tid & GCFLAG_AGE_MASK) == GCFLAG_AGE_MAX:
                        # the object becomes part of generation 3
                        self.gen3_rawmalloced_objects.append(obj)
                        # GCFLAG_NO_HEAP_PTRS not set yet, conservatively
                        self.last_generation_root_objects.append(obj)
                    else:
                        # the object stays in generation 2
                        tid |= GCFLAG_UNVISITED
                        surviving_objects.append(obj)
                    self.header(obj).tid = tid
                elif generation == -2:
                    # the object stays in generation -2
                    tid |= GCFLAG_UNVISITED
                    surviving_objects.append(obj)
                    self.header(obj).tid = tid
        objects.delete()
        if generation == 2:
            self.gen2_rawmalloced_objects = surviving_objects
        elif generation == 3:
            self.gen3_rawmalloced_objects = surviving_objects
        elif generation == -2:
            self.gen2_resizable_objects = surviving_objects
        if self.config.gcconfig.debugprint:
            llop.debug_print(lltype.Void, "| [hyb] gen", generation,
                             "nonmoving now alive: ", alive_size, "bytes in",
                             alive_count, "objs")
            llop.debug_print(lltype.Void, "| [hyb] gen", generation,
                             "nonmoving freed:     ", dead_size, "bytes in",
                             dead_count, "objs")
Example #19: sweep_rawmalloced_objects() (variant)
File: hybrid.py Project: ieure/pypy
    def sweep_rawmalloced_objects(self, generation):
        # free all the rawmalloced objects of the specified generation
        # that have not been marked
        if generation == 2:
            objects = self.gen2_rawmalloced_objects
            # generation 2 sweep: if A points to an object B that
            # moves from gen2 to gen3, it's possible that A no longer points
            # to any gen2 object.  In this case, A remains a bit too long in
            # last_generation_root_objects, but this will be fixed by the
            # next collect_last_generation_roots().
        elif generation == 3:
            objects = self.gen3_rawmalloced_objects
            # generation 3 sweep: remove from last_generation_root_objects
            # all the objects that we are about to free
            gen3roots = self.last_generation_root_objects
            newgen3roots = self.AddressStack()
            while gen3roots.non_empty():
                obj = gen3roots.pop()
                if not (self.header(obj).tid & GCFLAG_UNVISITED):
                    newgen3roots.append(obj)
            gen3roots.delete()
            self.last_generation_root_objects = newgen3roots
        else:
            ll_assert(False, "bogus 'generation'")
            return 0 # to please the flowspace

        surviving_objects = self.AddressStack()
        # Help the flow space
        alive_count = alive_size = dead_count = dead_size = 0
        debug = have_debug_prints()
        while objects.non_empty():
            obj = objects.pop()
            tid = self.header(obj).tid
            if tid & GCFLAG_UNVISITED:
                if debug:
                    dead_count+=1
                    dead_size+=raw_malloc_usage(self.get_size_incl_hash(obj))
                addr = obj - self.gcheaderbuilder.size_gc_header
                llmemory.raw_free(addr)
            else:
                if debug:
                    alive_count+=1
                alive_size+=raw_malloc_usage(self.get_size_incl_hash(obj))
                if generation == 3:
                    surviving_objects.append(obj)
                elif generation == 2:
                    ll_assert((tid & GCFLAG_AGE_MASK) < GCFLAG_AGE_MAX,
                              "wrong age for generation 2 object")
                    tid += GCFLAG_AGE_ONE
                    if (tid & GCFLAG_AGE_MASK) == GCFLAG_AGE_MAX:
                        # the object becomes part of generation 3
                        self.gen3_rawmalloced_objects.append(obj)
                        # GCFLAG_NO_HEAP_PTRS not set yet, conservatively
                        self.last_generation_root_objects.append(obj)
                    else:
                        # the object stays in generation 2
                        tid |= GCFLAG_UNVISITED
                        surviving_objects.append(obj)
                    self.header(obj).tid = tid
        objects.delete()
        if generation == 2:
            self.gen2_rawmalloced_objects = surviving_objects
        elif generation == 3:
            self.gen3_rawmalloced_objects = surviving_objects
        debug_print("| [hyb] gen", generation,
                    "nonmoving now alive: ",
                    alive_size, "bytes in",
                    alive_count, "objs")
        debug_print("| [hyb] gen", generation,
                    "nonmoving freed:     ",
                    dead_size, "bytes in",
                    dead_count, "objs")
        return alive_size