Usage examples of llarena.getfakearenaaddress(), collected from the PyPy source tree (pypy.rpython.lltypesystem).

Example #1
 def _fixup(self):
     if self.ptr is not None and self.ptr._was_freed():
         # hack to support llarena.test_replace_object_with_stub()
         from pypy.rpython.lltypesystem import llarena
         return llarena.getfakearenaaddress(self)
     else:
         return self
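The hack above only makes sense given how getfakearenaaddress() behaves on untranslated fake addresses. The following is a minimal sketch of that behaviour, assuming the same pypy.rpython.lltypesystem layout as the examples; the struct S, the arena size, and the stored value are illustrative only, not taken from PyPy.

 from pypy.rpython.lltypesystem import lltype, llmemory, llarena

 S = lltype.Struct('S', ('x', lltype.Signed))

 arena = llarena.arena_malloc(64, False)           # fresh arena of 64 raw bytes
 llarena.arena_reserve(arena, llmemory.sizeof(S))  # carve out room for one S
 p = llmemory.cast_adr_to_ptr(arena, lltype.Ptr(S))
 p.x = 42

 # cast_ptr_to_adr() yields a plain fake address bound to the object and
 # does not support arbitrary arithmetic.  getfakearenaaddress() maps it
 # back to an arena-relative address, which does (e.g. 'addr + size' in
 # the hash-field code of the next examples), or raises RuntimeError if
 # the object does not live in an arena (see get_identityhash_from_addr).
 adr = llmemory.cast_ptr_to_adr(p)
 assert llarena.getfakearenaaddress(adr) == arena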
Example #2
 def copy_and_compact(self, obj, typeid, basesize, toaddr, grow_hash_field):
     # 'basesize' is the size without any hash field
     # restore the normal header
     hdr = self.header_forwarded(obj)
     gcflags = hdr.tid & 3
     if grow_hash_field:
         gcflags |= GCFLAG_SAVED_HASHFIELD
         hashvalue = self.get_identityhash_from_addr(obj)
     elif gcflags & GCFLAG_SAVED_HASHFIELD:
         fromaddr = llarena.getfakearenaaddress(obj)
         fromaddr -= self.gcheaderbuilder.size_gc_header
         hashvalue = (fromaddr + basesize).signed[0]
     else:
         hashvalue = 0  # not used
     #
     hdr.tid = self.combine(typeid, gcflags << first_gcflag_bit)
     #
     fromaddr = obj - self.gcheaderbuilder.size_gc_header
     if translated_to_c():
         llmemory.raw_memmove(fromaddr, toaddr, basesize)
     else:
         llmemory.raw_memcopy(fromaddr, toaddr, basesize)
     #
     if gcflags & GCFLAG_SAVED_HASHFIELD:
         (toaddr + basesize).signed[0] = hashvalue
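A note on the layout above: the saved identity hash is a single extra signed word placed immediately after the object's 'basesize' bytes. That is why it is read and written separately from the raw_memmove()/raw_memcopy() of the object body, and why getfakearenaaddress() is needed before the '+ basesize' arithmetic can be performed on the untranslated fake address.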
Example #3
 def get_identityhash_from_addr(self, obj):
     if translated_to_c():
         return llmemory.cast_adr_to_int(obj)  # direct case
     else:
         try:
             adr = llarena.getfakearenaaddress(obj)  # -> arena address
         except RuntimeError:
             return llmemory.cast_adr_to_int(obj)  # not in an arena...
         return adr - self.space
Example #4
 def _nuninitialized(self, page, size_class):
     # Helper for debugging: count the number of uninitialized blocks
     freeblock = page.freeblock
     for i in range(page.nfree):
         freeblock = freeblock.address[0]
     assert freeblock != NULL
     pageaddr = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
     num_initialized_blocks, rem = divmod(
         freeblock - pageaddr - self.hdrsize, size_class * WORD)
     assert rem == 0, "page size_class misspecified?"
     nblocks = self.nblocks_for_size[size_class]
     return nblocks - num_initialized_blocks
Example #5
 def _get_object_hash(self, obj, objsize, tid):
     # Returns the hash of the object, which must not be GC_HASH_NOTTAKEN.
     gc_hash = tid & GCFLAG_HASHMASK
     if gc_hash == GC_HASH_HASFIELD:
         obj = llarena.getfakearenaaddress(obj)
         return (obj + objsize).signed[0]
     elif gc_hash == GC_HASH_TAKEN_ADDR:
         return llmemory.cast_adr_to_int(obj)
     elif gc_hash == GC_HASH_TAKEN_NURS:
         return self._compute_current_nursery_hash(obj)
     else:
         assert 0, "gc_hash == GC_HASH_NOTTAKEN"
Example #6
 def free_page(self, page):
     """Free a whole page."""
     #
     # Insert the freed page in the arena's 'freepages' list.
     # If nfreepages == totalpages, then it will be freed at the
     # end of mass_free().
     arena = page.arena
     arena.nfreepages += 1
     pageaddr = llmemory.cast_ptr_to_adr(page)
     pageaddr = llarena.getfakearenaaddress(pageaddr)
     llarena.arena_reset(pageaddr, self.page_size, 0)
     llarena.arena_reserve(pageaddr, llmemory.sizeof(llmemory.Address))
     pageaddr.address[0] = arena.freepages
     arena.freepages = pageaddr
Example #7
 def identityhash(self, gcobj):
     # Unlike SemiSpaceGC.identityhash(), this function does not have
     # to care about reducing top_of_space.  The reason is as
     # follows.  When we collect, each object either moves to the
     # left or stays where it is.  If it moves to the left (and if it
     # has GCFLAG_HASHTAKEN), we can give it a hash field, and the
     # end of the new object cannot move to the right of the end of
     # the old object.  If it stays where it is, then we don't need
     # to add the hash field.  So collecting can never actually grow
     # the consumed size.
     obj = llmemory.cast_ptr_to_adr(gcobj)
     hdr = self.header(obj)
     #
     if hdr.tid & GCFLAG_HASHFIELD:  # the hash is in a field at the end
         obj = llarena.getfakearenaaddress(obj) + self.get_size(obj)
         return obj.signed[0]
     #
     hdr.tid |= GCFLAG_HASHTAKEN
     return self.get_identityhash_from_addr(obj)
Example #8
 def malloc(self, size):
     """Allocate a block from a page in an arena."""
     nsize = llmemory.raw_malloc_usage(size)
     ll_assert(nsize > 0, "malloc: size is null or negative")
     ll_assert(nsize <= self.small_request_threshold, "malloc: size too big")
     ll_assert((nsize & (WORD - 1)) == 0, "malloc: size is not aligned")
     self.total_memory_used += r_uint(nsize)
     #
     # Get the page to use from the size
     size_class = nsize >> WORD_POWER_2
     page = self.page_for_size[size_class]
     if page == PAGE_NULL:
         page = self.allocate_new_page(size_class)
     #
     # The result is simply 'page.freeblock'
     result = page.freeblock
     if page.nfree > 0:
         #
         # The 'result' was part of the chained list; read the next.
         page.nfree -= 1
         freeblock = result.address[0]
         llarena.arena_reset(result,
                             llmemory.sizeof(llmemory.Address),
                             0)
         #
     else:
         # The 'result' is part of the uninitialized blocks.
         freeblock = result + nsize
     #
     page.freeblock = freeblock
     #
     pageaddr = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
     if freeblock - pageaddr > self.page_size - nsize:
         # This was the last free block, so unlink the page from the
         # chained list and put it in the 'full_page_for_size' list.
         self.page_for_size[size_class] = page.nextpage
         page.nextpage = self.full_page_for_size[size_class]
         self.full_page_for_size[size_class] = page
     #
     llarena.arena_reserve(result, _dummy_size(size))
     return result
Example #9
 def walk_page(self, page, block_size, ok_to_free_func):
     """Walk over all objects in a page, and ask ok_to_free_func()."""
     #
     # 'freeblock' is the next free block
     freeblock = page.freeblock
     #
     # 'prevfreeblockat' is the address of where 'freeblock' was read from.
     prevfreeblockat = lltype.direct_fieldptr(page, 'freeblock')
     prevfreeblockat = llmemory.cast_ptr_to_adr(prevfreeblockat)
     #
     obj = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
     obj += self.hdrsize
     surviving = 0    # initially
     skip_free_blocks = page.nfree
     #
     while True:
         #
         if obj == freeblock:
             #
             if skip_free_blocks == 0:
                 #
                 # 'obj' points to the first uninitialized block,
                 # or to the end of the page if there are none.
                 break
             #
             # 'obj' points to a free block.  It means that
             # 'prevfreeblockat.address[0]' does not need to be updated.
             # Just read the next free block from 'obj.address[0]'.
             skip_free_blocks -= 1
             prevfreeblockat = obj
             freeblock = obj.address[0]
             #
         else:
             # 'obj' points to a valid object.
             ll_assert(freeblock > obj,
                       "freeblocks are linked out of order")
             #
             if ok_to_free_func(obj):
                 #
                 # The object should die.
                 llarena.arena_reset(obj, _dummy_size(block_size), 0)
                 llarena.arena_reserve(obj,
                                       llmemory.sizeof(llmemory.Address))
                 # Insert 'obj' in the linked list of free blocks.
                 prevfreeblockat.address[0] = obj
                 prevfreeblockat = obj
                 obj.address[0] = freeblock
                 #
                 # Update the number of free objects in the page.
                 page.nfree += 1
                 #
             else:
                 # The object survives.
                 surviving += 1
         #
         obj += block_size
     #
     # Update the global total size of objects.
     self.total_memory_used += r_uint(surviving * block_size)
     #
     # Return the number of surviving objects.
     return surviving
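The free-list invariants shared by malloc(), free_page() and walk_page() can be summarized in a toy model. Everything below is illustrative plain Python, not PyPy code; all names are made up.

 # A page is an array of equal-sized blocks; a free block's first word
 # holds the address of the next free block, mirroring
 # 'obj.address[0] = freeblock' in walk_page() above.
 NBLOCKS = 8
 page = [None] * NBLOCKS   # None stands for a block's payload
 freehead = 0              # like page.freeblock: all blocks start uninitialized
 nfree = 0                 # like page.nfree: length of the chained free list

 def malloc_block():
     # Mirrors malloc(): pop from the chained list when it is non-empty;
     # otherwise 'freehead' already points at the next uninitialized
     # block ('freeblock = result + nsize' in the real code).
     global freehead, nfree
     assert freehead < NBLOCKS, "page is full"
     i = freehead
     if nfree > 0:
         freehead = page[i]   # follow the link stored in the free block
         nfree -= 1
     else:
         freehead = i + 1
     page[i] = None           # block is now live
     return i

During sweeping, walk_page() rebuilds this chained list in increasing address order, ending at the first uninitialized block (or at the end of the page), which is what lets it assert that free blocks are never linked out of order.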