Example 1
 def malloc_fixedsize_clear(self,
                            typeid,
                            size,
                            has_finalizer=False,
                            is_finalizer_light=False,
                            contains_weakptr=False):
     if (has_finalizer or
         (raw_malloc_usage(size) > self.lb_young_fixedsize
          and raw_malloc_usage(size) > self.largest_young_fixedsize)):
         # ^^^ we do two size comparisons; the first one appears redundant,
         #     but it can be constant-folded if 'size' is a constant; then
         #     it almost always folds down to False, which kills the
         #     second comparison as well.
         ll_assert(not contains_weakptr, "wrong case for mallocing weakref")
         # "non-simple" case or object too big: don't use the nursery
         return SemiSpaceGC.malloc_fixedsize_clear(self, typeid, size,
                                                   has_finalizer,
                                                   is_finalizer_light,
                                                   contains_weakptr)
     size_gc_header = self.gcheaderbuilder.size_gc_header
     totalsize = size_gc_header + size
     result = self.nursery_free
     if raw_malloc_usage(totalsize) > self.nursery_top - result:
         result = self.collect_nursery()
     llarena.arena_reserve(result, totalsize)
     # GCFLAG_NO_YOUNG_PTRS is never set on young objs
     self.init_gc_object(result, typeid, flags=0)
     self.nursery_free = result + totalsize
     if contains_weakptr:
         self.young_objects_with_weakrefs.append(result + size_gc_header)
     return llmemory.cast_adr_to_ptr(result + size_gc_header,
                                     llmemory.GCREF)
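The fast path above is a plain bump-pointer allocation inside an llarena. A minimal, self-contained sketch of that pattern, runnable on CPython against a PyPy source checkout of this era (llarena ships a pure-Python emulation); the nursery_* names mirror the attributes used above, and the struct S is illustrative:

from pypy.rpython.lltypesystem import lltype, llmemory, llarena

S = lltype.GcStruct('S', ('x', lltype.Signed))

nursery_size = 4096
nursery = llarena.arena_malloc(nursery_size, False)    # zero=False
nursery_free = nursery
nursery_top = nursery + nursery_size

totalsize = llarena.round_up_for_allocation(llmemory.sizeof(S))
# fast path: the object fits if totalsize <= nursery_top - nursery_free
assert llmemory.raw_malloc_usage(totalsize) <= nursery_top - nursery_free
llarena.arena_reserve(nursery_free, totalsize)
obj = llmemory.cast_adr_to_ptr(nursery_free, lltype.Ptr(S))
obj.x = 42
nursery_free += totalsize                              # bump the pointer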
Example 2
def test_address_order():
    a = arena_malloc(24, False)
    assert eq(a, a)
    assert lt(a, a + 1)
    assert lt(a + 5, a + 20)

    b = arena_malloc(24, False)
    if a > b:
        a, b = b, a
    assert lt(a, b)
    assert lt(a + 19, b)
    assert lt(a, b + 19)

    c = b + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(c, precomputed_size)
    assert lt(b, c)
    assert lt(a, c)
    assert lt(llmemory.NULL, c)
    d = c + llmemory.offsetof(SX, 'x')
    assert lt(c, d)
    assert lt(b, d)
    assert lt(a, d)
    assert lt(llmemory.NULL, d)
    e = c + precomputed_size
    assert lt(d, e)
    assert lt(c, e)
    assert lt(b, e)
    assert lt(a, e)
    assert lt(llmemory.NULL, e)
Example 3
def test_address_order():
    a = arena_malloc(20, False)
    assert eq(a, a)
    assert lt(a, a+1)
    assert lt(a+5, a+20)

    b = arena_malloc(20, False)
    if a > b:
        a, b = b, a
    assert lt(a, b)
    assert lt(a+19, b)
    assert lt(a, b+19)

    c = b + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(c, precomputed_size)
    assert lt(b, c)
    assert lt(a, c)
    assert lt(llmemory.NULL, c)
    d = c + llmemory.offsetof(SX, 'x')
    assert lt(c, d)
    assert lt(b, d)
    assert lt(a, d)
    assert lt(llmemory.NULL, d)
    e = c + precomputed_size
    assert lt(d, e)
    assert lt(c, e)
    assert lt(b, e)
    assert lt(a, e)
    assert lt(llmemory.NULL, e)
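Both revisions of this test exercise the same guarantee: arena addresses are totally ordered, within one arena and across two (eq and lt are small helpers local to the test module). A condensed sketch using the plain comparison operators, under the same import assumptions as above:

from pypy.rpython.lltypesystem import llarena

a = llarena.arena_malloc(24, False)
b = llarena.arena_malloc(24, False)
# ordering within a single arena
assert a == a
assert a < a + 1
assert a + 5 < a + 20
# addresses of two distinct arenas compare consistently too
if a > b:
    a, b = b, a
assert a + 19 < b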
Example 4
 def malloc_varsize_clear(self,
                          typeid,
                          length,
                          size,
                          itemsize,
                          offset_to_length,
                          can_collect,
                          has_finalizer=False):
     size_gc_header = self.gcheaderbuilder.size_gc_header
     nonvarsize = size_gc_header + size
     try:
         varsize = ovfcheck(itemsize * length)
         totalsize = ovfcheck(nonvarsize + varsize)
     except OverflowError:
         raise memoryError
     result = self.free
     if raw_malloc_usage(totalsize) > self.top_of_space - result:
         if not can_collect:
             raise memoryError
         result = self.obtain_free_space(totalsize)
     llarena.arena_reserve(result, totalsize)
     self.init_gc_object(result, typeid)
     (result + size_gc_header + offset_to_length).signed[0] = length
     self.free = result + llarena.round_up_for_allocation(totalsize)
     if has_finalizer:
         self.objects_with_finalizers.append(result + size_gc_header)
     return llmemory.cast_adr_to_ptr(result + size_gc_header,
                                     llmemory.GCREF)
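ovfcheck() is the RPython idiom the try/except above relies on: it marks an integer operation whose overflow must raise OverflowError rather than silently wrap (memoryError in this source is a prebuilt MemoryError instance). A small sketch of the same guard in isolation; the pypy.rlib.rarithmetic import path matches this era of the codebase:

from pypy.rlib.rarithmetic import ovfcheck

def checked_totalsize(nonvarsize, itemsize, length):
    # both the multiplication and the addition can overflow a machine word
    try:
        varsize = ovfcheck(itemsize * length)
        totalsize = ovfcheck(nonvarsize + varsize)
    except OverflowError:
        raise MemoryError
    return totalsize

assert checked_totalsize(16, 8, 10) == 96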
Example 5
 def malloc_fixedsize_clear(self, typeid, size, can_collect,
                            has_finalizer=False, contains_weakptr=False):
     if (has_finalizer or not can_collect or
         (raw_malloc_usage(size) > self.lb_young_var_basesize and
          raw_malloc_usage(size) > self.largest_young_fixedsize)):
         # ^^^ we do two size comparisons; the first one appears redundant,
         #     but it can be constant-folded if 'size' is a constant; then
         #     it almost always folds down to False, which kills the
         #     second comparison as well.
         ll_assert(not contains_weakptr, "wrong case for mallocing weakref")
         # "non-simple" case or object too big: don't use the nursery
         return SemiSpaceGC.malloc_fixedsize_clear(self, typeid, size,
                                                   can_collect,
                                                   has_finalizer,
                                                   contains_weakptr)
     size_gc_header = self.gcheaderbuilder.size_gc_header
     totalsize = size_gc_header + size
     result = self.nursery_free
     if raw_malloc_usage(totalsize) > self.nursery_top - result:
         result = self.collect_nursery()
     llarena.arena_reserve(result, totalsize)
     # GCFLAG_NO_YOUNG_PTRS is never set on young objs
     self.init_gc_object(result, typeid, flags=0)
     self.nursery_free = result + totalsize
     if contains_weakptr:
         self.young_objects_with_weakrefs.append(result + size_gc_header)
     return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
Example 6
 def markcompactcollect(self, needed=0):
     start_time = self.debug_collect_start()
     self.debug_check_consistency()
     self.to_see = self.AddressStack()
     self.mark_roots_recursively()
     if (self.objects_with_finalizers.non_empty() or
         self.run_finalizers.non_empty()):
         self.mark_objects_with_finalizers()
         self._trace_and_mark()
     self.to_see.delete()
     num_of_alive_objs = self.compute_alive_objects()
     size_of_alive_objs = self.totalsize_of_objs
     totalsize = self.new_space_size(size_of_alive_objs, needed +
                                     num_of_alive_objs * BYTES_PER_TID)
     tid_backup_size = (llmemory.sizeof(self.TID_BACKUP, 0) +
                        llmemory.sizeof(TID_TYPE) * num_of_alive_objs)
     used_space_now = self.next_collect_after + raw_malloc_usage(tid_backup_size)
     if totalsize >= self.space_size or used_space_now >= self.space_size:
         toaddr = self.double_space_size(totalsize)
         llarena.arena_reserve(toaddr + size_of_alive_objs, tid_backup_size)
         self.tid_backup = llmemory.cast_adr_to_ptr(
             toaddr + size_of_alive_objs,
             lltype.Ptr(self.TID_BACKUP))
         resizing = True
     else:
         toaddr = llarena.arena_new_view(self.space)
         llarena.arena_reserve(self.top_of_space, tid_backup_size)
         self.tid_backup = llmemory.cast_adr_to_ptr(
             self.top_of_space,
             lltype.Ptr(self.TID_BACKUP))
         resizing = False
     self.next_collect_after = totalsize
     weakref_offsets = self.collect_weakref_offsets()
     finaladdr = self.update_forward_pointers(toaddr, num_of_alive_objs)
     if (self.run_finalizers.non_empty() or
         self.objects_with_finalizers.non_empty()):
         self.update_run_finalizers()
     if self.objects_with_weakrefs.non_empty():
         self.invalidate_weakrefs(weakref_offsets)
     self.update_objects_with_id()
     self.compact(resizing)
     if not resizing:
         size = toaddr + self.space_size - finaladdr
         llarena.arena_reset(finaladdr, size, True)
     else:
         if we_are_translated():
             # because we free stuff already in raw_memmove, we
             # would get a double free here. Let's free it anyway
             llarena.arena_free(self.space)
         llarena.arena_reset(toaddr + size_of_alive_objs, tid_backup_size,
                             True)
     self.space        = toaddr
     self.free         = finaladdr
     self.top_of_space = toaddr + self.next_collect_after
     self.debug_check_consistency()
     self.tid_backup = lltype.nullptr(self.TID_BACKUP)
     if self.run_finalizers.non_empty():
         self.execute_finalizers()
     self.debug_collect_finish(start_time)
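In the non-resizing branch above, arena_new_view() hands back a second address for the same arena; in the pure-Python emulation this is what allows update_forward_pointers() to reserve the future copies of objects over memory that still holds the current ones. A tiny sketch of that assumed behaviour:

from pypy.rpython.lltypesystem import lltype, llmemory, llarena

S = lltype.Struct('S', ('x', lltype.Signed))
space = llarena.arena_malloc(64, False)
llarena.arena_reserve(space, llmemory.sizeof(S))   # a live object
toaddr = llarena.arena_new_view(space)
# the view aliases the same memory, so a fresh reservation at the same
# offset is accepted even though 'space' still holds an object there
llarena.arena_reserve(toaddr, llmemory.sizeof(S))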
Example 7
    def update_forward_pointers(self, toaddr, maxnum):
        self.base_forwarding_addr = base_forwarding_addr = toaddr
        fromaddr = self.space
        size_gc_header = self.gcheaderbuilder.size_gc_header
        num = 0
        while fromaddr < self.free:
            hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR))
            obj = fromaddr + size_gc_header
            # compute the original object size, including the
            # optional hash field
            basesize = size_gc_header + self.get_size(obj)
            totalsrcsize = basesize
            if hdr.tid & GCFLAG_HASHFIELD:  # already a hash field, copy it too
                totalsrcsize += llmemory.sizeof(lltype.Signed)
            #
            if self.marked(obj):
                # the object is marked as surviving.  Compute the new object
                # size
                totaldstsize = totalsrcsize
                if hdr.tid & (GCFLAG_HASHTAKEN | GCFLAG_HASHFIELD) == GCFLAG_HASHTAKEN:
                    # grow a new hash field -- with the exception: if
                    # the object actually doesn't move, don't
                    # (otherwise, we get a bogus toaddr > fromaddr)
                    if self.toaddr_smaller_than_fromaddr(toaddr, fromaddr):
                        totaldstsize += llmemory.sizeof(lltype.Signed)
                #
                if not translated_to_c():
                    llarena.arena_reserve(toaddr, basesize)
                    if raw_malloc_usage(totaldstsize) > raw_malloc_usage(basesize):
                        llarena.arena_reserve(toaddr + basesize, llmemory.sizeof(lltype.Signed))
                #
                # save the field hdr.tid in the array tid_backup
                ll_assert(num < maxnum, "overflow of the tid_backup table")
                self.tid_backup[num] = self.get_type_id(obj)
                num += 1
                # compute forward_offset, the offset to the future copy
                # of this object
                forward_offset = toaddr - base_forwarding_addr
                # copy the first two gc flags in forward_offset
                ll_assert(forward_offset & 3 == 0, "misalignment!")
                forward_offset |= (hdr.tid >> first_gcflag_bit) & 3
                hdr.tid = forward_offset | GCFLAG_MARKBIT
                ll_assert(self.marked(obj), "re-marking object failed!")
                # done
                toaddr += totaldstsize
            #
            fromaddr += totalsrcsize
            if not translated_to_c():
                assert toaddr - base_forwarding_addr <= fromaddr - self.space
        self.num_alive_objs = num
        self.finaladdr = toaddr

        # now update references
        self.root_walker.walk_roots(
            MarkCompactGC._update_ref,  # stack roots
            MarkCompactGC._update_ref,  # static in prebuilt non-gc structures
            MarkCompactGC._update_ref,
        )  # static in prebuilt gc objects
        self.walk_marked_objects(MarkCompactGC.trace_and_update_ref)
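The masked comparison above ('hash taken but no hash field yet') is easy to misread; with illustrative flag values, its truth table works out as follows:

# Illustrative flag values -- the real ones are constants of the GC module
GCFLAG_HASHTAKEN = 1 << 0   # hash() was requested before the object moved
GCFLAG_HASHFIELD = 1 << 1   # object already carries a trailing hash word

def needs_new_hash_field(tid):
    # grow a hash field only if the hash was taken but not yet stored
    return tid & (GCFLAG_HASHTAKEN | GCFLAG_HASHFIELD) == GCFLAG_HASHTAKEN

assert needs_new_hash_field(GCFLAG_HASHTAKEN)
assert not needs_new_hash_field(GCFLAG_HASHTAKEN | GCFLAG_HASHFIELD)
assert not needs_new_hash_field(0)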
Example 8
 def make_a_copy(self, obj, objsize):
     totalsize = self.size_gc_header() + objsize
     newaddr = self.free
     self.free += totalsize
     llarena.arena_reserve(newaddr, totalsize)
     raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
     newobj = newaddr + self.size_gc_header()
     return newobj
Example 9
 def malloc_varsize_collecting_nursery(self, totalsize):
     result = self.collect_nursery()
     ll_assert(
         raw_malloc_usage(totalsize) <= self.nursery_top - result,
         "not enough room in malloc_varsize_collecting_nursery()")
     llarena.arena_reserve(result, totalsize)
     self.nursery_free = result + llarena.round_up_for_allocation(totalsize)
     return result
Example 10
File: hybrid.py Project: ieure/pypy
 def malloc_varsize_collecting_nursery(self, totalsize):
     result = self.collect_nursery()
     ll_assert(raw_malloc_usage(totalsize) <= self.nursery_top - result,
               "not enough room in malloc_varsize_collecting_nursery()")
     llarena.arena_reserve(result, totalsize)
     self.nursery_free = result + llarena.round_up_for_allocation(
         totalsize)
     return result
Example 11
 def markcompactcollect(self, needed=0):
     start_time = self.debug_collect_start()
     self.debug_check_consistency()
     self.to_see = self.AddressStack()
     self.mark_roots_recursively()
     if (self.objects_with_finalizers.non_empty()
             or self.run_finalizers.non_empty()):
         self.mark_objects_with_finalizers()
         self._trace_and_mark()
     self.to_see.delete()
     num_of_alive_objs = self.compute_alive_objects()
     size_of_alive_objs = self.totalsize_of_objs
     totalsize = self.new_space_size(
         size_of_alive_objs, needed + num_of_alive_objs * BYTES_PER_TID)
     tid_backup_size = (llmemory.sizeof(self.TID_BACKUP, 0) +
                        llmemory.sizeof(TID_TYPE) * num_of_alive_objs)
     used_space_now = self.next_collect_after + raw_malloc_usage(
         tid_backup_size)
     if totalsize >= self.space_size or used_space_now >= self.space_size:
         toaddr = self.double_space_size(totalsize)
         llarena.arena_reserve(toaddr + size_of_alive_objs, tid_backup_size)
         self.tid_backup = llmemory.cast_adr_to_ptr(
             toaddr + size_of_alive_objs, lltype.Ptr(self.TID_BACKUP))
         resizing = True
     else:
         toaddr = llarena.arena_new_view(self.space)
         llarena.arena_reserve(self.top_of_space, tid_backup_size)
         self.tid_backup = llmemory.cast_adr_to_ptr(
             self.top_of_space, lltype.Ptr(self.TID_BACKUP))
         resizing = False
     self.next_collect_after = totalsize
     weakref_offsets = self.collect_weakref_offsets()
     finaladdr = self.update_forward_pointers(toaddr, num_of_alive_objs)
     if (self.run_finalizers.non_empty()
             or self.objects_with_finalizers.non_empty()):
         self.update_run_finalizers()
     if self.objects_with_weakrefs.non_empty():
         self.invalidate_weakrefs(weakref_offsets)
     self.update_objects_with_id()
     self.compact(resizing)
     if not resizing:
         size = toaddr + self.space_size - finaladdr
         llarena.arena_reset(finaladdr, size, True)
     else:
         if we_are_translated():
             # because we free stuff already in raw_memmove, we
             # would get a double free here. Let's free it anyway
             llarena.arena_free(self.space)
         llarena.arena_reset(toaddr + size_of_alive_objs, tid_backup_size,
                             True)
     self.space = toaddr
     self.free = finaladdr
     self.top_of_space = toaddr + self.next_collect_after
     self.debug_check_consistency()
     self.tid_backup = lltype.nullptr(self.TID_BACKUP)
     if self.run_finalizers.non_empty():
         self.execute_finalizers()
     self.debug_collect_finish(start_time)
Example 12
def test_shrink_obj():
    from pypy.rpython.memory.gcheader import GCHeaderBuilder
    HDR = lltype.Struct('HDR', ('h', lltype.Signed))
    gcheaderbuilder = GCHeaderBuilder(HDR)
    size_gc_header = gcheaderbuilder.size_gc_header
    S = lltype.GcStruct('S', ('x', lltype.Signed),
                             ('a', lltype.Array(lltype.Signed)))
    myarenasize = 200
    a = arena_malloc(myarenasize, False)
    arena_reserve(a, size_gc_header + llmemory.sizeof(S, 10))
    arena_shrink_obj(a, size_gc_header + llmemory.sizeof(S, 5))
    arena_reset(a, size_gc_header + llmemory.sizeof(S, 5), False)
Example 13
def test_shrink_obj():
    from pypy.rpython.memory.gcheader import GCHeaderBuilder
    HDR = lltype.Struct('HDR', ('h', lltype.Signed))
    gcheaderbuilder = GCHeaderBuilder(HDR)
    size_gc_header = gcheaderbuilder.size_gc_header
    S = lltype.GcStruct('S', ('x', lltype.Signed),
                        ('a', lltype.Array(lltype.Signed)))
    myarenasize = 200
    a = arena_malloc(myarenasize, False)
    arena_reserve(a, size_gc_header + llmemory.sizeof(S, 10))
    arena_shrink_obj(a, size_gc_header + llmemory.sizeof(S, 5))
    arena_reset(a, size_gc_header + llmemory.sizeof(S, 5), False)
Example 14
 def _get_memory(self, totalsize):
     # also counts the space that will be needed during the following
     # collection to store the TID
     requested_size = raw_malloc_usage(totalsize) + BYTES_PER_TID
     self.next_collect_after -= requested_size
     if self.next_collect_after < 0:
         result = self.obtain_free_space(requested_size)
     else:
         result = self.free
     self.free += totalsize
     llarena.arena_reserve(result, totalsize)
     return result
Example 15
def test_address_eq_as_int():
    a = arena_malloc(50, False)
    arena_reserve(a, precomputed_size)
    p = llmemory.cast_adr_to_ptr(a, SPTR)
    a1 = llmemory.cast_ptr_to_adr(p)
    assert a == a1
    assert not (a != a1)
    assert (a+1) != a1
    assert not ((a+1) == a1)
    py.test.skip("cast_adr_to_int() is hard to get consistent")
    assert llmemory.cast_adr_to_int(a) == llmemory.cast_adr_to_int(a1)
    assert llmemory.cast_adr_to_int(a+1) == llmemory.cast_adr_to_int(a1) + 1
Example 16
def test_address_eq_as_int():
    a = arena_malloc(50, False)
    arena_reserve(a, precomputed_size)
    p = llmemory.cast_adr_to_ptr(a, SPTR)
    a1 = llmemory.cast_ptr_to_adr(p)
    assert a == a1
    assert not (a != a1)
    assert (a + 1) != a1
    assert not ((a + 1) == a1)
    py.test.skip("cast_adr_to_int() is hard to get consistent")
    assert llmemory.cast_adr_to_int(a) == llmemory.cast_adr_to_int(a1)
    assert llmemory.cast_adr_to_int(a + 1) == llmemory.cast_adr_to_int(a1) + 1
Example 17
    def markcompactcollect(self, requested_size=0):
        self.debug_collect_start(requested_size)
        self.debug_check_consistency()
        #
        # Mark alive objects
        #
        self.to_see = self.AddressDeque()
        self.trace_from_roots()
        self.to_see.delete()
        #
        # Prepare new views on the same memory
        #
        toaddr = llarena.arena_new_view(self.space)
        maxnum = self.space_size - (self.free - self.space)
        maxnum /= BYTES_PER_TID
        llarena.arena_reserve(self.free, llmemory.sizeof(TID_BACKUP, maxnum))
        self.tid_backup = llmemory.cast_adr_to_ptr(self.free,
                                                   lltype.Ptr(TID_BACKUP))
        #
        # Walk all objects and assign forward pointers in the same order,
        # also updating all references
        #
        self.update_forward_pointers(toaddr, maxnum)
        if (self.run_finalizers.non_empty()
                or self.objects_with_finalizers.non_empty()):
            self.update_run_finalizers()

        self.update_objects_with_id()
        self.compact()
        #
        self.tid_backup = lltype.nullptr(TID_BACKUP)
        self.free = self.finaladdr
        self.next_collect_after = self.next_collection(self.finaladdr - toaddr,
                                                       self.num_alive_objs,
                                                       requested_size)
        #
        if not translated_to_c():
            remaining_size = (toaddr + self.space_size) - self.finaladdr
            llarena.arena_reset(self.finaladdr, remaining_size, False)
            llarena.arena_free(self.space)
            self.space = toaddr
        #
        self.debug_check_consistency()
        self.debug_collect_finish()
        if self.next_collect_after < 0:
            raise MemoryError
        #
        if self.run_finalizers.non_empty():
            self.execute_finalizers()
            return True  # executed some finalizers
        else:
            return False  # no finalizer executed
Example 18
    def markcompactcollect(self, requested_size=0):
        self.debug_collect_start(requested_size)
        self.debug_check_consistency()
        #
        # Mark alive objects
        #
        self.to_see = self.AddressDeque()
        self.trace_from_roots()
        self.to_see.delete()
        #
        # Prepare new views on the same memory
        #
        toaddr = llarena.arena_new_view(self.space)
        maxnum = self.space_size - (self.free - self.space)
        maxnum /= BYTES_PER_TID
        llarena.arena_reserve(self.free, llmemory.sizeof(TID_BACKUP, maxnum))
        self.tid_backup = llmemory.cast_adr_to_ptr(self.free,
                                                   lltype.Ptr(TID_BACKUP))
        #
        # Walk all objects and assign forward pointers in the same order,
        # also updating all references
        #
        self.update_forward_pointers(toaddr, maxnum)
        if (self.run_finalizers.non_empty() or
            self.objects_with_finalizers.non_empty()):
            self.update_run_finalizers()

        self.update_objects_with_id()
        self.compact()
        #
        self.tid_backup = lltype.nullptr(TID_BACKUP)
        self.free = self.finaladdr
        self.next_collect_after = self.next_collection(self.finaladdr - toaddr,
                                                       self.num_alive_objs,
                                                       requested_size)
        #
        if not translated_to_c():
            remaining_size = (toaddr + self.space_size) - self.finaladdr
            llarena.arena_reset(self.finaladdr, remaining_size, False)
            llarena.arena_free(self.space)
            self.space = toaddr
        #
        self.debug_check_consistency()
        self.debug_collect_finish()
        if self.next_collect_after < 0:
            raise MemoryError
        #
        if self.run_finalizers.non_empty():
            self.execute_finalizers()
            return True      # executed some finalizers
        else:
            return False     # no finalizer executed
Example 19
    def malloc_varsize_clear(self,
                             typeid,
                             length,
                             size,
                             itemsize,
                             offset_to_length,
                             can_collect,
                             has_finalizer=False):
        # Only use the nursery if there are not too many items.
        if not raw_malloc_usage(itemsize):
            too_many_items = False
        else:
            # The following line is usually constant-folded because both
            # min_nursery_size and itemsize are constants (the latter
            # due to inlining).
            maxlength_for_minimal_nursery = (self.min_nursery_size // 4 //
                                             raw_malloc_usage(itemsize))

            # The actual maximum length for our nursery depends on how
            # many times our nursery is bigger than the minimal size.
            # The computation is done in this roundabout way so that
            # the only remaining computation is the following
            # shift.
            maxlength = maxlength_for_minimal_nursery << self.nursery_scale
            too_many_items = length > maxlength

        if (has_finalizer or not can_collect or too_many_items or
            (raw_malloc_usage(size) > self.lb_young_var_basesize
             and raw_malloc_usage(size) > self.largest_young_var_basesize)):
            # ^^^ we do two size comparisons; the first one appears redundant,
            #     but it can be constant-folded if 'size' is a constant; then
            #     it almost always folds down to False, which kills the
            #     second comparison as well.
            return SemiSpaceGC.malloc_varsize_clear(self, typeid, length, size,
                                                    itemsize, offset_to_length,
                                                    can_collect, has_finalizer)
        # with the above checks we know now that totalsize cannot be more
        # than about half of the nursery size; in particular, the + and *
        # cannot overflow
        size_gc_header = self.gcheaderbuilder.size_gc_header
        totalsize = size_gc_header + size + itemsize * length
        result = self.nursery_free
        if raw_malloc_usage(totalsize) > self.nursery_top - result:
            result = self.collect_nursery()
        llarena.arena_reserve(result, totalsize)
        # GCFLAG_NO_YOUNG_PTRS is never set on young objs
        self.init_gc_object(result, typeid, flags=0)
        (result + size_gc_header + offset_to_length).signed[0] = length
        self.nursery_free = result + llarena.round_up_for_allocation(totalsize)
        return llmemory.cast_adr_to_ptr(result + size_gc_header,
                                        llmemory.GCREF)
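With illustrative numbers (min_nursery_size and nursery_scale are GC configuration values, not quoted from the source), the constant-folding described in the comments above works out as follows:

min_nursery_size = 48 * 1024    # hypothetical configuration
itemsize = 8                    # a constant after inlining
maxlength_for_minimal_nursery = min_nursery_size // 4 // itemsize   # 1536
nursery_scale = 2               # the nursery is 2**2 times the minimal size
maxlength = maxlength_for_minimal_nursery << nursery_scale          # 6144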
Example 20
 def allocate_new_page(self, size_class):
     """Allocate and return a new page for the given size_class."""
     #
     # Allocate a new arena if needed.
     if self.current_arena == ARENA_NULL:
         self.allocate_new_arena()
     #
     # The result is simply 'current_arena.freepages'.
     arena = self.current_arena
     result = arena.freepages
     if arena.nfreepages > 0:
         #
         # The 'result' was part of the chained list; read the next.
         arena.nfreepages -= 1
         freepages = result.address[0]
         llarena.arena_reset(result,
                             llmemory.sizeof(llmemory.Address),
                             0)
         #
     else:
         # The 'result' is part of the uninitialized pages.
         ll_assert(self.num_uninitialized_pages > 0,
                   "fully allocated arena found in self.current_arena")
         self.num_uninitialized_pages -= 1
         if self.num_uninitialized_pages > 0:
             freepages = result + self.page_size
         else:
             freepages = NULL
     #
     arena.freepages = freepages
     if freepages == NULL:
         # This was the last page, so put the arena away into
         # arenas_lists[0].
         ll_assert(arena.nfreepages == 0, 
                   "freepages == NULL but nfreepages > 0")
         arena.nextarena = self.arenas_lists[0]
         self.arenas_lists[0] = arena
         self.current_arena = ARENA_NULL
     #
     # Initialize the fields of the resulting page
     llarena.arena_reserve(result, llmemory.sizeof(PAGE_HEADER))
     page = llmemory.cast_adr_to_ptr(result, PAGE_PTR)
     page.arena = arena
     page.nfree = 0
     page.freeblock = result + self.hdrsize
     page.nextpage = PAGE_NULL
     ll_assert(self.page_for_size[size_class] == PAGE_NULL,
               "allocate_new_page() called but a page is already waiting")
     self.page_for_size[size_class] = page
     return page
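The freepages chain read above via result.address[0] is an intrusive freelist: each free page stores the address of the next free page in its own first word, reserved as a single Address-sized cell. A minimal sketch of the link/unlink steps, under the same import assumptions; page_size is illustrative:

from pypy.rpython.lltypesystem import llmemory, llarena

page_size = 64
arena = llarena.arena_malloc(2 * page_size, False)
page0 = arena
page1 = arena + page_size
# link page0 -> page1 through page0's first word
llarena.arena_reserve(page0, llmemory.sizeof(llmemory.Address))
page0.address[0] = page1
# unlink: read the next pointer, then clear the cell again
nextpage = page0.address[0]
llarena.arena_reset(page0, llmemory.sizeof(llmemory.Address), 0)
assert nextpage == page1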
Example 21
 def free_page(self, page):
     """Free a whole page."""
     #
     # Insert the freed page in the arena's 'freepages' list.
     # If nfreepages == totalpages, then it will be freed at the
     # end of mass_free().
     arena = page.arena
     arena.nfreepages += 1
     pageaddr = llmemory.cast_ptr_to_adr(page)
     pageaddr = llarena.getfakearenaaddress(pageaddr)
     llarena.arena_reset(pageaddr, self.page_size, 0)
     llarena.arena_reserve(pageaddr, llmemory.sizeof(llmemory.Address))
     pageaddr.address[0] = arena.freepages
     arena.freepages = pageaddr
Example 22
 def malloc_fixedsize_clear(self, typeid16, size, has_finalizer=False, contains_weakptr=False):
     size_gc_header = self.gcheaderbuilder.size_gc_header
     totalsize = size_gc_header + size
     result = self.free
     if raw_malloc_usage(totalsize) > self.top_of_space - result:
         result = self.obtain_free_space(totalsize)
     llarena.arena_reserve(result, totalsize)
     self.init_gc_object(result, typeid16)
     self.free = result + totalsize
     if has_finalizer:
         self.objects_with_finalizers.append(result + size_gc_header)
     if contains_weakptr:
         self.objects_with_weakrefs.append(result + size_gc_header)
     return llmemory.cast_adr_to_ptr(result + size_gc_header, llmemory.GCREF)
Example 23
def test_look_inside_object():
    # this code is also used in translation tests below
    myarenasize = 50
    a = arena_malloc(myarenasize, False)
    b = a + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(b, precomputed_size)
    (b + llmemory.offsetof(SX, 'x')).signed[0] = 123
    assert llmemory.cast_adr_to_ptr(b, SPTR).x == 123
    llmemory.cast_adr_to_ptr(b, SPTR).x += 1
    assert (b + llmemory.offsetof(SX, 'x')).signed[0] == 124
    arena_reset(a, myarenasize, True)
    arena_reserve(b, round_up_for_allocation(llmemory.sizeof(SX)))
    assert llmemory.cast_adr_to_ptr(b, SPTR).x == 0
    arena_free(a)
    return 42
Example 24
def test_arena_protect():
    a = arena_malloc(100, False)
    S = lltype.Struct('S', ('x', lltype.Signed))
    arena_reserve(a, llmemory.sizeof(S))
    p = llmemory.cast_adr_to_ptr(a, lltype.Ptr(S))
    p.x = 123
    assert p.x == 123
    arena_protect(a, 100, True)
    py.test.raises(ArenaError, arena_reserve, a + 48, llmemory.sizeof(S))
    py.test.raises(RuntimeError, "p.x")
    py.test.raises(RuntimeError, "p.x = 124")
    arena_protect(a, 100, False)
    assert p.x == 123
    p.x = 125
    assert p.x == 125
Example 25
 def allocate_new_page(self, size_class):
     """Allocate and return a new page for the given size_class."""
     #
     # Allocate a new arena if needed.
     if self.current_arena == ARENA_NULL:
         self.allocate_new_arena()
     #
     # The result is simply 'current_arena.freepages'.
     arena = self.current_arena
     result = arena.freepages
     if arena.nfreepages > 0:
         #
         # The 'result' was part of the chained list; read the next.
         arena.nfreepages -= 1
         freepages = result.address[0]
         llarena.arena_reset(result, llmemory.sizeof(llmemory.Address), 0)
         #
     else:
         # The 'result' is part of the uninitialized pages.
         ll_assert(self.num_uninitialized_pages > 0,
                   "fully allocated arena found in self.current_arena")
         self.num_uninitialized_pages -= 1
         if self.num_uninitialized_pages > 0:
             freepages = result + self.page_size
         else:
             freepages = NULL
     #
     arena.freepages = freepages
     if freepages == NULL:
         # This was the last page, so put the arena away into
         # arenas_lists[0].
         ll_assert(arena.nfreepages == 0,
                   "freepages == NULL but nfreepages > 0")
         arena.nextarena = self.arenas_lists[0]
         self.arenas_lists[0] = arena
         self.current_arena = ARENA_NULL
     #
     # Initialize the fields of the resulting page
     llarena.arena_reserve(result, llmemory.sizeof(PAGE_HEADER))
     page = llmemory.cast_adr_to_ptr(result, PAGE_PTR)
     page.arena = arena
     page.nfree = 0
     page.freeblock = result + self.hdrsize
     page.nextpage = PAGE_NULL
     ll_assert(self.page_for_size[size_class] == PAGE_NULL,
               "allocate_new_page() called but a page is already waiting")
     self.page_for_size[size_class] = page
     return page
Example 26
    def malloc_varsize_clear(self,
                             typeid,
                             length,
                             size,
                             itemsize,
                             offset_to_length,
                             can_collect,
                             has_finalizer=False):
        if has_finalizer or not can_collect:
            return SemiSpaceGC.malloc_varsize_clear(self, typeid, length, size,
                                                    itemsize, offset_to_length,
                                                    can_collect, has_finalizer)
        size_gc_header = self.gcheaderbuilder.size_gc_header
        nonvarsize = size_gc_header + size

        # Compute the maximal length that makes the object still
        # below 'nonlarge_max'.  All the following logic is usually
        # constant-folded because self.nonlarge_max, size and itemsize
        # are all constants (the arguments are constant due to
        # inlining) and self.has_gcptr_in_varsize() is constant-folded.
        if self.has_gcptr_in_varsize(typeid):
            nonlarge_max = self.nonlarge_gcptrs_max
        else:
            nonlarge_max = self.nonlarge_max

        if not raw_malloc_usage(itemsize):
            too_many_items = raw_malloc_usage(nonvarsize) > nonlarge_max
        else:
            maxlength = nonlarge_max - raw_malloc_usage(nonvarsize)
            maxlength = maxlength // raw_malloc_usage(itemsize)
            too_many_items = length > maxlength

        if not too_many_items:
            # With the above checks we know now that totalsize cannot be more
            # than 'nonlarge_max'; in particular, the + and * cannot overflow.
            # Let's try to fit the object in the nursery.
            totalsize = nonvarsize + itemsize * length
            result = self.nursery_free
            if raw_malloc_usage(totalsize) <= self.nursery_top - result:
                llarena.arena_reserve(result, totalsize)
                # GCFLAG_NO_YOUNG_PTRS is never set on young objs
                self.init_gc_object(result, typeid, flags=0)
                (result + size_gc_header + offset_to_length).signed[0] = length
                self.nursery_free = result + llarena.round_up_for_allocation(
                    totalsize)
                return llmemory.cast_adr_to_ptr(result + size_gc_header,
                                                llmemory.GCREF)
        return self.malloc_varsize_slowpath(typeid, length)
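With illustrative integers in place of the symbolic sizes (raw_malloc_usage() converts those to plain ints), the maxlength computation above works out as:

nonlarge_max = 512    # hypothetical threshold
nonvarsize = 24       # header plus fixed part, already converted to an int
itemsize = 8
maxlength = (nonlarge_max - nonvarsize) // itemsize   # 61
too_many_items = 100 > maxlength                      # True for length=100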
Example 27
 def _make_a_copy_with_tid(self, obj, objsize, tid):
     totalsize = self.size_gc_header() + objsize
     newaddr = self.free
     llarena.arena_reserve(newaddr, totalsize)
     raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
     if tid & GCFLAG_HASHMASK:
         hash = self._get_object_hash(obj, objsize, tid)
         llarena.arena_reserve(newaddr + totalsize, llmemory.sizeof(lltype.Signed))
         (newaddr + totalsize).signed[0] = hash
         tid |= GC_HASH_HASFIELD
         totalsize += llmemory.sizeof(lltype.Signed)
     self.free += totalsize
     newhdr = llmemory.cast_adr_to_ptr(newaddr, lltype.Ptr(self.HDR))
     newhdr.tid = tid
     newobj = newaddr + self.size_gc_header()
     return newobj
Example 28
 def malloc_varsize_clear(self, typeid16, length, size, itemsize, offset_to_length):
     size_gc_header = self.gcheaderbuilder.size_gc_header
     nonvarsize = size_gc_header + size
     try:
         varsize = ovfcheck(itemsize * length)
         totalsize = ovfcheck(nonvarsize + varsize)
     except OverflowError:
         raise memoryError
     result = self.free
     if raw_malloc_usage(totalsize) > self.top_of_space - result:
         result = self.obtain_free_space(totalsize)
     llarena.arena_reserve(result, totalsize)
     self.init_gc_object(result, typeid16)
     (result + size_gc_header + offset_to_length).signed[0] = length
     self.free = result + llarena.round_up_for_allocation(totalsize)
     return llmemory.cast_adr_to_ptr(result + size_gc_header, llmemory.GCREF)
Example 29
 def copy(self, obj):
     if self.is_forwarded(obj):
         #llop.debug_print(lltype.Void, obj, "already copied to", self.get_forwarding_address(obj))
         return self.get_forwarding_address(obj)
     else:
         newaddr = self.free
         objsize = self.get_size(obj)
         totalsize = self.size_gc_header() + objsize
         llarena.arena_reserve(newaddr, totalsize)
         raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
         self.free += totalsize
         newobj = newaddr + self.size_gc_header()
         #llop.debug_print(lltype.Void, obj, "copied to", newobj,
         #                 "tid", self.header(obj).tid,
         #                 "size", totalsize)
         self.set_forwarding_address(obj, newobj, objsize)
         return newobj
Example 30
    def malloc_varsize_clear(self, typeid, length, size, itemsize,
                             offset_to_length, can_collect,
                             has_finalizer=False):
        # Only use the nursery if there are not too many items.
        if not raw_malloc_usage(itemsize):
            too_many_items = False
        else:
            # The following line is usually constant-folded because both
            # min_nursery_size and itemsize are constants (the latter
            # due to inlining).
            maxlength_for_minimal_nursery = (self.min_nursery_size // 4 //
                                             raw_malloc_usage(itemsize))
            
            # The actual maximum length for our nursery depends on how
            # many times our nursery is bigger than the minimal size.
            # The computation is done in this roundabout way so that
            # the only remaining computation is the following
            # shift.
            maxlength = maxlength_for_minimal_nursery << self.nursery_scale
            too_many_items = length > maxlength

        if (has_finalizer or not can_collect or
            too_many_items or
            (raw_malloc_usage(size) > self.lb_young_var_basesize and
             raw_malloc_usage(size) > self.largest_young_var_basesize)):
            # ^^^ we do two size comparisons; the first one appears redundant,
            #     but it can be constant-folded if 'size' is a constant; then
            #     it almost always folds down to False, which kills the
            #     second comparison as well.
            return SemiSpaceGC.malloc_varsize_clear(self, typeid, length, size,
                                                    itemsize, offset_to_length,
                                                    can_collect, has_finalizer)
        # with the above checks we know now that totalsize cannot be more
        # than about half of the nursery size; in particular, the + and *
        # cannot overflow
        size_gc_header = self.gcheaderbuilder.size_gc_header
        totalsize = size_gc_header + size + itemsize * length
        result = self.nursery_free
        if raw_malloc_usage(totalsize) > self.nursery_top - result:
            result = self.collect_nursery()
        llarena.arena_reserve(result, totalsize)
        # GCFLAG_NO_YOUNG_PTRS is never set on young objs
        self.init_gc_object(result, typeid, flags=0)
        (result + size_gc_header + offset_to_length).signed[0] = length
        self.nursery_free = result + llarena.round_up_for_allocation(totalsize)
        return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
Example 31
 def malloc_fixedsize_clear(self, typeid, size, can_collect,
                            has_finalizer=False, contains_weakptr=False):
     size_gc_header = self.gcheaderbuilder.size_gc_header
     totalsize = size_gc_header + size
     result = self.free
     if raw_malloc_usage(totalsize) > self.top_of_space - result:
         if not can_collect:
             raise memoryError
         result = self.obtain_free_space(totalsize)
     llarena.arena_reserve(result, totalsize)
     self.init_gc_object(result, typeid)
     self.free = result + totalsize
     if has_finalizer:
         self.objects_with_finalizers.append(result + size_gc_header)
     if contains_weakptr:
         self.objects_with_weakrefs.append(result + size_gc_header)
     return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
Example 32
 def fn(argv):
     testrun = int(argv[1])
     a = arena_malloc(65536, False)
     arena_reserve(a, llmemory.sizeof(S))
     p = llmemory.cast_adr_to_ptr(a + 23432, lltype.Ptr(S))
     p.x = 123
     assert p.x == 123
     arena_protect(a, 65536, True)
     result = 0
     if testrun == 1:
         print p.x  # segfault
     if testrun == 2:
         p.x = 124  # segfault
     arena_protect(a, 65536, False)
     p.x += 10
     print p.x
     return 0
Example 33
 def _make_a_copy_with_tid(self, obj, objsize, tid):
     totalsize = self.size_gc_header() + objsize
     newaddr = self.free
     llarena.arena_reserve(newaddr, totalsize)
     raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
     if tid & GCFLAG_HASHMASK:
         hash = self._get_object_hash(obj, objsize, tid)
         llarena.arena_reserve(newaddr + totalsize,
                               llmemory.sizeof(lltype.Signed))
         (newaddr + totalsize).signed[0] = hash
         tid |= GC_HASH_HASFIELD
         totalsize += llmemory.sizeof(lltype.Signed)
     self.free += totalsize
     newhdr = llmemory.cast_adr_to_ptr(newaddr, lltype.Ptr(self.HDR))
     newhdr.tid = tid
     newobj = newaddr + self.size_gc_header()
     return newobj
Example 34
 def set_forwarding_address(self, obj, newobj, objsize):
     # To mark an object as forwarded, we set the GCFLAG_FORWARDED and
     # overwrite the object with a FORWARDSTUB.  Doing so is a bit
     # long-winded on llarena, but it all melts down to two memory
     # writes after translation to C.
     size_gc_header = self.size_gc_header()
     stubsize = llmemory.sizeof(self.FORWARDSTUB)
     tid = self.header(obj).tid
     ll_assert(tid & GCFLAG_EXTERNAL == 0, "unexpected GCFLAG_EXTERNAL")
     ll_assert(tid & GCFLAG_FORWARDED == 0, "unexpected GCFLAG_FORWARDED")
     # replace the object at 'obj' with a FORWARDSTUB.
     hdraddr = obj - size_gc_header
     llarena.arena_reset(hdraddr, size_gc_header + objsize, False)
     llarena.arena_reserve(hdraddr, size_gc_header + stubsize)
     hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(self.HDR))
     hdr.tid = tid | GCFLAG_FORWARDED
     stub = llmemory.cast_adr_to_ptr(obj, self.FORWARDSTUBPTR)
     stub.forw = newobj
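The reset-then-reserve pair above is the llarena idiom for overwriting a live object in place, since the emulation rejects plain overwrites; Examples 44 and 45 below test exactly this. A stripped-down sketch with hypothetical HDR and STUB layouts (the real FORWARDSTUB likewise holds a single Address):

from pypy.rpython.lltypesystem import lltype, llmemory, llarena
from pypy.rpython.memory.gcheader import GCHeaderBuilder

HDR = lltype.Struct('HDR', ('tid', lltype.Signed))
S = lltype.GcStruct('S', ('y', lltype.Signed), ('z', lltype.Signed))
STUB = lltype.GcStruct('STUB', ('forw', llmemory.Address))
size_gc_header = GCHeaderBuilder(HDR).size_gc_header

a = llarena.arena_malloc(64, False)
llarena.arena_reserve(a, size_gc_header + llmemory.sizeof(S))
# kill the object, then reserve a smaller HDR+STUB over the same bytes
llarena.arena_reset(a, size_gc_header + llmemory.sizeof(S), False)
llarena.arena_reserve(a, size_gc_header + llmemory.sizeof(STUB))
stub = llmemory.cast_adr_to_ptr(a + size_gc_header, lltype.Ptr(STUB))
stub.forw = llmemory.NULL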
Example 35
 def set_forwarding_address(self, obj, newobj, objsize):
     # To mark an object as forwarded, we set the GCFLAG_FORWARDED and
     # overwrite the object with a FORWARDSTUB.  Doing so is a bit
     # long-winded on llarena, but it all melts down to two memory
     # writes after translation to C.
     size_gc_header = self.size_gc_header()
     stubsize = llmemory.sizeof(self.FORWARDSTUB)
     tid = self.header(obj).tid
     ll_assert(tid & GCFLAG_EXTERNAL == 0,  "unexpected GCFLAG_EXTERNAL")
     ll_assert(tid & GCFLAG_FORWARDED == 0, "unexpected GCFLAG_FORWARDED")
     # replace the object at 'obj' with a FORWARDSTUB.
     hdraddr = obj - size_gc_header
     llarena.arena_reset(hdraddr, size_gc_header + objsize, False)
     llarena.arena_reserve(hdraddr, size_gc_header + stubsize)
     hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(self.HDR))
     hdr.tid = tid | GCFLAG_FORWARDED
     stub = llmemory.cast_adr_to_ptr(obj, self.FORWARDSTUBPTR)
     stub.forw = newobj
Example 36
    def malloc_varsize_clear(self, typeid, length, size, itemsize,
                             offset_to_length, can_collect,
                             has_finalizer=False):
        if has_finalizer or not can_collect:
            return SemiSpaceGC.malloc_varsize_clear(self, typeid, length, size,
                                                    itemsize, offset_to_length,
                                                    can_collect, has_finalizer)
        size_gc_header = self.gcheaderbuilder.size_gc_header
        nonvarsize = size_gc_header + size

        # Compute the maximal length that makes the object still
        # below 'nonlarge_max'.  All the following logic is usually
        # constant-folded because self.nonlarge_max, size and itemsize
        # are all constants (the arguments are constant due to
        # inlining) and self.has_gcptr_in_varsize() is constant-folded.
        if self.has_gcptr_in_varsize(typeid):
            nonlarge_max = self.nonlarge_gcptrs_max
        else:
            nonlarge_max = self.nonlarge_max

        if not raw_malloc_usage(itemsize):
            too_many_items = raw_malloc_usage(nonvarsize) > nonlarge_max
        else:
            maxlength = nonlarge_max - raw_malloc_usage(nonvarsize)
            maxlength = maxlength // raw_malloc_usage(itemsize)
            too_many_items = length > maxlength

        if not too_many_items:
            # With the above checks we know now that totalsize cannot be more
            # than 'nonlarge_max'; in particular, the + and * cannot overflow.
            # Let's try to fit the object in the nursery.
            totalsize = nonvarsize + itemsize * length
            result = self.nursery_free
            if raw_malloc_usage(totalsize) <= self.nursery_top - result:
                llarena.arena_reserve(result, totalsize)
                # GCFLAG_NO_YOUNG_PTRS is never set on young objs
                self.init_gc_object(result, typeid, flags=0)
                (result + size_gc_header + offset_to_length).signed[0] = length
                self.nursery_free = result + llarena.round_up_for_allocation(
                    totalsize)
                return llmemory.cast_adr_to_ptr(result+size_gc_header,
                                                llmemory.GCREF)
        return self.malloc_varsize_slowpath(typeid, length)
Example 37
 def malloc_fixedsize_clear(self, typeid16, size,
                            has_finalizer=False,
                            is_finalizer_light=False,
                            contains_weakptr=False):
     size_gc_header = self.gcheaderbuilder.size_gc_header
     totalsize = size_gc_header + size
     result = self.free
     if raw_malloc_usage(totalsize) > self.top_of_space - result:
         result = self.obtain_free_space(totalsize)
     llarena.arena_reserve(result, totalsize)
     self.init_gc_object(result, typeid16)
     self.free = result + totalsize
     #if is_finalizer_light:
     #    self.objects_with_light_finalizers.append(result + size_gc_header)
     #else:
     if has_finalizer:
         self.objects_with_finalizers.append(result + size_gc_header)
     if contains_weakptr:
         self.objects_with_weakrefs.append(result + size_gc_header)
     return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
Example 38
 def malloc(self, size):
     """Allocate a block from a page in an arena."""
     nsize = llmemory.raw_malloc_usage(size)
     ll_assert(nsize > 0, "malloc: size is null or negative")
     ll_assert(nsize <= self.small_request_threshold,"malloc: size too big")
     ll_assert((nsize & (WORD-1)) == 0, "malloc: size is not aligned")
     self.total_memory_used += r_uint(nsize)
     #
     # Get the page to use from the size
     size_class = nsize >> WORD_POWER_2
     page = self.page_for_size[size_class]
     if page == PAGE_NULL:
         page = self.allocate_new_page(size_class)
     #
     # The result is simply 'page.freeblock'
     result = page.freeblock
     if page.nfree > 0:
         #
         # The 'result' was part of the chained list; read the next.
         page.nfree -= 1
         freeblock = result.address[0]
         llarena.arena_reset(result,
                             llmemory.sizeof(llmemory.Address),
                             0)
         #
     else:
         # The 'result' is part of the uninitialized blocks.
         freeblock = result + nsize
     #
     page.freeblock = freeblock
     #
     pageaddr = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
     if freeblock - pageaddr > self.page_size - nsize:
         # This was the last free block, so unlink the page from the
         # chained list and put it in the 'full_page_for_size' list.
         self.page_for_size[size_class] = page.nextpage
         page.nextpage = self.full_page_for_size[size_class]
         self.full_page_for_size[size_class] = page
     #
     llarena.arena_reserve(result, _dummy_size(size))
     return result
Example 39
 def _make_a_copy_with_tid(self, obj, objsize, tid):
     totalsize = self.size_gc_header() + objsize
     newaddr = self.free
     llarena.arena_reserve(newaddr, totalsize)
     raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
     #
     # check if we need to write a hash value at the end of the new obj
     if tid & (GCFLAG_HASHTAKEN|GCFLAG_HASHFIELD):
         if tid & GCFLAG_HASHFIELD:
             hash = (obj + objsize).signed[0]
         else:
             hash = llmemory.cast_adr_to_int(obj)
             tid |= GCFLAG_HASHFIELD
         (newaddr + totalsize).signed[0] = hash
         totalsize += llmemory.sizeof(lltype.Signed)
     #
     self.free += totalsize
     newhdr = llmemory.cast_adr_to_ptr(newaddr, lltype.Ptr(self.HDR))
     newhdr.tid = tid
     newobj = newaddr + self.size_gc_header()
     return newobj
Example 40
 def _make_a_copy_with_tid(self, obj, objsize, tid):
     totalsize = self.size_gc_header() + objsize
     newaddr = self.free
     llarena.arena_reserve(newaddr, totalsize)
     raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
     #
     # check if we need to write a hash value at the end of the new obj
     if tid & (GCFLAG_HASHTAKEN | GCFLAG_HASHFIELD):
         if tid & GCFLAG_HASHFIELD:
             hash = (obj + objsize).signed[0]
         else:
             hash = llmemory.cast_adr_to_int(obj)
             tid |= GCFLAG_HASHFIELD
         (newaddr + totalsize).signed[0] = hash
         totalsize += llmemory.sizeof(lltype.Signed)
     #
     self.free += totalsize
     newhdr = llmemory.cast_adr_to_ptr(newaddr, lltype.Ptr(self.HDR))
     newhdr.tid = tid
     newobj = newaddr + self.size_gc_header()
     return newobj
Example 41
 def link(pageaddr, size_class, size_block, nblocks, nusedblocks, step=1):
     assert step in (1, 2)
     llarena.arena_reserve(pageaddr, llmemory.sizeof(PAGE_HEADER))
     page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR)
     if step == 1:
         page.nfree = 0
         nuninitialized = nblocks - nusedblocks
     else:
         page.nfree = nusedblocks
         nuninitialized = nblocks - 2*nusedblocks
     page.freeblock = pageaddr + hdrsize + nusedblocks * size_block
     if nusedblocks < nblocks:
         chainedlists = ac.page_for_size
     else:
         chainedlists = ac.full_page_for_size
     page.nextpage = chainedlists[size_class]
     page.arena = ac.current_arena
     chainedlists[size_class] = page
     if fill_with_objects:
         for i in range(0, nusedblocks*step, step):
             objaddr = pageaddr + hdrsize + i * size_block
             llarena.arena_reserve(objaddr, _dummy_size(size_block))
         if step == 2:
             prev = 'page.freeblock'
             for i in range(1, nusedblocks*step, step):
                 holeaddr = pageaddr + hdrsize + i * size_block
                 llarena.arena_reserve(holeaddr,
                                       llmemory.sizeof(llmemory.Address))
                 exec '%s = holeaddr' % prev in globals(), locals()
                 prevhole = holeaddr
                 prev = 'prevhole.address[0]'
             endaddr = pageaddr + hdrsize + 2*nusedblocks * size_block
             exec '%s = endaddr' % prev in globals(), locals()
     assert ac._nuninitialized(page, size_class) == nuninitialized
Example 42
 def malloc(self, size):
     """Allocate a block from a page in an arena."""
     nsize = llmemory.raw_malloc_usage(size)
     ll_assert(nsize > 0, "malloc: size is null or negative")
     ll_assert(nsize <= self.small_request_threshold,
               "malloc: size too big")
     ll_assert((nsize & (WORD - 1)) == 0, "malloc: size is not aligned")
     self.total_memory_used += r_uint(nsize)
     #
     # Get the page to use from the size
     size_class = nsize >> WORD_POWER_2
     page = self.page_for_size[size_class]
     if page == PAGE_NULL:
         page = self.allocate_new_page(size_class)
     #
     # The result is simply 'page.freeblock'
     result = page.freeblock
     if page.nfree > 0:
         #
         # The 'result' was part of the chained list; read the next.
         page.nfree -= 1
         freeblock = result.address[0]
         llarena.arena_reset(result, llmemory.sizeof(llmemory.Address), 0)
         #
     else:
         # The 'result' is part of the uninitialized blocks.
         freeblock = result + nsize
     #
     page.freeblock = freeblock
     #
     pageaddr = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
     if freeblock - pageaddr > self.page_size - nsize:
         # This was the last free block, so unlink the page from the
         # chained list and put it in the 'full_page_for_size' list.
         self.page_for_size[size_class] = page.nextpage
         page.nextpage = self.full_page_for_size[size_class]
         self.full_page_for_size[size_class] = page
     #
     llarena.arena_reserve(result, _dummy_size(size))
     return result
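The size-class mapping above shifts the rounded request by the word size. With WORD = 8 and WORD_POWER_2 = 3 (the 64-bit configuration; illustrative here), the first few classes come out as:

WORD = 8
WORD_POWER_2 = 3
for nsize in (8, 16, 24, 32):
    assert nsize & (WORD - 1) == 0         # alignment precondition
    size_class = nsize >> WORD_POWER_2     # 1, 2, 3, 4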
Example 43
 def link(pageaddr, size_class, size_block, nblocks, nusedblocks, step=1):
     assert step in (1, 2)
     llarena.arena_reserve(pageaddr, llmemory.sizeof(PAGE_HEADER))
     page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR)
     if step == 1:
         page.nfree = 0
         nuninitialized = nblocks - nusedblocks
     else:
         page.nfree = nusedblocks
         nuninitialized = nblocks - 2 * nusedblocks
     page.freeblock = pageaddr + hdrsize + nusedblocks * size_block
     if nusedblocks < nblocks:
         chainedlists = ac.page_for_size
     else:
         chainedlists = ac.full_page_for_size
     page.nextpage = chainedlists[size_class]
     page.arena = ac.current_arena
     chainedlists[size_class] = page
     if fill_with_objects:
         for i in range(0, nusedblocks * step, step):
             objaddr = pageaddr + hdrsize + i * size_block
             llarena.arena_reserve(objaddr, _dummy_size(size_block))
         if step == 2:
             prev = 'page.freeblock'
             for i in range(1, nusedblocks * step, step):
                 holeaddr = pageaddr + hdrsize + i * size_block
                 llarena.arena_reserve(holeaddr,
                                       llmemory.sizeof(llmemory.Address))
                 exec '%s = holeaddr' % prev in globals(), locals()
                 prevhole = holeaddr
                 prev = 'prevhole.address[0]'
             endaddr = pageaddr + hdrsize + 2 * nusedblocks * size_block
             exec '%s = endaddr' % prev in globals(), locals()
     assert ac._nuninitialized(page, size_class) == nuninitialized
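What the step=2 branch builds is easier to see with plain indices instead of arena addresses: used blocks sit at the even positions, the free list is threaded through the odd ones, and the last hole points at the first uninitialized block (the final 'endaddr' exec above). A hypothetical model; thread_free_list is not part of the snippet:

def thread_free_list(nusedblocks):
    holes = [1 + 2 * i for i in range(nusedblocks)]   # odd block indices
    end = 2 * nusedblocks                             # first uninitialized block
    # each hole stores the index of the next one; the last points past
    # the used/free area
    return dict(zip(holes, holes[1:] + [end]))

assert thread_free_list(3) == {1: 3, 3: 5, 5: 6}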
Example no. 48
0
def test_replace_object_with_stub():
    from pypy.rpython.memory.gcheader import GCHeaderBuilder
    HDR = lltype.Struct('HDR', ('x', lltype.Signed))
    S = lltype.GcStruct('S', ('y', lltype.Signed), ('z', lltype.Signed))
    STUB = lltype.GcStruct('STUB', ('t', lltype.Char))
    gcheaderbuilder = GCHeaderBuilder(HDR)
    size_gc_header = gcheaderbuilder.size_gc_header
    ssize = llmemory.raw_malloc_usage(llmemory.sizeof(S))

    a = arena_malloc(13*ssize, True)
    hdraddr = a + 3*ssize
    arena_reserve(hdraddr, size_gc_header + llmemory.sizeof(S))
    hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(HDR))
    hdr.x = 42
    obj = llmemory.cast_adr_to_ptr(hdraddr + size_gc_header, lltype.Ptr(S))
    obj.y = -5
    obj.z = -6

    hdraddr = llmemory.cast_ptr_to_adr(obj) - size_gc_header
    arena_reset(hdraddr, size_gc_header + llmemory.sizeof(S), False)
    arena_reserve(hdraddr, size_gc_header + llmemory.sizeof(STUB))

    # check that it is possible to reach the newly reserved HDR+STUB
    # via the header of the old 'obj' pointer, both via the existing
    # 'hdraddr':
    hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(HDR))
    hdr.x = 46
    stub = llmemory.cast_adr_to_ptr(hdraddr + size_gc_header, lltype.Ptr(STUB))
    stub.t = '!'

    # and via a (now-invalid) pointer to the old 'obj' (this is needed
    # because, during a garbage collection, there are still pointers to
    # the old 'obj' around that need to be fixed):
    hdraddr = llmemory.cast_ptr_to_adr(obj) - size_gc_header
    hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(HDR))
    assert hdr.x == 46
    stub = llmemory.cast_adr_to_ptr(hdraddr + size_gc_header,
                                    lltype.Ptr(STUB))
    assert stub.t == '!'
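The test leans on one invariant: an object and its header are always size_gc_header bytes apart, so either address recovers the other even after the object has been replaced by a stub in place. A toy version with integer offsets; the value 8 is purely illustrative:

SIZE_GC_HEADER = 8          # illustrative header size, not from the snippet

def obj_from_hdr(hdraddr):
    return hdraddr + SIZE_GC_HEADER

def hdr_from_obj(objaddr):
    return objaddr - SIZE_GC_HEADER

# round-tripping through the (possibly stale) object address still finds
# the header, which is why the second half of the test works
assert hdr_from_obj(obj_from_hdr(1000)) == 1000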
Example no. 50
0
    def update_forward_pointers(self, toaddr, num_of_alive_objs):
        fromaddr = self.space
        size_gc_header = self.gcheaderbuilder.size_gc_header
        i = 0
        while fromaddr < self.free:
            hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR))
            obj = fromaddr + size_gc_header
            objsize = self.get_size(obj)
            totalsize = size_gc_header + objsize
            if not self.marked(obj):
                self.set_forwarding_address(obj, NULL, i)
                hdr.forward_ptr = NULL
            else:
                llarena.arena_reserve(toaddr, totalsize)
                self.set_forwarding_address(obj, toaddr, i)
                toaddr += totalsize
            i += 1
            fromaddr += totalsize

        # now update references
        self.root_walker.walk_roots(
            MarkCompactGC._update_root,  # stack roots
            MarkCompactGC._update_root,  # static in prebuilt non-gc structures
            MarkCompactGC._update_root)  # static in prebuilt gc objects
        fromaddr = self.space
        i = 0
        while fromaddr < self.free:
            hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR))
            obj = fromaddr + size_gc_header
            objsize = self.get_size_from_backup(obj, i)
            totalsize = size_gc_header + objsize
            if not self.surviving(obj):
                pass
            else:
                self.trace_with_backup(obj, self._update_ref, i)
            fromaddr += totalsize
            i += 1
        return toaddr
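The first loop is a plain linear scan that assigns survivors consecutive destination addresses in to-space and gives dead objects a NULL forwarding address. A compact sketch with tuples instead of arena addresses; compute_forwarding is a made-up name:

def compute_forwarding(objects, toaddr):
    # 'objects' is a list of (totalsize, marked) pairs in address order
    forward = []
    for totalsize, marked in objects:
        if marked:
            forward.append(toaddr)     # survivor: next free slot in to-space
            toaddr += totalsize
        else:
            forward.append(None)       # dead: NULL forwarding address
    return forward, toaddr

fwd, end = compute_forwarding([(16, True), (24, False), (8, True)], 1000)
assert fwd == [1000, None, 1016] and end == 1024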
Example no. 51
0
    def update_forward_pointers(self, toaddr, num_of_alive_objs):
        self.base_forwarding_addr = toaddr
        fromaddr = self.space
        size_gc_header = self.gcheaderbuilder.size_gc_header
        i = 0
        while fromaddr < self.free:
            hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR))
            obj = fromaddr + size_gc_header
            objsize = self.get_size(obj)
            totalsize = size_gc_header + objsize
            if not self.marked(obj):
                self.set_null_forwarding_address(obj, i)
            else:
                llarena.arena_reserve(toaddr, totalsize)
                self.set_forwarding_address(obj, toaddr, i)
                toaddr += totalsize
            i += 1
            fromaddr += totalsize

        # now update references
        self.root_walker.walk_roots(
            MarkCompactGC._update_root,  # stack roots
            MarkCompactGC._update_root,  # static in prebuilt non-gc structures
            MarkCompactGC._update_root)  # static in prebuilt gc objects
        fromaddr = self.space
        i = 0
        while fromaddr < self.free:
            hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR))
            obj = fromaddr + size_gc_header
            objsize = self.get_size_from_backup(obj, i)
            totalsize = size_gc_header + objsize
            if not self.surviving(obj):
                pass
            else:
                self.trace_with_backup(obj, self._update_ref, i)
            fromaddr += totalsize
            i += 1
        return toaddr
Example no. 52
0
def arena_collection_for_test(pagesize, pagelayout, fill_with_objects=False):
    assert " " not in pagelayout.rstrip(" ")
    nb_pages = len(pagelayout)
    arenasize = pagesize * (nb_pages + 1) - 1
    ac = ArenaCollection(arenasize, pagesize, 9*WORD)
    #
    def link(pageaddr, size_class, size_block, nblocks, nusedblocks, step=1):
        assert step in (1, 2)
        llarena.arena_reserve(pageaddr, llmemory.sizeof(PAGE_HEADER))
        page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR)
        if step == 1:
            page.nfree = 0
            nuninitialized = nblocks - nusedblocks
        else:
            page.nfree = nusedblocks
            nuninitialized = nblocks - 2*nusedblocks
        page.freeblock = pageaddr + hdrsize + nusedblocks * size_block
        if nusedblocks < nblocks:
            chainedlists = ac.page_for_size
        else:
            chainedlists = ac.full_page_for_size
        page.nextpage = chainedlists[size_class]
        page.arena = ac.current_arena
        chainedlists[size_class] = page
        if fill_with_objects:
            for i in range(0, nusedblocks*step, step):
                objaddr = pageaddr + hdrsize + i * size_block
                llarena.arena_reserve(objaddr, _dummy_size(size_block))
            if step == 2:
                prev = 'page.freeblock'
                for i in range(1, nusedblocks*step, step):
                    holeaddr = pageaddr + hdrsize + i * size_block
                    llarena.arena_reserve(holeaddr,
                                          llmemory.sizeof(llmemory.Address))
                    exec '%s = holeaddr' % prev in globals(), locals()
                    prevhole = holeaddr
                    prev = 'prevhole.address[0]'
                endaddr = pageaddr + hdrsize + 2*nusedblocks * size_block
                exec '%s = endaddr' % prev in globals(), locals()
        assert ac._nuninitialized(page, size_class) == nuninitialized
    #
    ac.allocate_new_arena()
    num_initialized_pages = len(pagelayout.rstrip(" "))
    ac._startpageaddr = ac.current_arena.freepages
    if pagelayout.endswith(" "):
        ac.current_arena.freepages += pagesize * num_initialized_pages
    else:
        ac.current_arena.freepages = NULL
    ac.num_uninitialized_pages -= num_initialized_pages
    #
    for i in reversed(range(num_initialized_pages)):
        pageaddr = pagenum(ac, i)
        c = pagelayout[i]
        if '1' <= c <= '9':   # a partially used page (1 block free)
            size_class = int(c)
            size_block = WORD * size_class
            nblocks = (pagesize - hdrsize) // size_block
            link(pageaddr, size_class, size_block, nblocks, nblocks-1)
        elif c == '.':    # a free, but initialized, page
            llarena.arena_reserve(pageaddr, llmemory.sizeof(llmemory.Address))
            pageaddr.address[0] = ac.current_arena.freepages
            ac.current_arena.freepages = pageaddr
            ac.current_arena.nfreepages += 1
        elif c == '#':    # a random full page, in the list 'full_pages'
            size_class = fill_with_objects or 1
            size_block = WORD * size_class
            nblocks = (pagesize - hdrsize) // size_block
            link(pageaddr, size_class, size_block, nblocks, nblocks)
        elif c == '/':    # a page 1/3 allocated, 1/3 freed, 1/3 uninit objs
            size_class = fill_with_objects or 1
            size_block = WORD * size_class
            nblocks = (pagesize - hdrsize) // size_block
            link(pageaddr, size_class, size_block, nblocks, nblocks // 3,
                 step=2)
    #
    ac.allocate_new_arena = lambda: should_not_allocate_new_arenas
    return ac
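Read off the branches above, 'pagelayout' is a one-character-per-page description: a digit 1-9 is a partially used page of that size class (one block free), '.' a free but initialized page, '#' a full page, '/' a page one third used, one third freed and one third uninitialized, and trailing spaces stand for uninitialized pages. A hypothetical validator for such strings, not part of the snippet:

def check_pagelayout(pagelayout):
    body = pagelayout.rstrip(" ")
    assert " " not in body          # blanks are only allowed at the end
    return all(c in ".#/123456789" for c in body)

assert check_pagelayout("#.2/  ")
assert not check_pagelayout("x")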
Example no. 53
0
 def walk_page(self, page, block_size, ok_to_free_func):
     """Walk over all objects in a page, and ask ok_to_free_func()."""
     #
     # 'freeblock' is the next free block
     freeblock = page.freeblock
     #
     # 'prevfreeblockat' is the address of where 'freeblock' was read from.
     prevfreeblockat = lltype.direct_fieldptr(page, 'freeblock')
     prevfreeblockat = llmemory.cast_ptr_to_adr(prevfreeblockat)
     #
     obj = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
     obj += self.hdrsize
     surviving = 0  # initially
     skip_free_blocks = page.nfree
     #
     while True:
         #
         if obj == freeblock:
             #
             if skip_free_blocks == 0:
                 #
                 # 'obj' points to the first uninitialized block,
                 # or to the end of the page if there are none.
                 break
             #
             # 'obj' points to a free block.  It means that
             # 'prevfreeblockat.address[0]' does not need to be updated.
             # Just read the next free block from 'obj.address[0]'.
             skip_free_blocks -= 1
             prevfreeblockat = obj
             freeblock = obj.address[0]
             #
         else:
             # 'obj' points to a valid object.
             ll_assert(freeblock > obj,
                       "freeblocks are linked out of order")
             #
             if ok_to_free_func(obj):
                 #
                 # The object should die.
                 llarena.arena_reset(obj, _dummy_size(block_size), 0)
                 llarena.arena_reserve(obj,
                                       llmemory.sizeof(llmemory.Address))
                 # Insert 'obj' in the linked list of free blocks.
                 prevfreeblockat.address[0] = obj
                 prevfreeblockat = obj
                 obj.address[0] = freeblock
                 #
                 # Update the number of free objects in the page.
                 page.nfree += 1
                 #
             else:
                 # The object survives.
                 surviving += 1
         #
         obj += block_size
     #
     # Update the global total size of objects.
     self.total_memory_used += r_uint(surviving * block_size)
     #
     # Return the number of surviving objects.
     return surviving
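Because the walk visits blocks in address order and splices each dead block in as it is reached, the free list stays sorted by address (the invariant the ll_assert checks). With indices standing in for addresses, the loop reduces to the following sketch; sweep is a made-up name:

def sweep(blocks, already_free, ok_to_free):
    freelist, surviving = [], 0
    for b in blocks:                 # blocks visited in address order
        if b in already_free:
            freelist.append(b)       # existing hole: nothing to relink
        elif ok_to_free(b):
            freelist.append(b)       # dies: spliced in, order preserved
        else:
            surviving += 1           # survives
    return freelist, surviving

fl, n = sweep(range(6), {1, 3}, lambda b: b == 0)
assert fl == [0, 1, 3] and n == 3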
Example no. 54
0
def test_arena():
    S = lltype.Struct('S', ('x', lltype.Signed))
    SPTR = lltype.Ptr(S)
    ssize = llmemory.raw_malloc_usage(llmemory.sizeof(S))
    myarenasize = 2*ssize+1
    a = arena_malloc(myarenasize, False)
    assert a != llmemory.NULL
    assert a + 3 != llmemory.NULL

    arena_reserve(a, llmemory.sizeof(S))
    s1_ptr1 = cast_adr_to_ptr(a, SPTR)
    s1_ptr1.x = 1
    s1_ptr2 = cast_adr_to_ptr(a, SPTR)
    assert s1_ptr2.x == 1
    assert s1_ptr1 == s1_ptr2

    py.test.raises(ArenaError, arena_reserve, a + ssize + 1,  # misaligned
                   llmemory.sizeof(S))
    arena_reserve(a + ssize + 1, llmemory.sizeof(S), check_alignment=False)
    s2_ptr1 = cast_adr_to_ptr(a + ssize + 1, SPTR)
    py.test.raises(lltype.UninitializedMemoryAccess, 's2_ptr1.x')
    s2_ptr1.x = 2
    s2_ptr2 = cast_adr_to_ptr(a + ssize + 1, SPTR)
    assert s2_ptr2.x == 2
    assert s2_ptr1 == s2_ptr2
    assert s1_ptr1 != s2_ptr1
    assert not (s2_ptr2 == s1_ptr2)
    assert s1_ptr1 == cast_adr_to_ptr(a, SPTR)

    S2 = lltype.Struct('S2', ('y', lltype.Char))
    S2PTR = lltype.Ptr(S2)
    py.test.raises(lltype.InvalidCast, cast_adr_to_ptr, a, S2PTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a+1, SPTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a+ssize, SPTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a+2*ssize, SPTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a+2*ssize+1, SPTR)
    py.test.raises(ArenaError, arena_reserve, a+1, llmemory.sizeof(S),
                   False)
    py.test.raises(ArenaError, arena_reserve, a+ssize, llmemory.sizeof(S),
                   False)
    py.test.raises(ArenaError, arena_reserve, a+2*ssize, llmemory.sizeof(S),
                   False)
    py.test.raises(ArenaError, arena_reserve, a+2*ssize+1, llmemory.sizeof(S),
                   False)

    arena_reset(a, myarenasize, True)
    py.test.raises(ArenaError, cast_adr_to_ptr, a, SPTR)
    arena_reserve(a, llmemory.sizeof(S))
    s1_ptr1 = cast_adr_to_ptr(a, SPTR)
    assert s1_ptr1.x == 0
    s1_ptr1.x = 5

    arena_reserve(a + ssize, llmemory.sizeof(S2), check_alignment=False)
    s2_ptr1 = cast_adr_to_ptr(a + ssize, S2PTR)
    assert s2_ptr1.y == '\x00'
    s2_ptr1.y = 'X'

    assert cast_adr_to_ptr(a + 0, SPTR).x == 5
    assert cast_adr_to_ptr((a + ssize + 1) - 1, S2PTR).y == 'X'

    assert (a + 4) - (a + 1) == 3
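The final assert only needs address arithmetic, which the fake arena addresses support within a single arena. A minimal model, assuming an address is an (arena, offset) pair; FakeAddr is a hypothetical illustration, not llmemory's implementation:

class FakeAddr(object):
    def __init__(self, arena, offset):
        self.arena, self.offset = arena, offset
    def __add__(self, n):
        return FakeAddr(self.arena, self.offset + n)
    def __sub__(self, other):
        # differences only make sense inside one arena
        assert self.arena is other.arena
        return self.offset - other.offset

a = FakeAddr(object(), 0)
assert (a + 4) - (a + 1) == 3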
Example no. 55
0
 def reserve(i):
     b = a + i * llmemory.raw_malloc_usage(precomputed_size)
     arena_reserve(b, precomputed_size)
     return b
Example no. 57
0
def test_arena_new_view():
    a = arena_malloc(50, False)
    arena_reserve(a, precomputed_size)
    # we can now allocate the same space in new view
    b = arena_new_view(a)
    arena_reserve(b, precomputed_size)