Example #1
 def allocate_object(self, offset, size, letter='x'):
     self.check()
     bytes = llmemory.raw_malloc_usage(size)
     if offset + bytes > self.nbytes:
         raise ArenaError("object overflows beyond the end of the arena")
     zero = True
     for c in self.usagemap[offset:offset+bytes]:
         if c == '0':
             pass
         elif c == '#':
             zero = False
         else:
             raise ArenaError("new object overlaps a previous object")
     assert offset not in self.objectptrs
     addr2 = size._raw_malloc([], zero=zero)
     pattern = letter.upper() + letter*(bytes-1)
     self.usagemap[offset:offset+bytes] = array.array('c', pattern)
     self.setobject(addr2, offset, bytes)
     # common case: 'size' starts with a GCHeaderOffset.  In this case
     # we can also remember that the real object starts after the header.
     while isinstance(size, RoundedUpForAllocation):
         size = size.basesize
     if (isinstance(size, llmemory.CompositeOffset) and
         isinstance(size.offsets[0], llmemory.GCHeaderOffset)):
         objaddr = addr2 + size.offsets[0]
         hdrbytes = llmemory.raw_malloc_usage(size.offsets[0])
         objoffset = offset + hdrbytes
         self.setobject(objaddr, objoffset, bytes - hdrbytes)
     return addr2
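The usage-map bookkeeping above can be shown without the llmemory machinery. A minimal pure-Python sketch of the same idea ('ToyArena' and its byte codes are invented for illustration, not part of PyPy):

class ToyArena:
    # usage map codes: '#' = uninitialized byte, '0' = zeroed byte,
    # letters = bytes already claimed by an object
    def __init__(self, nbytes, zero):
        self.nbytes = nbytes
        self.usagemap = ['0' if zero else '#'] * nbytes

    def allocate_object(self, offset, nbytes, letter='x'):
        if offset + nbytes > self.nbytes:
            raise ValueError("object overflows beyond the end of the arena")
        if any(c not in '0#' for c in self.usagemap[offset:offset + nbytes]):
            raise ValueError("new object overlaps a previous object")
        # upper-case first byte so object boundaries stay visible in dumps
        self.usagemap[offset:offset + nbytes] = letter.upper() + letter * (nbytes - 1)

arena = ToyArena(16, zero=True)
arena.allocate_object(0, 4)
arena.allocate_object(4, 3, letter='y')
print(''.join(arena.usagemap))    # XxxxYyy000000000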
Example #2
 def malloc_fixedsize_clear(self, typeid, size, can_collect,
                            has_finalizer=False, contains_weakptr=False):
     if (has_finalizer or not can_collect or
         (raw_malloc_usage(size) > self.lb_young_var_basesize and
          raw_malloc_usage(size) > self.largest_young_fixedsize)):
         # ^^^ we do two size comparisons; the first one appears redundant,
         #     but it can be constant-folded if 'size' is a constant; then
         #     it almost always folds down to False, which kills the
         #     second comparison as well.
         ll_assert(not contains_weakptr, "wrong case for mallocing weakref")
         # "non-simple" case or object too big: don't use the nursery
         return SemiSpaceGC.malloc_fixedsize_clear(self, typeid, size,
                                                   can_collect,
                                                   has_finalizer,
                                                   contains_weakptr)
     size_gc_header = self.gcheaderbuilder.size_gc_header
     totalsize = size_gc_header + size
     result = self.nursery_free
     if raw_malloc_usage(totalsize) > self.nursery_top - result:
         result = self.collect_nursery()
     llarena.arena_reserve(result, totalsize)
     # GCFLAG_NO_YOUNG_PTRS is never set on young objs
     self.init_gc_object(result, typeid, flags=0)
     self.nursery_free = result + totalsize
     if contains_weakptr:
         self.young_objects_with_weakrefs.append(result + size_gc_header)
     return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
Example #3
 def malloc_fixedsize_clear(self,
                            typeid,
                            size,
                            can_collect,
                            has_finalizer=False,
                            contains_weakptr=False):
     if (has_finalizer or not can_collect or
         (raw_malloc_usage(size) > self.lb_young_var_basesize
          and raw_malloc_usage(size) > self.largest_young_fixedsize)):
         # ^^^ we do two size comparisons; the first one appears redundant,
         #     but it can be constant-folded if 'size' is a constant; then
         #     it almost always folds down to False, which kills the
         #     second comparison as well.
         ll_assert(not contains_weakptr, "wrong case for mallocing weakref")
         # "non-simple" case or object too big: don't use the nursery
         return SemiSpaceGC.malloc_fixedsize_clear(self, typeid, size,
                                                   can_collect,
                                                   has_finalizer,
                                                   contains_weakptr)
     size_gc_header = self.gcheaderbuilder.size_gc_header
     totalsize = size_gc_header + size
     result = self.nursery_free
     if raw_malloc_usage(totalsize) > self.nursery_top - result:
         result = self.collect_nursery()
     llarena.arena_reserve(result, totalsize)
     # GCFLAG_NO_YOUNG_PTRS is never set on young objs
     self.init_gc_object(result, typeid, flags=0)
     self.nursery_free = result + totalsize
     if contains_weakptr:
         self.young_objects_with_weakrefs.append(result + size_gc_header)
     return llmemory.cast_adr_to_ptr(result + size_gc_header,
                                     llmemory.GCREF)
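The fast path above is a plain bump-pointer allocator: advance nursery_free by totalsize and fall back to a nursery collection when the bump would cross nursery_top. A schematic version with integers standing in for addresses (the class and its reset-on-collect behaviour are assumptions for illustration):

class ToyNursery:
    def __init__(self, size):
        self.nursery_free = 0      # next free "address"
        self.nursery_top = size    # end of the nursery

    def collect_nursery(self):
        # a real GC would evacuate survivors first; here we only reset the pointer
        self.nursery_free = 0
        return self.nursery_free

    def malloc(self, totalsize):
        result = self.nursery_free
        if totalsize > self.nursery_top - result:
            result = self.collect_nursery()
        self.nursery_free = result + totalsize
        return result

n = ToyNursery(64)
print([n.malloc(24) for _ in range(4)])    # [0, 24, 0, 24] -- third call collects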
Example #4
 def realloc(self, ptr, newlength, fixedsize, itemsize, lengthofs, grow):
     size_gc_header = self.size_gc_header()
     addr = llmemory.cast_ptr_to_adr(ptr)
     tid = self.get_type_id(addr)
     nonvarsize = size_gc_header + fixedsize
     try:
         varsize = ovfcheck(itemsize * newlength)
         tot_size = ovfcheck(nonvarsize + varsize)
     except OverflowError:
         raise MemoryError()
     oldlength = (addr + lengthofs).signed[0]
     old_tot_size = size_gc_header + fixedsize + oldlength * itemsize
     source_addr = addr - size_gc_header
     self.gen2_resizable_objects.remove(addr)
     if grow:
         result = llop.raw_realloc_grow(llmemory.Address, source_addr,
                                        old_tot_size, tot_size)
     else:
         result = llop.raw_realloc_shrink(llmemory.Address, source_addr,
                                          old_tot_size, tot_size)
     if not result:
         self.gen2_resizable_objects.append(addr)
         raise MemoryError()
     if grow:
         self.gen2_resizable_objects.append(result + size_gc_header)
     else:
         self.gen2_rawmalloced_objects.append(result + size_gc_header)
     self._check_rawsize_alloced(raw_malloc_usage(tot_size) -
                                 raw_malloc_usage(old_tot_size),
                                 can_collect = not grow)
     (result + size_gc_header + lengthofs).signed[0] = newlength
     return llmemory.cast_adr_to_ptr(result + size_gc_header, llmemory.GCREF)
Example #5
 def allocate_object(self, offset, size):
     self.check()
     bytes = llmemory.raw_malloc_usage(size)
     if offset + bytes > self.nbytes:
         raise ArenaError("object overflows beyond the end of the arena")
     zero = True
     for c in self.usagemap[offset:offset + bytes]:
         if c == '0':
             pass
         elif c == '#':
             zero = False
         else:
             raise ArenaError("new object overlaps a previous object")
     assert offset not in self.objectptrs
     addr2 = size._raw_malloc([], zero=zero)
     pattern = 'X' + 'x' * (bytes - 1)
     self.usagemap[offset:offset + bytes] = array.array('c', pattern)
     self.setobject(addr2, offset, bytes)
     # common case: 'size' starts with a GCHeaderOffset.  In this case
     # we can also remember that the real object starts after the header.
     while isinstance(size, RoundedUpForAllocation):
         size = size.basesize
     if (isinstance(size, llmemory.CompositeOffset)
             and isinstance(size.offsets[0], llmemory.GCHeaderOffset)):
         objaddr = addr2 + size.offsets[0]
         hdrbytes = llmemory.raw_malloc_usage(size.offsets[0])
         objoffset = offset + hdrbytes
         self.setobject(objaddr, objoffset, bytes - hdrbytes)
     return addr2
Example #6
    def update_forward_pointers(self, toaddr, maxnum):
        self.base_forwarding_addr = base_forwarding_addr = toaddr
        fromaddr = self.space
        size_gc_header = self.gcheaderbuilder.size_gc_header
        num = 0
        while fromaddr < self.free:
            hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR))
            obj = fromaddr + size_gc_header
            # compute the original object size, including the
            # optional hash field
            basesize = size_gc_header + self.get_size(obj)
            totalsrcsize = basesize
            if hdr.tid & GCFLAG_HASHFIELD:  # already a hash field, copy it too
                totalsrcsize += llmemory.sizeof(lltype.Signed)
            #
            if self.marked(obj):
                # the object is marked as surviving.  Compute the new object
                # size
                totaldstsize = totalsrcsize
                if hdr.tid & (GCFLAG_HASHTAKEN | GCFLAG_HASHFIELD) == GCFLAG_HASHTAKEN:
                    # grow a new hash field -- with the exception: if
                    # the object actually doesn't move, don't
                    # (otherwise, we get a bogus toaddr > fromaddr)
                    if self.toaddr_smaller_than_fromaddr(toaddr, fromaddr):
                        totaldstsize += llmemory.sizeof(lltype.Signed)
                #
                if not translated_to_c():
                    llarena.arena_reserve(toaddr, basesize)
                    if raw_malloc_usage(totaldstsize) > raw_malloc_usage(basesize):
                        llarena.arena_reserve(toaddr + basesize, llmemory.sizeof(lltype.Signed))
                #
                # save the field hdr.tid in the array tid_backup
                ll_assert(num < maxnum, "overflow of the tid_backup table")
                self.tid_backup[num] = self.get_type_id(obj)
                num += 1
                # compute forward_offset, the offset to the future copy
                # of this object
                forward_offset = toaddr - base_forwarding_addr
                # copy the first two gc flags in forward_offset
                ll_assert(forward_offset & 3 == 0, "misalignment!")
                forward_offset |= (hdr.tid >> first_gcflag_bit) & 3
                hdr.tid = forward_offset | GCFLAG_MARKBIT
                ll_assert(self.marked(obj), "re-marking object failed!")
                # done
                toaddr += totaldstsize
            #
            fromaddr += totalsrcsize
            if not translated_to_c():
                assert toaddr - base_forwarding_addr <= fromaddr - self.space
        self.num_alive_objs = num
        self.finaladdr = toaddr

        # now update references
        self.root_walker.walk_roots(
            MarkCompactGC._update_ref,  # stack roots
            MarkCompactGC._update_ref,  # static in prebuilt non-gc structures
            MarkCompactGC._update_ref)  # static in prebuilt gc objects
        self.walk_marked_objects(MarkCompactGC.trace_and_update_ref)
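The lines that pack forward_offset into hdr.tid rely on the offset being word-aligned, so its two low bits are free to carry the first two GC flags; a separate mark bit keeps the object recognizable as alive. A self-contained sketch of that encoding (bit positions are illustrative, not PyPy's real layout):

first_gcflag_bit = 10
GCFLAG_MARKBIT = 1 << 30        # illustrative choice of mark bit

def encode_tid(forward_offset, old_tid):
    assert forward_offset & 3 == 0, "misalignment!"
    # smuggle the first two gc flags into the low bits of the offset
    packed = forward_offset | ((old_tid >> first_gcflag_bit) & 3)
    return packed | GCFLAG_MARKBIT

def decode_forward_offset(tid):
    # drop the mark bit and the two smuggled flag bits
    return tid & ~GCFLAG_MARKBIT & ~3

tid = encode_tid(0x40, old_tid=2 << first_gcflag_bit)
print(hex(tid))                         # 0x40000042
print(hex(decode_forward_offset(tid)))  # 0x40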
Example #7
 def _get_totalsize_var(self, nonvarsize, itemsize, length):
     try:
         varsize = ovfcheck(itemsize * length)
     except OverflowError:
         raise MemoryError
     # Careful to detect overflows.  The following works even if varsize
     # is almost equal to sys.maxint; moreover, self.space_size is known
     # to be at least 4095 bytes smaller than sys.maxint, so this function
     # always raises instead of returning an integer >= sys.maxint-4095.
     if raw_malloc_usage(varsize) > self.space_size - raw_malloc_usage(nonvarsize):
         raise MemoryError
     return llarena.round_up_for_allocation(nonvarsize + varsize)
Example #8
 def _get_totalsize_var(self, nonvarsize, itemsize, length):
     try:
         varsize = ovfcheck(itemsize * length)
     except OverflowError:
         raise MemoryError
     # Careful to detect overflows.  The following works even if varsize
     # is almost equal to sys.maxint; moreover, self.space_size is known
     # to be at least 4095 bytes smaller than sys.maxint, so this function
     # always raises instead of returning an integer >= sys.maxint-4095.
     if (raw_malloc_usage(varsize) >
             self.space_size - raw_malloc_usage(nonvarsize)):
         raise MemoryError
     return llarena.round_up_for_allocation(nonvarsize + varsize)
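The comparison is written as varsize > space_size - nonvarsize rather than nonvarsize + varsize > space_size on purpose: the subtraction on the right cannot overflow, whereas the sum on the left could wrap around a machine word. A rough illustration with an explicit bound (MAXINT and the helper name are assumptions; RPython enforces this with ovfcheck instead):

MAXINT = 2**63 - 1      # what sys.maxint would be on a 64-bit build

def fits(varsize, nonvarsize, space_size):
    # same trick: subtract on the side that stays small instead of adding
    # on the side that could exceed MAXINT
    assert nonvarsize <= space_size <= MAXINT - 4095
    return varsize <= space_size - nonvarsize

print(fits(MAXINT - 10, 64, 10 * 1024 * 1024))   # False, no overflowing sum formed
print(fits(1024, 64, 10 * 1024 * 1024))          # True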
Example #9
    def malloc_varsize_clear(self,
                             typeid,
                             length,
                             size,
                             itemsize,
                             offset_to_length,
                             can_collect,
                             has_finalizer=False):
        # Only use the nursery if there are not too many items.
        if not raw_malloc_usage(itemsize):
            too_many_items = False
        else:
            # The following line is usually constant-folded because both
            # min_nursery_size and itemsize are constants (the latter
            # due to inlining).
            maxlength_for_minimal_nursery = (self.min_nursery_size // 4 //
                                             raw_malloc_usage(itemsize))

            # The actual maximum length for our nursery depends on how
            # many times our nursery is bigger than the minimal size.
            # The computation is done in this roundabout way so that
            #     the only remaining computation is the following
            # shift.
            maxlength = maxlength_for_minimal_nursery << self.nursery_scale
            too_many_items = length > maxlength

        if (has_finalizer or not can_collect or too_many_items or
            (raw_malloc_usage(size) > self.lb_young_var_basesize
             and raw_malloc_usage(size) > self.largest_young_var_basesize)):
            # ^^^ we do two size comparisons; the first one appears redundant,
            #     but it can be constant-folded if 'size' is a constant; then
            #     it almost always folds down to False, which kills the
            #     second comparison as well.
            return SemiSpaceGC.malloc_varsize_clear(self, typeid, length, size,
                                                    itemsize, offset_to_length,
                                                    can_collect, has_finalizer)
        # with the above checks we know now that totalsize cannot be more
        # than about half of the nursery size; in particular, the + and *
        # cannot overflow
        size_gc_header = self.gcheaderbuilder.size_gc_header
        totalsize = size_gc_header + size + itemsize * length
        result = self.nursery_free
        if raw_malloc_usage(totalsize) > self.nursery_top - result:
            result = self.collect_nursery()
        llarena.arena_reserve(result, totalsize)
        # GCFLAG_NO_YOUNG_PTRS is never set on young objs
        self.init_gc_object(result, typeid, flags=0)
        (result + size_gc_header + offset_to_length).signed[0] = length
        self.nursery_free = result + llarena.round_up_for_allocation(totalsize)
        return llmemory.cast_adr_to_ptr(result + size_gc_header,
                                        llmemory.GCREF)
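The maxlength computation is split so that everything except the final shift is a compile-time constant once itemsize is known; only the shift by nursery_scale survives to run time. Spelled out in ordinary Python (the function name and numbers are made up for the example):

def nursery_maxlength(min_nursery_size, nursery_scale, itemsize):
    if not itemsize:
        return None      # no payload per item: never "too many items"
    # constant part: items that fit in a quarter of the *minimal* nursery
    maxlength_for_minimal_nursery = min_nursery_size // 4 // itemsize
    # run-time part: scale up by how much bigger the actual nursery is
    return maxlength_for_minimal_nursery << nursery_scale

# a 128 KiB minimal nursery, an actual nursery 4x bigger, 8-byte items
print(nursery_maxlength(128 * 1024, 2, 8))    # 16384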
Example #10
def test_partial_arena_reset():
    a = arena_malloc(72, False)

    def reserve(i):
        b = a + i * llmemory.raw_malloc_usage(precomputed_size)
        arena_reserve(b, precomputed_size)
        return b

    blist = []
    plist = []
    for i in range(4):
        b = reserve(i)
        (b + llmemory.offsetof(SX, 'x')).signed[0] = 100 + i
        blist.append(b)
        plist.append(llmemory.cast_adr_to_ptr(b, SPTR))
    # clear blist[1] and blist[2] but not blist[0] nor blist[3]
    arena_reset(blist[1],
                llmemory.raw_malloc_usage(precomputed_size) * 2, False)
    py.test.raises(RuntimeError, "plist[1].x")  # marked as freed
    py.test.raises(RuntimeError, "plist[2].x")  # marked as freed
    # re-reserve object at index 1 and 2
    blist[1] = reserve(1)
    blist[2] = reserve(2)
    # check via object pointers
    assert plist[0].x == 100
    assert plist[3].x == 103
    py.test.raises(RuntimeError, "plist[1].x")  # marked as freed
    py.test.raises(RuntimeError, "plist[2].x")  # marked as freed
    # but we can still cast the old ptrs to addresses, which compare equal
    # to the new ones we got
    assert llmemory.cast_ptr_to_adr(plist[1]) == blist[1]
    assert llmemory.cast_ptr_to_adr(plist[2]) == blist[2]
    # check via addresses
    assert (blist[0] + llmemory.offsetof(SX, 'x')).signed[0] == 100
    assert (blist[3] + llmemory.offsetof(SX, 'x')).signed[0] == 103
    py.test.raises(lltype.UninitializedMemoryAccess,
                   "(blist[1] + llmemory.offsetof(SX, 'x')).signed[0]")
    py.test.raises(lltype.UninitializedMemoryAccess,
                   "(blist[2] + llmemory.offsetof(SX, 'x')).signed[0]")
    # clear and zero-fill the area over blist[0] and blist[1]
    arena_reset(blist[0],
                llmemory.raw_malloc_usage(precomputed_size) * 2, True)
    # re-reserve and check it's zero
    blist[0] = reserve(0)
    blist[1] = reserve(1)
    assert (blist[0] + llmemory.offsetof(SX, 'x')).signed[0] == 0
    assert (blist[1] + llmemory.offsetof(SX, 'x')).signed[0] == 0
    assert (blist[3] + llmemory.offsetof(SX, 'x')).signed[0] == 103
    py.test.raises(lltype.UninitializedMemoryAccess,
                   "(blist[2] + llmemory.offsetof(SX, 'x')).signed[0]")
Example #11
    def malloc_varsize_clear(self,
                             typeid,
                             length,
                             size,
                             itemsize,
                             offset_to_length,
                             can_collect,
                             has_finalizer=False):
        if has_finalizer or not can_collect:
            return SemiSpaceGC.malloc_varsize_clear(self, typeid, length, size,
                                                    itemsize, offset_to_length,
                                                    can_collect, has_finalizer)
        size_gc_header = self.gcheaderbuilder.size_gc_header
        nonvarsize = size_gc_header + size

        # Compute the maximal length that makes the object still
        # below 'nonlarge_max'.  All the following logic is usually
        # constant-folded because self.nonlarge_max, size and itemsize
        # are all constants (the arguments are constant due to
        # inlining) and self.has_gcptr_in_varsize() is constant-folded.
        if self.has_gcptr_in_varsize(typeid):
            nonlarge_max = self.nonlarge_gcptrs_max
        else:
            nonlarge_max = self.nonlarge_max

        if not raw_malloc_usage(itemsize):
            too_many_items = raw_malloc_usage(nonvarsize) > nonlarge_max
        else:
            maxlength = nonlarge_max - raw_malloc_usage(nonvarsize)
            maxlength = maxlength // raw_malloc_usage(itemsize)
            too_many_items = length > maxlength

        if not too_many_items:
            # With the above checks we know now that totalsize cannot be more
            # than 'nonlarge_max'; in particular, the + and * cannot overflow.
            # Let's try to fit the object in the nursery.
            totalsize = nonvarsize + itemsize * length
            result = self.nursery_free
            if raw_malloc_usage(totalsize) <= self.nursery_top - result:
                llarena.arena_reserve(result, totalsize)
                # GCFLAG_NO_YOUNG_PTRS is never set on young objs
                self.init_gc_object(result, typeid, flags=0)
                (result + size_gc_header + offset_to_length).signed[0] = length
                self.nursery_free = result + llarena.round_up_for_allocation(
                    totalsize)
                return llmemory.cast_adr_to_ptr(result + size_gc_header,
                                                llmemory.GCREF)
        return self.malloc_varsize_slowpath(typeid, length)
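Overflow safety here comes from turning the question "does nonvarsize + itemsize * length stay below nonlarge_max?" into a comparison of length against a precomputed maxlength, so the possibly huge product is never formed. A small sketch of that check (numbers are arbitrary, not PyPy's thresholds):

def fits_below_nonlarge_max(length, nonvarsize, itemsize, nonlarge_max):
    # equivalent to nonvarsize + itemsize * length <= nonlarge_max,
    # but without the product that could overflow a machine word
    if not itemsize:
        return nonvarsize <= nonlarge_max
    maxlength = (nonlarge_max - nonvarsize) // itemsize
    return length <= maxlength

print(fits_below_nonlarge_max(10, 24, 8, 3584))       # True
print(fits_below_nonlarge_max(10**18, 24, 8, 3584))   # False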
Example #12
 def try_obtain_free_space(self, needed):
     needed = raw_malloc_usage(needed)
     while 1:
         self.markcompactcollect(needed)
         missing = needed - (self.top_of_space - self.free)
         if missing < 0:
             return True
Example #13
 def malloc_varsize_clear(self,
                          typeid,
                          length,
                          size,
                          itemsize,
                          offset_to_length,
                          can_collect,
                          has_finalizer=False):
     size_gc_header = self.gcheaderbuilder.size_gc_header
     nonvarsize = size_gc_header + size
     try:
         varsize = ovfcheck(itemsize * length)
         totalsize = ovfcheck(nonvarsize + varsize)
     except OverflowError:
         raise memoryError
     result = self.free
     if raw_malloc_usage(totalsize) > self.top_of_space - result:
         if not can_collect:
             raise memoryError
         result = self.obtain_free_space(totalsize)
     llarena.arena_reserve(result, totalsize)
     self.init_gc_object(result, typeid)
     (result + size_gc_header + offset_to_length).signed[0] = length
     self.free = result + llarena.round_up_for_allocation(totalsize)
     if has_finalizer:
         self.objects_with_finalizers.append(result + size_gc_header)
     return llmemory.cast_adr_to_ptr(result + size_gc_header,
                                     llmemory.GCREF)
Example #14
 def compute_alive_objects(self):
     fromaddr = self.space
     addraftercollect = self.space
     num = 1
     while fromaddr < self.free:
         size_gc_header = self.gcheaderbuilder.size_gc_header
         tid = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR)).tid
         obj = fromaddr + size_gc_header
         objsize = self.get_size(obj)
         objtotalsize = size_gc_header + objsize
         if self.marked(obj):
             copy_has_hash_field = ((tid & GCFLAG_HASHFIELD) != 0
                                    or ((tid & GCFLAG_HASHTAKEN) != 0
                                        and addraftercollect < fromaddr))
             addraftercollect += raw_malloc_usage(objtotalsize)
             if copy_has_hash_field:
                 addraftercollect += llmemory.sizeof(lltype.Signed)
         num += 1
         fromaddr += objtotalsize
         if tid & GCFLAG_HASHFIELD:
             fromaddr += llmemory.sizeof(lltype.Signed)
     ll_assert(addraftercollect <= fromaddr,
               "markcompactcollect() is trying to increase memory usage")
     self.totalsize_of_objs = addraftercollect - self.space
     return num
Example #15
 def try_obtain_free_space(self, needed):
     needed = raw_malloc_usage(needed)
     while 1:
         self.markcompactcollect(needed)
         missing = needed - (self.top_of_space - self.free)
         if missing < 0:
             return True
Example #16
    def make_a_nonmoving_copy(self, obj, objsize):
        # NB. the object can have a finalizer or be a weakref, but
        # it's not an issue.
        totalsize = self.size_gc_header() + objsize
        tid = self.header(obj).tid
        if tid & GCFLAG_HASHMASK:
            totalsize_incl_hash = totalsize + llmemory.sizeof(lltype.Signed)
        else:
            totalsize_incl_hash = totalsize
        newaddr = self.allocate_external_object(totalsize_incl_hash)
        if not newaddr:
            return llmemory.NULL  # can't raise MemoryError during a collect()
        self._nonmoving_copy_count += 1
        self._nonmoving_copy_size += raw_malloc_usage(totalsize)

        llmemory.raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
        if tid & GCFLAG_HASHMASK:
            hash = self._get_object_hash(obj, objsize, tid)
            (newaddr + totalsize).signed[0] = hash
            tid |= GC_HASH_HASFIELD
        #
        # GCFLAG_UNVISITED is not set
        # GCFLAG_NO_HEAP_PTRS is not set either, conservatively.  It may be
        # set by the next collection's collect_last_generation_roots().
        # This old object is immediately put at generation 3.
        newobj = newaddr + self.size_gc_header()
        hdr = self.header(newobj)
        hdr.tid = tid | self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS
        ll_assert(self.is_last_generation(newobj),
                  "make_a_nonmoving_copy: object too young")
        self.gen3_rawmalloced_objects.append(newobj)
        self.last_generation_root_objects.append(newobj)
        self.rawmalloced_objects_to_trace.append(newobj)  # visit me
        return newobj
Example #17
 def try_obtain_free_space(self, needed):
     # XXX for bonus points do big objects differently
     needed = raw_malloc_usage(needed)
     if (self.red_zone >= 2 and self.space_size < self.max_space_size and
         self.double_space_size()):
         pass    # collect was done during double_space_size()
     else:
         self.semispace_collect()
     missing = needed - (self.top_of_space - self.free)
     if missing <= 0:
         return True      # success
     else:
         # first check if the object could possibly fit
         proposed_size = self.space_size
         while missing > 0:
             if proposed_size >= self.max_space_size:
                 return False    # no way
             missing -= proposed_size
             proposed_size *= 2
         # For address space fragmentation reasons, we double the space
         # size possibly several times, moving the objects at each step,
         # instead of going directly for the final size.  We assume that
         # it's a rare case anyway.
         while self.space_size < proposed_size:
             if not self.double_space_size():
                 return False
         ll_assert(needed <= self.top_of_space - self.free,
                      "double_space_size() failed to do its job")
         return True
Example #18
    def make_a_nonmoving_copy(self, obj, objsize):
        # NB. the object can have a finalizer or be a weakref, but
        # it's not an issue.
        totalsize = self.size_gc_header() + objsize
        newaddr = self.allocate_external_object(totalsize)
        if not newaddr:
            return llmemory.NULL  # can't raise MemoryError during a collect()
        if DEBUG_PRINT:
            self._nonmoving_copy_count += 1
            self._nonmoving_copy_size += raw_malloc_usage(totalsize)

        llmemory.raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
        newobj = newaddr + self.size_gc_header()
        hdr = self.header(newobj)
        hdr.tid |= self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS
        # GCFLAG_UNVISITED is not set
        # GCFLAG_NO_HEAP_PTRS is not set either, conservatively.  It may be
        # set by the next collection's collect_last_generation_roots().
        # This old object is immediately put at generation 3.
        ll_assert(self.is_last_generation(newobj),
                  "make_a_nonmoving_copy: object too young")
        self.gen3_rawmalloced_objects.append(newobj)
        self.last_generation_root_objects.append(newobj)
        self.rawmalloced_objects_to_trace.append(newobj)  # visit me
        return newobj
Example #19
 def malloc_varsize_slowpath(self,
                             typeid,
                             length,
                             force_nonmovable=False,
                             resizable=False):
     # For objects that are too large, or when the nursery is exhausted.
     # In order to keep malloc_varsize_clear() as compact as possible,
     # we recompute what we need in this slow path instead of passing
     # it all as function arguments.
     size_gc_header = self.gcheaderbuilder.size_gc_header
     nonvarsize = size_gc_header + self.fixed_size(typeid)
     itemsize = self.varsize_item_sizes(typeid)
     offset_to_length = self.varsize_offset_to_length(typeid)
     try:
         varsize = ovfcheck(itemsize * length)
         totalsize = ovfcheck(nonvarsize + varsize)
     except OverflowError:
         raise MemoryError()
     if self.has_gcptr_in_varsize(typeid):
         nonlarge_max = self.nonlarge_gcptrs_max
     else:
         nonlarge_max = self.nonlarge_max
     if force_nonmovable or raw_malloc_usage(totalsize) > nonlarge_max:
         result = self.malloc_varsize_marknsweep(totalsize, resizable)
         flags = self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS | GCFLAG_UNVISITED
     else:
         result = self.malloc_varsize_collecting_nursery(totalsize)
         flags = self.GCFLAGS_FOR_NEW_YOUNG_OBJECTS
     self.init_gc_object(result, typeid, flags)
     (result + size_gc_header + offset_to_length).signed[0] = length
     return llmemory.cast_adr_to_ptr(result + size_gc_header,
                                     llmemory.GCREF)
Example #20
    def make_a_nonmoving_copy(self, obj, objsize):
        # NB. the object can have a finalizer or be a weakref, but
        # it's not an issue.
        totalsize = self.size_gc_header() + objsize
        tid = self.header(obj).tid
        if tid & GCFLAG_HASHMASK:
            totalsize_incl_hash = totalsize + llmemory.sizeof(lltype.Signed)
        else:
            totalsize_incl_hash = totalsize
        newaddr = self.allocate_external_object(totalsize_incl_hash)
        if not newaddr:
            return llmemory.NULL   # can't raise MemoryError during a collect()
        self._nonmoving_copy_count += 1
        self._nonmoving_copy_size += raw_malloc_usage(totalsize)

        llmemory.raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
        if tid & GCFLAG_HASHMASK:
            hash = self._get_object_hash(obj, objsize, tid)
            (newaddr + totalsize).signed[0] = hash
            tid |= GC_HASH_HASFIELD
        #
        # GCFLAG_UNVISITED is not set
        # GCFLAG_NO_HEAP_PTRS is not set either, conservatively.  It may be
        # set by the next collection's collect_last_generation_roots().
        # This old object is immediately put at generation 3.
        newobj = newaddr + self.size_gc_header()
        hdr = self.header(newobj)
        hdr.tid = tid | self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS
        ll_assert(self.is_last_generation(newobj),
                  "make_a_nonmoving_copy: object too young")
        self.gen3_rawmalloced_objects.append(newobj)
        self.last_generation_root_objects.append(newobj)
        self.rawmalloced_objects_to_trace.append(newobj)   # visit me
        return newobj
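The two GCFLAG_HASHMASK branches above keep identity hashes stable: if the object's hash was ever taken, or it already carries a hash field, the nonmoving copy is made one word larger and the hash value is written right after the object. A compact model of the size decision (flag values and WORD are illustrative):

GCFLAG_HASHTAKEN = 1 << 0     # illustrative flag bits
GCFLAG_HASHFIELD = 1 << 1
GCFLAG_HASHMASK = GCFLAG_HASHTAKEN | GCFLAG_HASHFIELD
WORD = 8

def copy_totalsize(totalsize, tid):
    # the copy grows by one word iff it must carry the materialized hash
    if tid & GCFLAG_HASHMASK:
        return totalsize + WORD
    return totalsize

print(copy_totalsize(48, 0))                   # 48: hash never requested
print(copy_totalsize(48, GCFLAG_HASHTAKEN))    # 56: hash was taken, field appended
print(copy_totalsize(48, GCFLAG_HASHFIELD))    # 56: field already present, copied too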
Example #21
 def reset(self, zero, start=0, size=None):
     self.check()
     if size is None:
         stop = self.nbytes
     else:
         stop = start + llmemory.raw_malloc_usage(size)
     assert 0 <= start <= stop <= self.nbytes
     for offset, ptr in self.objectptrs.items():
         size = self.objectsizes[offset]
         if offset < start:  # object is before the cleared area
             assert offset + size <= start, "object overlaps cleared area"
         elif offset + size > stop:  # object is after the cleared area
             assert offset >= stop, "object overlaps cleared area"
         else:
             obj = ptr._obj
             _dictdel(Arena.object_arena_location, obj)
             del self.objectptrs[offset]
             del self.objectsizes[offset]
             obj._free()
     if zero:
         initialbyte = "0"
     else:
         initialbyte = "#"
     self.usagemap[start:stop] = array.array('c',
                                             initialbyte * (stop - start))
Example #22
 def try_obtain_free_space(self, needed):
     # XXX for bonus points do big objects differently
     needed = raw_malloc_usage(needed)
     if (self.red_zone >= 2 and self.space_size < self.max_space_size
             and self.double_space_size()):
         pass  # collect was done during double_space_size()
     else:
         self.semispace_collect()
     missing = needed - (self.top_of_space - self.free)
     if missing <= 0:
         return True  # success
     else:
         # first check if the object could possibly fit
         proposed_size = self.space_size
         while missing > 0:
             if proposed_size >= self.max_space_size:
                 return False  # no way
             missing -= proposed_size
             proposed_size *= 2
         # For address space fragmentation reasons, we double the space
         # size possibly several times, moving the objects at each step,
         # instead of going directly for the final size.  We assume that
         # it's a rare case anyway.
         while self.space_size < proposed_size:
             if not self.double_space_size():
                 return False
         ll_assert(needed <= self.top_of_space - self.free,
                   "double_space_size() failed to do its job")
         return True
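The "first check if the object could possibly fit" loop only computes how big the semispace would have to grow; the actual doubling happens afterwards, one step at a time, to limit address-space fragmentation. The sizing part in isolation (function name and numbers invented for the example):

def proposed_space_size(missing, space_size, max_space_size):
    # keep doubling until the space added so far covers 'missing' extra bytes;
    # return None if even max_space_size would not be enough
    proposed_size = space_size
    while missing > 0:
        if proposed_size >= max_space_size:
            return None
        missing -= proposed_size
        proposed_size *= 2
    return proposed_size

print(proposed_space_size(5 * 1024, 4 * 1024, 64 * 1024))   # 16384
print(proposed_space_size(10**9, 4 * 1024, 64 * 1024))      # None: can never fit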
Example #23
 def malloc_fixedsize(self, typeid, size, can_collect, has_finalizer=False,
                      contains_weakptr=False):
     if can_collect:
         self.maybe_collect()
     size_gc_header = self.gcheaderbuilder.size_gc_header
     try:
         tot_size = size_gc_header + size
         usage = raw_malloc_usage(tot_size)
         bytes_malloced = ovfcheck(self.bytes_malloced+usage)
         ovfcheck(self.heap_usage + bytes_malloced)
     except OverflowError:
         raise memoryError
     result = raw_malloc(tot_size)
     if not result:
         raise memoryError
     hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
     hdr.typeid = typeid << 1
     if has_finalizer:
         hdr.next = self.malloced_objects_with_finalizer
         self.malloced_objects_with_finalizer = hdr
     elif contains_weakptr:
         hdr.next = self.objects_with_weak_pointers
         self.objects_with_weak_pointers = hdr
     else:
         hdr.next = self.malloced_objects
         self.malloced_objects = hdr
     self.bytes_malloced = bytes_malloced
     result += size_gc_header
     #llop.debug_print(lltype.Void, 'malloc typeid', typeid,
     #                 '->', llmemory.cast_adr_to_int(result))
     self.write_malloc_statistics(typeid, tot_size, result, False)
     return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)
Example #24
 def malloc_varsize_slowpath(self, typeid, length, force_nonmovable=False):
     # For objects that are too large, or when the nursery is exhausted.
     # In order to keep malloc_varsize_clear() as compact as possible,
     # we recompute what we need in this slow path instead of passing
     # it all as function arguments.
     size_gc_header = self.gcheaderbuilder.size_gc_header
     nonvarsize = size_gc_header + self.fixed_size(typeid)
     itemsize = self.varsize_item_sizes(typeid)
     offset_to_length = self.varsize_offset_to_length(typeid)
     try:
         varsize = ovfcheck(itemsize * length)
         totalsize = ovfcheck(nonvarsize + varsize)
     except OverflowError:
         raise MemoryError()
     if self.has_gcptr_in_varsize(typeid):
         nonlarge_max = self.nonlarge_gcptrs_max
     else:
         nonlarge_max = self.nonlarge_max
     if force_nonmovable or raw_malloc_usage(totalsize) > nonlarge_max:
         result = self.malloc_varsize_marknsweep(totalsize)
         flags = self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS | GCFLAG_UNVISITED
     else:
         result = self.malloc_varsize_collecting_nursery(totalsize)
         flags = self.GCFLAGS_FOR_NEW_YOUNG_OBJECTS
     self.init_gc_object(result, typeid, flags)
     (result + size_gc_header + offset_to_length).signed[0] = length
     return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
Example #25
 def malloc_fixedsize(self, typeid16, size, can_collect,
                      has_finalizer=False, contains_weakptr=False):
     if can_collect:
         self.maybe_collect()
     size_gc_header = self.gcheaderbuilder.size_gc_header
     try:
         tot_size = size_gc_header + size
         usage = raw_malloc_usage(tot_size)
         bytes_malloced = ovfcheck(self.bytes_malloced+usage)
         ovfcheck(self.heap_usage + bytes_malloced)
     except OverflowError:
         raise memoryError
     result = raw_malloc(tot_size)
     if not result:
         raise memoryError
     hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
     hdr.typeid16 = typeid16
     hdr.mark = False
     hdr.flags = '\x00'
     if has_finalizer:
         hdr.next = self.malloced_objects_with_finalizer
         self.malloced_objects_with_finalizer = hdr
     elif contains_weakptr:
         hdr.next = self.objects_with_weak_pointers
         self.objects_with_weak_pointers = hdr
     else:
         hdr.next = self.malloced_objects
         self.malloced_objects = hdr
     self.bytes_malloced = bytes_malloced
     result += size_gc_header
     #llop.debug_print(lltype.Void, 'malloc typeid', typeid16,
     #                 '->', llmemory.cast_adr_to_int(result))
     self.write_malloc_statistics(typeid16, tot_size, result, False)
     return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)
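The hdr.next assignments thread every allocated header onto one of several intrusive singly linked lists (plain objects, objects with finalizers, weakref containers), so the sweep phase can walk them without any extra allocation. The bookkeeping pattern with ordinary Python objects (class names are invented):

class Hdr:
    def __init__(self, typeid):
        self.typeid = typeid
        self.next = None

class ToyMarkSweep:
    def __init__(self):
        self.malloced_objects = None
        self.malloced_objects_with_finalizer = None

    def malloc(self, typeid, has_finalizer=False):
        hdr = Hdr(typeid)
        if has_finalizer:
            hdr.next = self.malloced_objects_with_finalizer
            self.malloced_objects_with_finalizer = hdr
        else:
            hdr.next = self.malloced_objects
            self.malloced_objects = hdr
        return hdr

gc = ToyMarkSweep()
gc.malloc(1); gc.malloc(2); gc.malloc(3, has_finalizer=True)
# newest-first chains: 2 -> 1 on the plain list, 3 alone on the finalizer list
print(gc.malloced_objects.typeid, gc.malloced_objects.next.typeid)   # 2 1
print(gc.malloced_objects_with_finalizer.typeid)                     # 3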
Example #26
 def malloc_varsize_clear(self, typeid16, length, size, itemsize,
                          offset_to_length, can_collect):
     if can_collect:
         self.maybe_collect()
     size_gc_header = self.gcheaderbuilder.size_gc_header
     try:
         fixsize = size_gc_header + size
         varsize = ovfcheck(itemsize * length)
         tot_size = ovfcheck(fixsize + varsize)
         usage = raw_malloc_usage(tot_size)
         bytes_malloced = ovfcheck(self.bytes_malloced+usage)
         ovfcheck(self.heap_usage + bytes_malloced)
     except OverflowError:
         raise memoryError
     result = raw_malloc(tot_size)
     if not result:
         raise memoryError
     raw_memclear(result, tot_size)
     (result + size_gc_header + offset_to_length).signed[0] = length
     hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
     hdr.typeid16 = typeid16
     hdr.mark = False
     hdr.flags = '\x00'
     hdr.next = self.malloced_objects
     self.malloced_objects = hdr
     self.bytes_malloced = bytes_malloced
         
     result += size_gc_header
     #llop.debug_print(lltype.Void, 'malloc_varsize length', length,
     #                 'typeid', typeid16,
     #                 '->', llmemory.cast_adr_to_int(result))
     self.write_malloc_statistics(typeid16, tot_size, result, True)
     return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)
Example #27
    def malloc_varsize_clear(self, typeid16, length, size, itemsize,
                             offset_to_length):
        self.maybe_collect()
        size_gc_header = self.gcheaderbuilder.size_gc_header
        try:
            fixsize = size_gc_header + size
            varsize = ovfcheck(itemsize * length)
            tot_size = ovfcheck(fixsize + varsize)
            usage = raw_malloc_usage(tot_size)
            bytes_malloced = ovfcheck(self.bytes_malloced + usage)
            ovfcheck(self.heap_usage + bytes_malloced)
        except OverflowError:
            raise memoryError
        result = raw_malloc(tot_size)
        if not result:
            raise memoryError
        raw_memclear(result, tot_size)
        (result + size_gc_header + offset_to_length).signed[0] = length
        hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
        hdr.typeid16 = typeid16
        hdr.mark = False
        hdr.flags = '\x00'
        hdr.next = self.malloced_objects
        self.malloced_objects = hdr
        self.bytes_malloced = bytes_malloced

        result += size_gc_header
        #llop.debug_print(lltype.Void, 'malloc_varsize length', length,
        #                 'typeid', typeid16,
        #                 '->', llmemory.cast_adr_to_int(result))
        self.write_malloc_statistics(typeid16, tot_size, result, True)
        return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)
Example #28
 def compute_alive_objects(self):
     fromaddr = self.space
     addraftercollect = self.space
     num = 1
     while fromaddr < self.free:
         size_gc_header = self.gcheaderbuilder.size_gc_header
         tid = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR)).tid
         obj = fromaddr + size_gc_header
         objsize = self.get_size(obj)
         objtotalsize = size_gc_header + objsize
         if self.marked(obj):
             copy_has_hash_field = ((tid & GCFLAG_HASHFIELD) != 0 or
                                    ((tid & GCFLAG_HASHTAKEN) != 0 and
                                     addraftercollect < fromaddr))
             addraftercollect += raw_malloc_usage(objtotalsize)
             if copy_has_hash_field:
                 addraftercollect += llmemory.sizeof(lltype.Signed)
         num += 1
         fromaddr += objtotalsize
         if tid & GCFLAG_HASHFIELD:
             fromaddr += llmemory.sizeof(lltype.Signed)
     ll_assert(addraftercollect <= fromaddr,
               "markcompactcollect() is trying to increase memory usage")
     self.totalsize_of_objs = addraftercollect - self.space
     return num
Example #29
    def make_a_nonmoving_copy(self, obj, objsize):
        # NB. the object can have a finalizer or be a weakref, but
        # it's not an issue.
        totalsize = self.size_gc_header() + objsize
        newaddr = self.allocate_external_object(totalsize)
        if not newaddr:
            return llmemory.NULL   # can't raise MemoryError during a collect()
        if self.config.gcconfig.debugprint:
            self._nonmoving_copy_count += 1
            self._nonmoving_copy_size += raw_malloc_usage(totalsize)

        llmemory.raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
        newobj = newaddr + self.size_gc_header()
        hdr = self.header(newobj)
        hdr.tid |= self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS
        # GCFLAG_UNVISITED is not set
        # GCFLAG_NO_HEAP_PTRS is not set either, conservatively.  It may be
        # set by the next collection's collect_last_generation_roots().
        # This old object is immediately put at generation 3.
        ll_assert(self.is_last_generation(newobj),
                  "make_a_nonmoving_copy: object too young")
        self.gen3_rawmalloced_objects.append(newobj)
        self.last_generation_root_objects.append(newobj)
        self.rawmalloced_objects_to_trace.append(newobj)   # visit me
        return newobj
Example #30
 def markcompactcollect(self, needed=0):
     start_time = self.debug_collect_start()
     self.debug_check_consistency()
     self.to_see = self.AddressStack()
     self.mark_roots_recursively()
     if (self.objects_with_finalizers.non_empty() or
         self.run_finalizers.non_empty()):
         self.mark_objects_with_finalizers()
         self._trace_and_mark()
     self.to_see.delete()
     num_of_alive_objs = self.compute_alive_objects()
     size_of_alive_objs = self.totalsize_of_objs
     totalsize = self.new_space_size(size_of_alive_objs, needed +
                                     num_of_alive_objs * BYTES_PER_TID)
     tid_backup_size = (llmemory.sizeof(self.TID_BACKUP, 0) +
                        llmemory.sizeof(TID_TYPE) * num_of_alive_objs)
     used_space_now = self.next_collect_after + raw_malloc_usage(tid_backup_size)
     if totalsize >= self.space_size or used_space_now >= self.space_size:
         toaddr = self.double_space_size(totalsize)
         llarena.arena_reserve(toaddr + size_of_alive_objs, tid_backup_size)
         self.tid_backup = llmemory.cast_adr_to_ptr(
             toaddr + size_of_alive_objs,
             lltype.Ptr(self.TID_BACKUP))
         resizing = True
     else:
         toaddr = llarena.arena_new_view(self.space)
         llarena.arena_reserve(self.top_of_space, tid_backup_size)
         self.tid_backup = llmemory.cast_adr_to_ptr(
             self.top_of_space,
             lltype.Ptr(self.TID_BACKUP))
         resizing = False
     self.next_collect_after = totalsize
     weakref_offsets = self.collect_weakref_offsets()
     finaladdr = self.update_forward_pointers(toaddr, num_of_alive_objs)
     if (self.run_finalizers.non_empty() or
         self.objects_with_finalizers.non_empty()):
         self.update_run_finalizers()
     if self.objects_with_weakrefs.non_empty():
         self.invalidate_weakrefs(weakref_offsets)
     self.update_objects_with_id()
     self.compact(resizing)
     if not resizing:
         size = toaddr + self.space_size - finaladdr
         llarena.arena_reset(finaladdr, size, True)
     else:
         if we_are_translated():
             # because we free stuff already in raw_memmove, we
             # would get double free here. Let's free it anyway
             llarena.arena_free(self.space)
         llarena.arena_reset(toaddr + size_of_alive_objs, tid_backup_size,
                             True)
     self.space        = toaddr
     self.free         = finaladdr
     self.top_of_space = toaddr + self.next_collect_after
     self.debug_check_consistency()
     self.tid_backup = lltype.nullptr(self.TID_BACKUP)
     if self.run_finalizers.non_empty():
         self.execute_finalizers()
     self.debug_collect_finish(start_time)
Example #31
 def malloc_varsize_collecting_nursery(self, totalsize):
     result = self.collect_nursery()
     ll_assert(
         raw_malloc_usage(totalsize) <= self.nursery_top - result,
         "not enough room in malloc_varsize_collecting_nursery()")
     llarena.arena_reserve(result, totalsize)
     self.nursery_free = result + llarena.round_up_for_allocation(totalsize)
     return result
Example #32
def test_partial_arena_reset():
    a = arena_malloc(50, False)
    def reserve(i):
        b = a + i * llmemory.raw_malloc_usage(precomputed_size)
        arena_reserve(b, precomputed_size)
        return b
    blist = []
    plist = []
    for i in range(4):
        b = reserve(i)
        (b + llmemory.offsetof(SX, 'x')).signed[0] = 100 + i
        blist.append(b)
        plist.append(llmemory.cast_adr_to_ptr(b, SPTR))
    # clear blist[1] and blist[2] but not blist[0] nor blist[3]
    arena_reset(blist[1], llmemory.raw_malloc_usage(precomputed_size)*2, False)
    py.test.raises(RuntimeError, "plist[1].x")     # marked as freed
    py.test.raises(RuntimeError, "plist[2].x")     # marked as freed
    # re-reserve object at index 1 and 2
    blist[1] = reserve(1)
    blist[2] = reserve(2)
    # check via object pointers
    assert plist[0].x == 100
    assert plist[3].x == 103
    py.test.raises(RuntimeError, "plist[1].x")     # marked as freed
    py.test.raises(RuntimeError, "plist[2].x")     # marked as freed
    # but we can still cast the old ptrs to addresses, which compare equal
    # to the new ones we got
    assert llmemory.cast_ptr_to_adr(plist[1]) == blist[1]
    assert llmemory.cast_ptr_to_adr(plist[2]) == blist[2]
    # check via addresses
    assert (blist[0] + llmemory.offsetof(SX, 'x')).signed[0] == 100
    assert (blist[3] + llmemory.offsetof(SX, 'x')).signed[0] == 103
    py.test.raises(lltype.UninitializedMemoryAccess,
          "(blist[1] + llmemory.offsetof(SX, 'x')).signed[0]")
    py.test.raises(lltype.UninitializedMemoryAccess,
          "(blist[2] + llmemory.offsetof(SX, 'x')).signed[0]")
    # clear and zero-fill the area over blist[0] and blist[1]
    arena_reset(blist[0], llmemory.raw_malloc_usage(precomputed_size)*2, True)
    # re-reserve and check it's zero
    blist[0] = reserve(0)
    blist[1] = reserve(1)
    assert (blist[0] + llmemory.offsetof(SX, 'x')).signed[0] == 0
    assert (blist[1] + llmemory.offsetof(SX, 'x')).signed[0] == 0
    assert (blist[3] + llmemory.offsetof(SX, 'x')).signed[0] == 103
    py.test.raises(lltype.UninitializedMemoryAccess,
          "(blist[2] + llmemory.offsetof(SX, 'x')).signed[0]")
Example #33
    def malloc_varsize_clear(self, typeid, length, size, itemsize,
                             offset_to_length, can_collect,
                             has_finalizer=False):
        # Only use the nursery if there are not too many items.
        if not raw_malloc_usage(itemsize):
            too_many_items = False
        else:
            # The following line is usually constant-folded because both
            # min_nursery_size and itemsize are constants (the latter
            # due to inlining).
            maxlength_for_minimal_nursery = (self.min_nursery_size // 4 //
                                             raw_malloc_usage(itemsize))
            
            # The actual maximum length for our nursery depends on how
            # many times our nursery is bigger than the minimal size.
            # The computation is done in this roundabout way so that
            #     the only remaining computation is the following
            # shift.
            maxlength = maxlength_for_minimal_nursery << self.nursery_scale
            too_many_items = length > maxlength

        if (has_finalizer or not can_collect or
            too_many_items or
            (raw_malloc_usage(size) > self.lb_young_var_basesize and
             raw_malloc_usage(size) > self.largest_young_var_basesize)):
            # ^^^ we do two size comparisons; the first one appears redundant,
            #     but it can be constant-folded if 'size' is a constant; then
            #     it almost always folds down to False, which kills the
            #     second comparison as well.
            return SemiSpaceGC.malloc_varsize_clear(self, typeid, length, size,
                                                    itemsize, offset_to_length,
                                                    can_collect, has_finalizer)
        # with the above checks we know now that totalsize cannot be more
        # than about half of the nursery size; in particular, the + and *
        # cannot overflow
        size_gc_header = self.gcheaderbuilder.size_gc_header
        totalsize = size_gc_header + size + itemsize * length
        result = self.nursery_free
        if raw_malloc_usage(totalsize) > self.nursery_top - result:
            result = self.collect_nursery()
        llarena.arena_reserve(result, totalsize)
        # GCFLAG_NO_YOUNG_PTRS is never set on young objs
        self.init_gc_object(result, typeid, flags=0)
        (result + size_gc_header + offset_to_length).signed[0] = length
        self.nursery_free = result + llarena.round_up_for_allocation(totalsize)
        return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
Example #34
 def malloc_varsize_collecting_nursery(self, totalsize):
     result = self.collect_nursery()
     ll_assert(raw_malloc_usage(totalsize) <= self.nursery_top - result,
               "not enough room in malloc_varsize_collecting_nursery()")
     llarena.arena_reserve(result, totalsize)
     self.nursery_free = result + llarena.round_up_for_allocation(
         totalsize)
     return result
Example #35
 def markcompactcollect(self, needed=0):
     start_time = self.debug_collect_start()
     self.debug_check_consistency()
     self.to_see = self.AddressStack()
     self.mark_roots_recursively()
     if (self.objects_with_finalizers.non_empty()
             or self.run_finalizers.non_empty()):
         self.mark_objects_with_finalizers()
         self._trace_and_mark()
     self.to_see.delete()
     num_of_alive_objs = self.compute_alive_objects()
     size_of_alive_objs = self.totalsize_of_objs
     totalsize = self.new_space_size(
         size_of_alive_objs, needed + num_of_alive_objs * BYTES_PER_TID)
     tid_backup_size = (llmemory.sizeof(self.TID_BACKUP, 0) +
                        llmemory.sizeof(TID_TYPE) * num_of_alive_objs)
     used_space_now = self.next_collect_after + raw_malloc_usage(
         tid_backup_size)
     if totalsize >= self.space_size or used_space_now >= self.space_size:
         toaddr = self.double_space_size(totalsize)
         llarena.arena_reserve(toaddr + size_of_alive_objs, tid_backup_size)
         self.tid_backup = llmemory.cast_adr_to_ptr(
             toaddr + size_of_alive_objs, lltype.Ptr(self.TID_BACKUP))
         resizing = True
     else:
         toaddr = llarena.arena_new_view(self.space)
         llarena.arena_reserve(self.top_of_space, tid_backup_size)
         self.tid_backup = llmemory.cast_adr_to_ptr(
             self.top_of_space, lltype.Ptr(self.TID_BACKUP))
         resizing = False
     self.next_collect_after = totalsize
     weakref_offsets = self.collect_weakref_offsets()
     finaladdr = self.update_forward_pointers(toaddr, num_of_alive_objs)
     if (self.run_finalizers.non_empty()
             or self.objects_with_finalizers.non_empty()):
         self.update_run_finalizers()
     if self.objects_with_weakrefs.non_empty():
         self.invalidate_weakrefs(weakref_offsets)
     self.update_objects_with_id()
     self.compact(resizing)
     if not resizing:
         size = toaddr + self.space_size - finaladdr
         llarena.arena_reset(finaladdr, size, True)
     else:
         if we_are_translated():
             # because we free stuff already in raw_memmove, we
             # would get double free here. Let's free it anyway
             llarena.arena_free(self.space)
         llarena.arena_reset(toaddr + size_of_alive_objs, tid_backup_size,
                             True)
     self.space = toaddr
     self.free = finaladdr
     self.top_of_space = toaddr + self.next_collect_after
     self.debug_check_consistency()
     self.tid_backup = lltype.nullptr(self.TID_BACKUP)
     if self.run_finalizers.non_empty():
         self.execute_finalizers()
     self.debug_collect_finish(start_time)
Example #36
 def track_heap(self, adr):
     if self._tracked_dict.contains(adr):
         return
     self._tracked_dict.add(adr)
     idx = llop.get_member_index(lltype.Signed, self.get_type_id(adr))
     self._ll_typeid_map[idx].count += 1
     totsize = self.get_size(adr) + self.size_gc_header()
     self._ll_typeid_map[idx].size += llmemory.raw_malloc_usage(totsize)
     self.trace(adr, self.track_heap_parent, adr)
Example #37
 def track_heap(self, adr):
     if self._tracked_dict.contains(adr):
         return
     self._tracked_dict.add(adr)
     idx = llop.get_member_index(lltype.Signed, self.get_type_id(adr))
     self._ll_typeid_map[idx].count += 1
     totsize = self.get_size(adr) + self.size_gc_header()
     self._ll_typeid_map[idx].size += llmemory.raw_malloc_usage(totsize)
     self.trace(adr, self.track_heap_parent, adr)
Example #38
 def __sub__(self, other):
     if isinstance(other, llmemory.AddressOffset):
         other = llmemory.raw_malloc_usage(other)
     if isinstance(other, (int, long)):
         return self.arena.getaddr(self.offset - other)
     if isinstance(other, fakearenaaddress):
         if self.arena is not other.arena:
             raise ArenaError("The two addresses are from different arenas")
         return self.offset - other.offset
     return NotImplemented
Example #39
    def malloc_varsize_clear(self, typeid, length, size, itemsize,
                             offset_to_length, can_collect,
                             has_finalizer=False):
        if has_finalizer or not can_collect:
            return SemiSpaceGC.malloc_varsize_clear(self, typeid, length, size,
                                                    itemsize, offset_to_length,
                                                    can_collect, has_finalizer)
        size_gc_header = self.gcheaderbuilder.size_gc_header
        nonvarsize = size_gc_header + size

        # Compute the maximal length that makes the object still
        # below 'nonlarge_max'.  All the following logic is usually
        # constant-folded because self.nonlarge_max, size and itemsize
        # are all constants (the arguments are constant due to
        # inlining) and self.has_gcptr_in_varsize() is constant-folded.
        if self.has_gcptr_in_varsize(typeid):
            nonlarge_max = self.nonlarge_gcptrs_max
        else:
            nonlarge_max = self.nonlarge_max

        if not raw_malloc_usage(itemsize):
            too_many_items = raw_malloc_usage(nonvarsize) > nonlarge_max
        else:
            maxlength = nonlarge_max - raw_malloc_usage(nonvarsize)
            maxlength = maxlength // raw_malloc_usage(itemsize)
            too_many_items = length > maxlength

        if not too_many_items:
            # With the above checks we know now that totalsize cannot be more
            # than 'nonlarge_max'; in particular, the + and * cannot overflow.
            # Let's try to fit the object in the nursery.
            totalsize = nonvarsize + itemsize * length
            result = self.nursery_free
            if raw_malloc_usage(totalsize) <= self.nursery_top - result:
                llarena.arena_reserve(result, totalsize)
                # GCFLAG_NO_YOUNG_PTRS is never set on young objs
                self.init_gc_object(result, typeid, flags=0)
                (result + size_gc_header + offset_to_length).signed[0] = length
                self.nursery_free = result + llarena.round_up_for_allocation(
                    totalsize)
                return llmemory.cast_adr_to_ptr(result+size_gc_header,
                                                llmemory.GCREF)
        return self.malloc_varsize_slowpath(typeid, length)
Example #40
 def __sub__(self, other):
     if isinstance(other, llmemory.AddressOffset):
         other = llmemory.raw_malloc_usage(other)
     if is_valid_int(other):
         return self.arena.getaddr(self.offset - other)
     if isinstance(other, fakearenaaddress):
         if self.arena is not other.arena:
             raise ArenaError("The two addresses are from different arenas")
         return self.offset - other.offset
     return NotImplemented
Example #41
 def __init__(self, arena_size, page_size, small_request_threshold):
     # 'small_request_threshold' is the largest size that we
     # can ask with self.malloc().
     self.arena_size = arena_size
     self.page_size = page_size
     self.small_request_threshold = small_request_threshold
     #
     # 'pageaddr_for_size': for each size N between WORD and
     # small_request_threshold (included), contains either NULL or
     # a pointer to a page that has room for at least one more
     # allocation of the given size.
     length = small_request_threshold / WORD + 1
     self.page_for_size = lltype.malloc(rffi.CArray(PAGE_PTR), length,
                                        flavor='raw', zero=True,
                                        immortal=True)
     self.full_page_for_size = lltype.malloc(rffi.CArray(PAGE_PTR), length,
                                             flavor='raw', zero=True,
                                             immortal=True)
     self.nblocks_for_size = lltype.malloc(rffi.CArray(lltype.Signed),
                                           length, flavor='raw',
                                           immortal=True)
     self.hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER))
     assert page_size > self.hdrsize
     self.nblocks_for_size[0] = 0    # unused
     for i in range(1, length):
         self.nblocks_for_size[i] = (page_size - self.hdrsize) // (WORD * i)
     #
     self.max_pages_per_arena = arena_size // page_size
     self.arenas_lists = lltype.malloc(rffi.CArray(ARENA_PTR),
                                       self.max_pages_per_arena,
                                       flavor='raw', zero=True,
                                       immortal=True)
     # this is used in mass_free() only
     self.old_arenas_lists = lltype.malloc(rffi.CArray(ARENA_PTR),
                                           self.max_pages_per_arena,
                                           flavor='raw', zero=True,
                                           immortal=True)
     #
     # the arena currently consumed; it must have at least one page
     # available, or be NULL.  The arena object that we point to is
     # not in any 'arenas_lists'.  We will consume all its pages before
     # we choose a next arena, even if there is a major collection
     # in-between.
     self.current_arena = ARENA_NULL
     #
     # guarantee that 'arenas_lists[1:min_empty_nfreepages]' are all empty
     self.min_empty_nfreepages = self.max_pages_per_arena
     #
     # part of current_arena might still contain uninitialized pages
     self.num_uninitialized_pages = 0
     #
     # the total memory used, counting every block in use, without
     # the additional bookkeeping stuff.
     self.total_memory_used = r_uint(0)
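
The nblocks_for_size loop above is easy to sanity-check by hand: size class i serves blocks of WORD * i bytes, so a page holds (page_size - hdrsize) // (WORD * i) of them. A stand-alone sketch with assumed constants (the real WORD, page size and PAGE_HEADER size are platform- and layout-dependent):

WORD = 8                        # assumed machine word size
PAGE_SIZE = 4096                # assumed page_size
HDRSIZE = 24                    # assumed raw_malloc_usage(sizeof(PAGE_HEADER))
SMALL_REQUEST_THRESHOLD = 256   # assumed largest size served by malloc()

length = SMALL_REQUEST_THRESHOLD // WORD + 1
nblocks_for_size = [0] * length          # index 0 is unused, as above
for i in range(1, length):
    nblocks_for_size[i] = (PAGE_SIZE - HDRSIZE) // (WORD * i)

# e.g. size class 1 (8-byte blocks): (4096 - 24) // 8 == 509 blocks per page
assert nblocks_for_size[1] == 509
# the largest class (256-byte blocks) fits only 15 blocks per page
assert nblocks_for_size[length - 1] == 15
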
Example #42
 def _get_memory(self, totalsize):
     # also counts the space that will be needed during the following
     # collection to store the TID
     requested_size = raw_malloc_usage(totalsize) + BYTES_PER_TID
     self.next_collect_after -= requested_size
     if self.next_collect_after < 0:
         result = self.obtain_free_space(requested_size)
     else:
         result = self.free
     self.free += totalsize
     llarena.arena_reserve(result, totalsize)
     return result
Example #43
 def _get_memory(self, totalsize):
     # also counts the space that will be needed during the following
     # collection to store the TID
     requested_size = raw_malloc_usage(totalsize) + BYTES_PER_TID
     self.next_collect_after -= requested_size
     if self.next_collect_after < 0:
         result = self.obtain_free_space(requested_size)
     else:
         result = self.free
     self.free += totalsize
     llarena.arena_reserve(result, totalsize)
     return result
Example #44
 def malloc_fixedsize_clear(self, typeid16, size, has_finalizer=False, contains_weakptr=False):
     size_gc_header = self.gcheaderbuilder.size_gc_header
     totalsize = size_gc_header + size
     result = self.free
     if raw_malloc_usage(totalsize) > self.top_of_space - result:
         result = self.obtain_free_space(totalsize)
     llarena.arena_reserve(result, totalsize)
     self.init_gc_object(result, typeid16)
     self.free = result + totalsize
     if has_finalizer:
         self.objects_with_finalizers.append(result + size_gc_header)
     if contains_weakptr:
         self.objects_with_weakrefs.append(result + size_gc_header)
     return llmemory.cast_adr_to_ptr(result + size_gc_header, llmemory.GCREF)
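
This is the plain bump-pointer pattern of the semispace GC: check the request against the distance to top_of_space, reserve, then advance free. A bare-bones sketch of that pattern with plain integer offsets instead of llmemory addresses (names are made up for illustration):

class BumpAllocator(object):
    # Not the real GC class: a toy model of the 'free'/'top_of_space' dance above.
    def __init__(self, space_size):
        self.free = 0
        self.top_of_space = space_size

    def malloc(self, totalsize):
        if totalsize > self.top_of_space - self.free:
            raise MemoryError("this is where obtain_free_space() would collect")
        result = self.free
        self.free = result + totalsize
        return result

a = BumpAllocator(64)
assert a.malloc(16) == 0
assert a.malloc(16) == 16
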
Example #45
 def shrink_obj(self, offset, newsize):
     oldbytes = self.objectsizes[offset]
     newbytes = llmemory.raw_malloc_usage(newsize)
     assert newbytes <= oldbytes
     # fix self.objectsizes
     for i in range(newbytes):
         adr = offset + i
         if adr in self.objectsizes:
             assert self.objectsizes[adr] == oldbytes - i
             self.objectsizes[adr] = newbytes - i
     # fix self.usagemap
     for i in range(offset + newbytes, offset + oldbytes):
         assert self.usagemap[i] == 'x'
         self.usagemap[i] = '#'
Example #46
 def shrink_obj(self, offset, newsize):
     oldbytes = self.objectsizes[offset]
     newbytes = llmemory.raw_malloc_usage(newsize)
     assert newbytes <= oldbytes
     # fix self.objectsizes
     for i in range(newbytes):
         adr = offset + i
         if adr in self.objectsizes:
             assert self.objectsizes[adr] == oldbytes - i
             self.objectsizes[adr] = newbytes - i
     # fix self.usagemap
     for i in range(offset + newbytes, offset + oldbytes):
         assert self.usagemap[i] == 'x'
         self.usagemap[i] = '#'
Example #47
def arena_reserve(addr, size, check_alignment=True):
    """Mark some bytes in an arena as reserved, and returns addr.
    For debugging this can check that reserved ranges of bytes don't
    overlap.  The size must be symbolic; in non-translated version
    this is used to know what type of lltype object to allocate."""
    from pypy.rpython.memory.lltypelayout import memory_alignment
    addr = getfakearenaaddress(addr)
    letter = 'x'
    if llmemory.raw_malloc_usage(size) == 1:
        letter = 'b'  # for Byte-aligned allocations
    elif check_alignment and (addr.offset & (memory_alignment - 1)) != 0:
        raise ArenaError("object at offset %d would not be correctly aligned" %
                         (addr.offset, ))
    addr.arena.allocate_object(addr.offset, size, letter)
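
The alignment test above relies on memory_alignment being a power of two, so offset & (memory_alignment - 1) is equivalent to offset % memory_alignment. A tiny sketch, assuming an 8-byte alignment:

MEMORY_ALIGNMENT = 8            # assumed; the real value comes from lltypelayout

def is_aligned(offset):
    # a power-of-two alignment lets '&' replace the slower '%'
    return (offset & (MEMORY_ALIGNMENT - 1)) == 0

assert is_aligned(16)
assert not is_aligned(13)
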
Example #48
def arena_reserve(addr, size, check_alignment=True):
    """Mark some bytes in an arena as reserved, and returns addr.
    For debugging this can check that reserved ranges of bytes don't
    overlap.  The size must be symbolic; in non-translated version
    this is used to know what type of lltype object to allocate."""
    from pypy.rpython.memory.lltypelayout import memory_alignment
    addr = getfakearenaaddress(addr)
    letter = 'x'
    if llmemory.raw_malloc_usage(size) == 1:
        letter = 'b'    # for Byte-aligned allocations
    elif check_alignment and (addr.offset & (memory_alignment-1)) != 0:
        raise ArenaError("object at offset %d would not be correctly aligned"
                         % (addr.offset,))
    addr.arena.allocate_object(addr.offset, size, letter)
Example #49
 def malloc_varsize_marknsweep(self, totalsize):
     # In order to free the large objects from time to time, we
     # arbitrarily force a full collect() if none occurs when we have
     # allocated self.space_size + rawmalloced bytes of large objects.
     self._check_rawsize_alloced(raw_malloc_usage(totalsize))
     result = self.allocate_external_object(totalsize)
     if not result:
         raise MemoryError()
     # The parent classes guarantee zero-filled allocations, so we
     # need to follow suit.
     llmemory.raw_memclear(result, totalsize)
     size_gc_header = self.gcheaderbuilder.size_gc_header
     self.gen2_rawmalloced_objects.append(result + size_gc_header)
     return result
Example #50
 def malloc_varsize_marknsweep(self, totalsize):
     # In order to free the large objects from time to time, we
     # arbitrarily force a full collect() if none occurs when we have
     # allocated self.space_size + rawmalloced bytes of large objects.
     self._check_rawsize_alloced(raw_malloc_usage(totalsize))
     result = self.allocate_external_object(totalsize)
     if not result:
         raise MemoryError()
     # The parent classes guarantee zero-filled allocations, so we
     # need to follow suit.
     llmemory.raw_memclear(result, totalsize)
     size_gc_header = self.gcheaderbuilder.size_gc_header
     self.gen2_rawmalloced_objects.append(result + size_gc_header)
     return result
Example #51
 def compute_alive_objects(self):
     fromaddr = self.space
     totalsize = 0
     num = 1
     while fromaddr < self.free:
         size_gc_header = self.gcheaderbuilder.size_gc_header
         hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR))
         obj = fromaddr + size_gc_header
         objsize = self.get_size(obj)
         objtotalsize = size_gc_header + objsize
         if self.marked(obj):
             totalsize += raw_malloc_usage(objtotalsize)
         num += 1
         fromaddr += objtotalsize
     self.totalsize_of_objs = totalsize
     return num
Example #52
 def malloc_varsize_clear(self, typeid16, length, size, itemsize, offset_to_length):
     size_gc_header = self.gcheaderbuilder.size_gc_header
     nonvarsize = size_gc_header + size
     try:
         varsize = ovfcheck(itemsize * length)
         totalsize = ovfcheck(nonvarsize + varsize)
     except OverflowError:
         raise memoryError
     result = self.free
     if raw_malloc_usage(totalsize) > self.top_of_space - result:
         result = self.obtain_free_space(totalsize)
     llarena.arena_reserve(result, totalsize)
     self.init_gc_object(result, typeid16)
     (result + size_gc_header + offset_to_length).signed[0] = length
     self.free = result + llarena.round_up_for_allocation(totalsize)
     return llmemory.cast_adr_to_ptr(result + size_gc_header, llmemory.GCREF)
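
Unlike the nursery path of Example #39, this variant does not pre-bound length, so the multiplication and addition are wrapped in ovfcheck and any overflow becomes a MemoryError. A rough plain-Python stand-in for that guard (ovfcheck itself is an RPython helper; here sys.maxsize plays the role of the machine-word limit):

import sys

def checked_totalsize(nonvarsize, itemsize, length):
    # Plain-Python stand-in for the two ovfcheck() calls above.
    varsize = itemsize * length
    totalsize = nonvarsize + varsize
    if totalsize > sys.maxsize:
        raise MemoryError("varsize object too large")
    return totalsize

assert checked_totalsize(24, 8, 100) == 824
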
Example #53
 def malloc_fixedsize_clear(self, typeid, size, can_collect,
                            has_finalizer=False, contains_weakptr=False):
     size_gc_header = self.gcheaderbuilder.size_gc_header
     totalsize = size_gc_header + size
     result = self.free
     if raw_malloc_usage(totalsize) > self.top_of_space - result:
         if not can_collect:
             raise memoryError
         result = self.obtain_free_space(totalsize)
     llarena.arena_reserve(result, totalsize)
     self.init_gc_object(result, typeid)
     self.free = result + totalsize
     if has_finalizer:
         self.objects_with_finalizers.append(result + size_gc_header)
     if contains_weakptr:
         self.objects_with_weakrefs.append(result + size_gc_header)
     return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
Example #54
 def __add__(self, other):
     if is_valid_int(other):
         position = self.offset + other
     elif isinstance(other, llmemory.AddressOffset):
         # this is really some Do What I Mean logic.  There are two
         # possible meanings: either we want to go past the current
         # object in the arena, or we want to take the address inside
         # the current object.  Try to guess...
         bytes = llmemory.raw_malloc_usage(other)
         if (self.offset in self.arena.objectsizes
                 and bytes < self.arena.objectsizes[self.offset]):
             # looks like we mean "inside the object"
             return llmemory.fakeaddress.__add__(self, other)
         position = self.offset + bytes
     else:
         return NotImplemented
     return self.arena.getaddr(position)
Example #55
 def __add__(self, other):
     if isinstance(other, (int, long)):
         position = self.offset + other
     elif isinstance(other, llmemory.AddressOffset):
         # this is really some Do What I Mean logic.  There are two
         # possible meanings: either we want to go past the current
         # object in the arena, or we want to take the address inside
         # the current object.  Try to guess...
         bytes = llmemory.raw_malloc_usage(other)
         if (self.offset in self.arena.objectsizes and
             bytes < self.arena.objectsizes[self.offset]):
             # looks like we mean "inside the object"
             return llmemory.fakeaddress.__add__(self, other)
         position = self.offset + bytes
     else:
         return NotImplemented
     return self.arena.getaddr(position)
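
The two __add__ variants above implement the same guess: if the integer offset being added stays strictly inside the object recorded at the current offset, it is an interior pointer; otherwise it means "past this object". A minimal sketch of that rule, with a fake objectsizes mapping (object offset -> size in bytes):

objectsizes = {0: 16, 16: 32}          # assumed: two objects, at offsets 0 and 16

def add_offset(offset, bytes):
    # Same decision as the "Do What I Mean" branch above.
    if offset in objectsizes and bytes < objectsizes[offset]:
        return ('interior', offset, bytes)     # address inside the object
    return ('past', offset + bytes)            # address after the object

assert add_offset(16, 8) == ('interior', 16, 8)
assert add_offset(16, 32) == ('past', 48)
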
Example #56
 def malloc_fixedsize_clear(self, typeid16, size,
                            has_finalizer=False,
                            is_finalizer_light=False,
                            contains_weakptr=False):
     size_gc_header = self.gcheaderbuilder.size_gc_header
     totalsize = size_gc_header + size
     result = self.free
     if raw_malloc_usage(totalsize) > self.top_of_space - result:
         result = self.obtain_free_space(totalsize)
     llarena.arena_reserve(result, totalsize)
     self.init_gc_object(result, typeid16)
     self.free = result + totalsize
     #if is_finalizer_light:
     #    self.objects_with_light_finalizers.append(result + size_gc_header)
     #else:
     if has_finalizer:
         self.objects_with_finalizers.append(result + size_gc_header)
     if contains_weakptr:
         self.objects_with_weakrefs.append(result + size_gc_header)
     return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
Example #57
 def malloc(self, size):
     """Allocate a block from a page in an arena."""
     nsize = llmemory.raw_malloc_usage(size)
     ll_assert(nsize > 0, "malloc: size is null or negative")
     ll_assert(nsize <= self.small_request_threshold,"malloc: size too big")
     ll_assert((nsize & (WORD-1)) == 0, "malloc: size is not aligned")
     self.total_memory_used += r_uint(nsize)
     #
     # Get the page to use from the size
     size_class = nsize >> WORD_POWER_2
     page = self.page_for_size[size_class]
     if page == PAGE_NULL:
         page = self.allocate_new_page(size_class)
     #
     # The result is simply 'page.freeblock'
     result = page.freeblock
     if page.nfree > 0:
         #
         # The 'result' was part of the chained list; read the next.
         page.nfree -= 1
         freeblock = result.address[0]
         llarena.arena_reset(result,
                             llmemory.sizeof(llmemory.Address),
                             0)
         #
     else:
         # The 'result' is part of the uninitialized blocks.
         freeblock = result + nsize
     #
     page.freeblock = freeblock
     #
     pageaddr = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
     if freeblock - pageaddr > self.page_size - nsize:
         # This was the last free block, so unlink the page from the
         # chained list and put it in the 'full_page_for_size' list.
         self.page_for_size[size_class] = page.nextpage
         page.nextpage = self.full_page_for_size[size_class]
         self.full_page_for_size[size_class] = page
     #
     llarena.arena_reserve(result, _dummy_size(size))
     return result
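
Because every request size is already rounded up to a multiple of WORD, the size class is just nsize >> WORD_POWER_2, i.e. the number of words in the block. A quick sketch of that mapping, assuming WORD == 8 (so WORD_POWER_2 == 3):

WORD = 8
WORD_POWER_2 = 3                # assumed: log2(WORD)

def size_class_for(nsize):
    assert 0 < nsize and nsize % WORD == 0
    return nsize >> WORD_POWER_2

assert size_class_for(8) == 1       # smallest class: one word
assert size_class_for(64) == 8      # 64-byte blocks are class 8
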
Example #58
 def malloc_varsize_marknsweep(self, totalsize, resizable=False):
     # In order to free the large objects from time to time, we
     # arbitrarily force a full collect() if none occurs when we have
     # allocated 'self.space_size' bytes of large objects.
     # XXX we should probably track the total raw_malloc'ed size
     # XXX and adjust sizes based on it; otherwise we risk doing
     # XXX many many collections if the program allocates a lot
     # XXX more than the current self.space_size.
     self._check_rawsize_alloced(raw_malloc_usage(totalsize))
     result = self.allocate_external_object(totalsize)
     if not result:
         raise MemoryError()
     # The parent classes guarantee zero-filled allocations, so we
     # need to follow suit.
     llmemory.raw_memclear(result, totalsize)
     size_gc_header = self.gcheaderbuilder.size_gc_header
     if resizable:
         self.gen2_resizable_objects.append(result + size_gc_header)
     else:
         self.gen2_rawmalloced_objects.append(result + size_gc_header)
     return result
Example #59
 def malloc(self, size):
     """Allocate a block from a page in an arena."""
     nsize = llmemory.raw_malloc_usage(size)
     ll_assert(nsize > 0, "malloc: size is null or negative")
     ll_assert(nsize <= self.small_request_threshold,
               "malloc: size too big")
     ll_assert((nsize & (WORD - 1)) == 0, "malloc: size is not aligned")
     self.total_memory_used += r_uint(nsize)
     #
     # Get the page to use from the size
     size_class = nsize >> WORD_POWER_2
     page = self.page_for_size[size_class]
     if page == PAGE_NULL:
         page = self.allocate_new_page(size_class)
     #
     # The result is simply 'page.freeblock'
     result = page.freeblock
     if page.nfree > 0:
         #
         # The 'result' was part of the chained list; read the next.
         page.nfree -= 1
         freeblock = result.address[0]
         llarena.arena_reset(result, llmemory.sizeof(llmemory.Address), 0)
         #
     else:
         # The 'result' is part of the uninitialized blocks.
         freeblock = result + nsize
     #
     page.freeblock = freeblock
     #
     pageaddr = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
     if freeblock - pageaddr > self.page_size - nsize:
         # This was the last free block, so unlink the page from the
         # chained list and put it in the 'full_page_for_size' list.
         self.page_for_size[size_class] = page.nextpage
         page.nextpage = self.full_page_for_size[size_class]
         self.full_page_for_size[size_class] = page
     #
     llarena.arena_reserve(result, _dummy_size(size))
     return result
Example #60
def test_replace_object_with_stub():
    from pypy.rpython.memory.gcheader import GCHeaderBuilder
    HDR = lltype.Struct('HDR', ('x', lltype.Signed))
    S = lltype.GcStruct('S', ('y', lltype.Signed), ('z', lltype.Signed))
    STUB = lltype.GcStruct('STUB', ('t', lltype.Char))
    gcheaderbuilder = GCHeaderBuilder(HDR)
    size_gc_header = gcheaderbuilder.size_gc_header
    ssize = llmemory.raw_malloc_usage(llmemory.sizeof(S))

    a = arena_malloc(13 * ssize, True)
    hdraddr = a + 3 * ssize
    arena_reserve(hdraddr, size_gc_header + llmemory.sizeof(S))
    hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(HDR))
    hdr.x = 42
    obj = llmemory.cast_adr_to_ptr(hdraddr + size_gc_header, lltype.Ptr(S))
    obj.y = -5
    obj.z = -6

    hdraddr = llmemory.cast_ptr_to_adr(obj) - size_gc_header
    arena_reset(hdraddr, size_gc_header + llmemory.sizeof(S), False)
    arena_reserve(hdraddr, size_gc_header + llmemory.sizeof(STUB))

    # check that it is possible to reach the newly reserved HDR+STUB
    # via the header of the old 'obj' pointer, both via the existing
    # 'hdraddr':
    hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(HDR))
    hdr.x = 46
    stub = llmemory.cast_adr_to_ptr(hdraddr + size_gc_header, lltype.Ptr(STUB))
    stub.t = '!'

    # and via a (now-invalid) pointer to the old 'obj': (this is needed
    # because during a garbage collection there are still pointers to
    # the old 'obj' around to be fixed)
    hdraddr = llmemory.cast_ptr_to_adr(obj) - size_gc_header
    hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(HDR))
    assert hdr.x == 46
    stub = llmemory.cast_adr_to_ptr(hdraddr + size_gc_header, lltype.Ptr(STUB))
    assert stub.t == '!'