Example 1
 def trace_partial(self, obj, start, stop, callback, arg):
     """Like trace(), but only walk the array part, for indices in
     range(start, stop).  Must only be called if has_gcptr_in_varsize().
     """
     length = stop - start
     typeid = self.get_type_id(obj)
     if self.is_gcarrayofgcptr(typeid):
         # a performance shortcut for GcArray(gcptr)
         item = obj + llmemory.gcarrayofptr_itemsoffset
         item += llmemory.gcarrayofptr_singleitemoffset * start
         while length > 0:
             if self.points_to_valid_gc_object(item):
                 callback(item, arg)
             item += llmemory.gcarrayofptr_singleitemoffset
             length -= 1
         return
     ll_assert(self.has_gcptr_in_varsize(typeid),
               "trace_partial() on object without has_gcptr_in_varsize()")
     item = obj + self.varsize_offset_to_variable_part(typeid)
     offsets = self.varsize_offsets_to_gcpointers_in_var_part(typeid)
     itemlength = self.varsize_item_sizes(typeid)
     item += itemlength * start
     while length > 0:
         j = 0
         while j < len(offsets):
             itemobj = item + offsets[j]
             if self.points_to_valid_gc_object(itemobj):
                 callback(itemobj, arg)
             j += 1
         item += itemlength
         length -= 1
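
Every snippet in this listing relies on ll_assert from pypy.rlib.debug. As a point of reference, here is a minimal sketch of its untranslated behavior (assuming the usual definition; after translation to C the call is compiled to a low-level assertion rather than a Python assert):

# Sketch of ll_assert as seen by untranslated code; in a translated
# build the condition is checked by a C-level assertion instead.
def ll_assert(x, msg):
    assert x, msg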
Example 2
def get_result_suspstack(h):
    # Now we are in the target, after the switch() or the new().
    # Note that this whole module was carefully written in such a way as
    # not to invoke pushing/popping things off the shadowstack at
    # unexpected moments...
    oldsuspstack = gcrootfinder.oldsuspstack
    newsuspstack = gcrootfinder.newsuspstack
    gcrootfinder.oldsuspstack = NULL_SUSPSTACK
    gcrootfinder.newsuspstack = NULL_SUSPSTACK
    if not h:
        raise MemoryError
    # We still have the old shadowstack active at this point; save it
    # away, and restore the new one
    if oldsuspstack:
        ll_assert(not _c.is_empty_handle(h), "unexpected empty stacklet handle")
        h = llmemory.cast_ptr_to_adr(h)
        llop.gc_save_current_state_away(lltype.Void, oldsuspstack, h)
    else:
        ll_assert(_c.is_empty_handle(h), "unexpected non-empty stacklet handle")
        llop.gc_forget_current_state(lltype.Void)
    #
    llop.gc_restore_state_from(lltype.Void, newsuspstack)
    #
    # From this point on, 'newsuspstack' is consumed and done, its
    # shadow stack installed as the current one.  It should not be
    # used any more.  For performance, we avoid it being deallocated
    # by letting it be reused on the next switch.
    gcrootfinder.oldsuspstack = newsuspstack
    # Return.
    return oldsuspstack
Example 3
 def replay(self, label, kinds):
     builder = LLBuilder(self, label.g, llimpl.nullblock)
     args_gv = builder._newblock(kinds)
     ll_assert(self.currently_writing is None,
                  "replay: currently_writing")
     self.currently_writing = builder
     return builder, args_gv
Example 4
    def double_space_size(self):
        self.red_zone = 0
        old_fromspace = self.fromspace
        newsize = self.space_size * 2
        newspace = llarena.arena_malloc(newsize, True)
        if not newspace:
            return False    # out of memory
        llarena.arena_free(old_fromspace)
        self.fromspace = newspace
        # now self.tospace contains the existing objects and
        # self.fromspace is the freshly allocated bigger space

        self.semispace_collect(size_changing=True)
        self.top_of_space = self.tospace + newsize
        # now self.tospace is the freshly allocated bigger space,
        # and self.fromspace is the old smaller space, now empty
        llarena.arena_free(self.fromspace)

        newspace = llarena.arena_malloc(newsize, True)
        if not newspace:
            # Complex failure case: we have in self.tospace a big chunk
            # of memory, and the two smaller original spaces are already gone.
            # Unsure if it's worth these efforts, but we can artificially
            # split self.tospace in two again...
            self.max_space_size = self.space_size    # don't try to grow again,
            #              because doing arena_free(self.fromspace) would crash
            self.fromspace = self.tospace + self.space_size
            self.top_of_space = self.fromspace
            ll_assert(self.free <= self.top_of_space,
                         "unexpected growth of GC space usage during collect")
            return False     # out of memory

        self.fromspace = newspace
        self.space_size = newsize
        return True    # success
Example 5
 def genraisingop1(self, opname, gv_arg):
     ll_assert(self.rgenop.currently_writing is self,
                  "genraisingop1: bad currently_writing")
     gv_res = LLVar(llimpl.genop(self.b, opname, [gv_arg], llimpl.guess))
     gv_exc = LLVar(llimpl.genop(self.b, "check_and_clear_exc", [],
                                 gv_Bool.v))
     return gv_res, gv_exc
Example 6
 def restore_state_from(self, shadowstackref):
     ll_assert(bool(shadowstackref.base), "empty shadowstackref!")
     ll_assert(shadowstackref.base <= shadowstackref.top,
               "restore_state_from: broken shadowstack")
     self.gcdata.root_stack_base = shadowstackref.base
     self.gcdata.root_stack_top = shadowstackref.top
     self._cleanup(shadowstackref)
Example 7
 def collect_nursery(self):
     if self.nursery_size > self.top_of_space - self.free:
         # the semispace is running out, do a full collect
         self.obtain_free_space(self.nursery_size)
         ll_assert(self.nursery_size <= self.top_of_space - self.free,
                   "obtain_free_space failed to do its job")
     if self.nursery:
         if DEBUG_PRINT:
             llop.debug_print(lltype.Void, "minor collect")
         # a nursery-only collection
         scan = beginning = self.free
         self.collect_oldrefs_to_nursery()
         self.collect_roots_in_nursery()
         scan = self.scan_objects_just_copied_out_of_nursery(scan)
         # at this point, all static and old objects have got their
         # GCFLAG_NO_YOUNG_PTRS set again by trace_and_drag_out_of_nursery
         if self.young_objects_with_weakrefs.non_empty():
             self.invalidate_young_weakrefs()
         self.notify_objects_just_moved()
         # mark the nursery as free and fill it with zeroes again
         llarena.arena_reset(self.nursery, self.nursery_size, True)
         if DEBUG_PRINT:
             llop.debug_print(lltype.Void, "percent survived:",
                              float(scan - beginning) / self.nursery_size)
     else:
         # no nursery - this occurs after a full collect, triggered either
         # just above or by some previous non-nursery-based allocation.
         # Grab a piece of the current space for the nursery.
         self.nursery = self.free
         self.nursery_top = self.nursery + self.nursery_size
         self.free = self.nursery_top
     self.nursery_free = self.nursery
     return self.nursery_free
Example 8
def get_result_suspstack(h):
    # Now we are in the target, after the switch() or the new().
    # Note that this whole module was carefully written in such a way as
    # not to invoke pushing/popping things off the shadowstack at
    # unexpected moments...
    oldsuspstack = gcrootfinder.oldsuspstack
    newsuspstack = gcrootfinder.newsuspstack
    gcrootfinder.oldsuspstack = NULL_SUSPSTACK
    gcrootfinder.newsuspstack = NULL_SUSPSTACK
    if not h:
        raise MemoryError
    # We still have the old shadowstack active at this point; save it
    # away, and restore the new one
    if oldsuspstack:
        ll_assert(not _c.is_empty_handle(h), "unexpected empty stacklet handle")
        h = llmemory.cast_ptr_to_adr(h)
        llop.gc_save_current_state_away(lltype.Void, oldsuspstack, h)
    else:
        ll_assert(_c.is_empty_handle(h), "unexpected non-empty stacklet handle")
        llop.gc_forget_current_state(lltype.Void)
    #
    llop.gc_restore_state_from(lltype.Void, newsuspstack)
    #
    # From this point on, 'newsuspstack' is consumed and done, its
    # shadow stack installed as the current one.  It should not be
    # used any more.  For performance, we avoid it being deallocated
    # by letting it be reused on the next switch.
    gcrootfinder.oldsuspstack = newsuspstack
    # Return.
    return oldsuspstack
Example 9
 def compute_alive_objects(self):
     fromaddr = self.space
     addraftercollect = self.space
     num = 1
     while fromaddr < self.free:
         size_gc_header = self.gcheaderbuilder.size_gc_header
         tid = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR)).tid
         obj = fromaddr + size_gc_header
         objsize = self.get_size(obj)
         objtotalsize = size_gc_header + objsize
         if self.marked(obj):
             copy_has_hash_field = ((tid & GCFLAG_HASHFIELD) != 0
                                    or ((tid & GCFLAG_HASHTAKEN) != 0
                                        and addraftercollect < fromaddr))
             addraftercollect += raw_malloc_usage(objtotalsize)
             if copy_has_hash_field:
                 addraftercollect += llmemory.sizeof(lltype.Signed)
         num += 1
         fromaddr += objtotalsize
         if tid & GCFLAG_HASHFIELD:
             fromaddr += llmemory.sizeof(lltype.Signed)
     ll_assert(addraftercollect <= fromaddr,
               "markcompactcollect() is trying to increase memory usage")
     self.totalsize_of_objs = addraftercollect - self.space
     return num
Example 10
 def attach_handle_on_suspstack(self, handle):
     s = self.suspstack
     self.suspstack = NULL_SUSPSTACK
     ll_assert(bool(s.anchor), "s.anchor should not be null")
     s.handle = handle
     llop.gc_assume_young_pointers(lltype.Void, llmemory.cast_ptr_to_adr(s))
     return s
Example 11
    def make_a_nonmoving_copy(self, obj, objsize):
        # NB. the object can have a finalizer or be a weakref, but
        # it's not an issue.
        totalsize = self.size_gc_header() + objsize
        tid = self.header(obj).tid
        if tid & GCFLAG_HASHMASK:
            totalsize_incl_hash = totalsize + llmemory.sizeof(lltype.Signed)
        else:
            totalsize_incl_hash = totalsize
        newaddr = self.allocate_external_object(totalsize_incl_hash)
        if not newaddr:
            return llmemory.NULL  # can't raise MemoryError during a collect()
        self._nonmoving_copy_count += 1
        self._nonmoving_copy_size += raw_malloc_usage(totalsize)

        llmemory.raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
        if tid & GCFLAG_HASHMASK:
            hash = self._get_object_hash(obj, objsize, tid)
            (newaddr + totalsize).signed[0] = hash
            tid |= GC_HASH_HASFIELD
        #
        # GCFLAG_UNVISITED is not set
        # GCFLAG_NO_HEAP_PTRS is not set either, conservatively.  It may be
        # set by the next collection's collect_last_generation_roots().
        # This old object is immediately put at generation 3.
        newobj = newaddr + self.size_gc_header()
        hdr = self.header(newobj)
        hdr.tid = tid | self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS
        ll_assert(self.is_last_generation(newobj),
                  "make_a_nonmoving_copy: object too young")
        self.gen3_rawmalloced_objects.append(newobj)
        self.last_generation_root_objects.append(newobj)
        self.rawmalloced_objects_to_trace.append(newobj)  # visit me
        return newobj
Example 12
 def _really_force(self):
     if self.source_op is None:
         # this case should not occur; I only managed to get it once
         # in pypy-c-jit and couldn't reproduce it.  The point is
         # that it relies on optimizefindnode.py computing exactly
         # the right level of specialization, and it seems that there
         # is still a corner case where it gets too specialized for
         # optimizeopt.py.  Let's not crash in release-built
         # pypy-c-jit's.  XXX find out when
         from pypy.rlib.debug import ll_assert
         ll_assert(False, "_really_force: source_op is None")
         raise InvalidLoop
     #
     newoperations = self.optimizer.newoperations
     newoperations.append(self.source_op)
     self.box = box = self.source_op.result
     #
     iteritems = self._fields.iteritems()
     if not we_are_translated(): #random order is fine, except for tests
         iteritems = list(iteritems)
         iteritems.sort(key = lambda (x,y): x.sort_key())
     for ofs, value in iteritems:
         if value.is_null():
             continue
         subbox = value.force_box()
         op = ResOperation(rop.SETFIELD_GC, [box, subbox], None,
                           descr=ofs)
         newoperations.append(op)
     self._fields = None
Example 13
 def compute_alive_objects(self):
     fromaddr = self.space
     addraftercollect = self.space
     num = 1
     while fromaddr < self.free:
         size_gc_header = self.gcheaderbuilder.size_gc_header
         tid = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR)).tid
         obj = fromaddr + size_gc_header
         objsize = self.get_size(obj)
         objtotalsize = size_gc_header + objsize
         if self.marked(obj):
             copy_has_hash_field = ((tid & GCFLAG_HASHFIELD) != 0 or
                                    ((tid & GCFLAG_HASHTAKEN) != 0 and
                                     addraftercollect < fromaddr))
             addraftercollect += raw_malloc_usage(objtotalsize)
             if copy_has_hash_field:
                 addraftercollect += llmemory.sizeof(lltype.Signed)
         num += 1
         fromaddr += objtotalsize
         if tid & GCFLAG_HASHFIELD:
             fromaddr += llmemory.sizeof(lltype.Signed)
     ll_assert(addraftercollect <= fromaddr,
               "markcompactcollect() is trying to increase memory usage")
     self.totalsize_of_objs = addraftercollect - self.space
     return num
Example 14
def ll_extend_with_str_slice(lst, s, getstrlen, getstritem, slice):
    start = slice.start
    stop = slice.stop
    len1 = lst.ll_length()
    len2 = getstrlen(s)
    ll_assert(start >= 0, "unexpectedly negative str slice start")
    ll_assert(start <= len2, "str slice start larger than str length")
    if stop > len2:
        stop = len2
    count2 = stop - start
    assert count2 >= 0, "str slice stop smaller than start"
    try:
        newlength = ovfcheck(len1 + count2)
    except OverflowError:
        raise MemoryError
    lst._ll_resize_ge(newlength)
    i = start
    j = len1
    while i < stop:
        c = getstritem(s, i)
        if listItemType(lst) is UniChar:
            c = unichr(ord(c))
        lst.ll_setitem_fast(j, c)
        i += 1
        j += 1
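
A plain-Python model may help here: stripped of the RPython plumbing (and of the UniChar conversion), ll_extend_with_str_slice extends the list with s[start:stop] after clamping stop to the string length. The model below is illustrative, not PyPy API:

# Plain-Python model of ll_extend_with_str_slice; the asserts encode
# the same preconditions that ll_assert checks above.
def extend_with_str_slice_model(lst, s, start, stop):
    assert start >= 0, "unexpectedly negative str slice start"
    assert start <= len(s), "str slice start larger than str length"
    if stop > len(s):
        stop = len(s)
    assert stop - start >= 0, "str slice stop smaller than start"
    lst.extend(s[start:stop])
    return lst

assert extend_with_str_slice_model(['a'], "hello", 1, 99) == ['a', 'e', 'l', 'l', 'o']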
Example 15
 def genop_ptr_ne(self, gv_PTRTYPE, gv_ptr1, gv_ptr2):
     ll_assert(self.rgenop.currently_writing is self,
                  "genop_ptr_ne: bad currently_writing")
     gv_ptr1 = llimpl.cast(self.b, gv_PTRTYPE.v, gv_ptr1.v)
      gv_ptr2 = llimpl.cast(self.b, gv_PTRTYPE.v, gv_ptr2.v)
     return LLVar(llimpl.genop(self.b, 'ptr_ne', [gv_ptr1, gv_ptr2],
                               gv_Bool.v))
Example 16
 def realloc(self, ptr, newlength, fixedsize, itemsize, lengthofs, grow):
     size_gc_header = self.size_gc_header()
     addr = llmemory.cast_ptr_to_adr(ptr)
     ll_assert(self.header(addr).tid & GCFLAG_EXTERNAL,
               "realloc() on a non-external object")
     nonvarsize = size_gc_header + fixedsize
     try:
         varsize = ovfcheck(itemsize * newlength)
         tot_size = ovfcheck(nonvarsize + varsize)
     except OverflowError:
         raise MemoryError()
     oldlength = (addr + lengthofs).signed[0]
     old_tot_size = size_gc_header + fixedsize + oldlength * itemsize
     source_addr = addr - size_gc_header
     self.gen2_resizable_objects.remove(addr)
     if grow:
         result = llop.raw_realloc_grow(llmemory.Address, source_addr,
                                        old_tot_size, tot_size)
     else:
         result = llop.raw_realloc_shrink(llmemory.Address, source_addr,
                                          old_tot_size, tot_size)
     if not result:
         self.gen2_resizable_objects.append(addr)
         raise MemoryError()
     if grow:
         self.gen2_resizable_objects.append(result + size_gc_header)
     else:
         self.gen2_rawmalloced_objects.append(result + size_gc_header)
     self._check_rawsize_alloced(raw_malloc_usage(tot_size) -
                                 raw_malloc_usage(old_tot_size),
                                 can_collect = not grow)
     (result + size_gc_header + lengthofs).signed[0] = newlength
     return llmemory.cast_adr_to_ptr(result + size_gc_header, llmemory.GCREF)
Example 17
 def genop_ptr_ne(self, gv_PTRTYPE, gv_ptr1, gv_ptr2):
     ll_assert(self.rgenop.currently_writing is self,
               "genop_ptr_ne: bad currently_writing")
     gv_ptr1 = llimpl.cast(self.b, gv_PTRTYPE.v, gv_ptr1.v)
     gv_ptr2 = llimpl.cast(self.b, gv_PTRTYPE.v, gv_ptr2.v)
     return LLVar(
         llimpl.genop(self.b, 'ptr_ne', [gv_ptr1, gv_ptr2], gv_Bool.v))
Example 18
 def f(i):
     b = B()
     lst = [i]
     lst[0] += 1
     b.y = lst
     ll_assert(b.y is lst, "copying when reading out the attr?")
     return b.y[0]
Example 19
 def ll_compress(fnptr):
     for c, p in unroll_table:
         if fnptr == p:
             return c
     else:
         ll_assert(fnptr == last_p, "unexpected function pointer")
         return last_c
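
For context, a hypothetical model of the data this function assumes: unroll_table pairs each possible function with a one-character code, while last_p/last_c hold the final entry, which is left out of the table and serves as the default. The names and values below are illustrative:

# Hypothetical model of pointer compression: all functions but the
# last are looked up in the table; the last one acts as the default.
def func_a(): pass
def func_b(): pass
def func_c(): pass

unroll_table = [('\x00', func_a), ('\x01', func_b)]
last_c, last_p = '\x02', func_c

def compress_model(fnptr):
    for c, p in unroll_table:
        if fnptr == p:
            return c
    assert fnptr == last_p, "unexpected function pointer"
    return last_c

assert compress_model(func_b) == '\x01'
assert compress_model(func_c) == '\x02'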
Example 20
 def genraisingop1(self, opname, gv_arg):
     ll_assert(self.rgenop.currently_writing is self,
               "genraisingop1: bad currently_writing")
     gv_res = LLVar(llimpl.genop(self.b, opname, [gv_arg], llimpl.guess))
     gv_exc = LLVar(
         llimpl.genop(self.b, "check_and_clear_exc", [], gv_Bool.v))
     return gv_res, gv_exc
Example 21
def ll_unerase_int(gcref):
    from pypy.rpython.lltypesystem.lloperation import llop
    from pypy.rlib.debug import ll_assert

    x = llop.cast_ptr_to_int(lltype.Signed, gcref)
    ll_assert((x & 1) != 0, "unerased_int(): not an integer")
    return x >> 1
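
Only the decoding half is shown above. A plain-Python model of the odd-tagging scheme that ll_unerase_int reverses (the model names are illustrative, not PyPy's rerased API):

# An integer x is erased as (x << 1) | 1; the low bit set to 1
# distinguishes tagged integers from word-aligned GC pointers.
def erase_int_model(x):
    return (x << 1) | 1

def unerase_int_model(v):
    assert (v & 1) != 0, "unerased_int(): not an integer"
    return v >> 1

assert unerase_int_model(erase_int_model(21)) == 21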
Example 22
    def make_a_nonmoving_copy(self, obj, objsize):
        # NB. the object can have a finalizer or be a weakref, but
        # it's not an issue.
        totalsize = self.size_gc_header() + objsize
        newaddr = self.allocate_external_object(totalsize)
        if not newaddr:
            return llmemory.NULL  # can't raise MemoryError during a collect()
        if DEBUG_PRINT:
            self._nonmoving_copy_count += 1
            self._nonmoving_copy_size += raw_malloc_usage(totalsize)

        llmemory.raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
        newobj = newaddr + self.size_gc_header()
        hdr = self.header(newobj)
        hdr.tid |= self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS
        # GCFLAG_UNVISITED is not set
        # GCFLAG_NO_HEAP_PTRS is not set either, conservatively.  It may be
        # set by the next collection's collect_last_generation_roots().
        # This old object is immediately put at generation 3.
        ll_assert(self.is_last_generation(newobj),
                  "make_a_nonmoving_copy: object too young")
        self.gen3_rawmalloced_objects.append(newobj)
        self.last_generation_root_objects.append(newobj)
        self.rawmalloced_objects_to_trace.append(newobj)  # visit me
        return newobj
Example 23
File: base.py Project: ieure/pypy
 def trace_partial(self, obj, start, stop, callback, arg):
     """Like trace(), but only walk the array part, for indices in
     range(start, stop).  Must only be called if has_gcptr_in_varsize().
     """
     length = stop - start
     typeid = self.get_type_id(obj)
     if self.is_gcarrayofgcptr(typeid):
         # a performance shortcut for GcArray(gcptr)
         item = obj + llmemory.gcarrayofptr_itemsoffset
         item += llmemory.gcarrayofptr_singleitemoffset * start
         while length > 0:
             if self.points_to_valid_gc_object(item):
                 callback(item, arg)
             item += llmemory.gcarrayofptr_singleitemoffset
             length -= 1
         return
     ll_assert(self.has_gcptr_in_varsize(typeid),
               "trace_partial() on object without has_gcptr_in_varsize()")
     item = obj + self.varsize_offset_to_variable_part(typeid)
     offsets = self.varsize_offsets_to_gcpointers_in_var_part(typeid)
     itemlength = self.varsize_item_sizes(typeid)
     item += itemlength * start
     while length > 0:
         j = 0
         while j < len(offsets):
             itemobj = item + offsets[j]
             if self.points_to_valid_gc_object(itemobj):
                 callback(itemobj, arg)
             j += 1
         item += itemlength
         length -= 1
Example 24
 def ll_iter_broadcast_to_shape(ITER, ao, target_ao):
     "iterate over <ao> but broadcast to the shape of <target_ao>"
     ll_assert(target_ao.ndim == ndim, "target_ao.ndim == ndim")
     delta = j = ndim - ao.ndim
     shape = target_ao.shape
     for i in range(ao.ndim):
         if ao.shape[i] != 1 and ao.shape[i] != shape[j]:
             raise Exception("array is not broadcastable to correct shape")
         j += 1
     it = malloc(ITER)
     it.size = ll_mul_list(target_ao.shape, ndim)
     it.nd_m1 = ndim - 1
     #it.factors[ndim-1] = 1
     for i in unroll_ndim:
         it.dims_m1[i] = shape[i]-1
         k = i - delta
         if k<0 or ao.shape[k] != shape[i]:
             #it.contiguous = False
             it.strides[i] = 0
         else:
             it.strides[i] = ao.strides[k]
         it.backstrides[i] = it.strides[i] * it.dims_m1[i]
         #if i > 0:
             #it.factors[ndim-i-1] = it.factors[nd-i]*shape[ndim-i]
     it.ll_reset(ao.dataptr)
     return it
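
The stride manipulation above is the usual NumPy-style broadcasting rule: a dimension that is missing or has size 1 gets stride 0, so stepping along it keeps rereading the same data. A self-contained plain-Python model (illustrative names, not PyPy API):

# Compute broadcast strides: stride 0 for broadcast dimensions,
# the original stride where the shapes already agree.
def broadcast_strides(shape, strides, target_shape):
    delta = len(target_shape) - len(shape)
    result = []
    for i, dim in enumerate(target_shape):
        k = i - delta
        if k < 0 or shape[k] != dim:
            if k >= 0 and shape[k] != 1:
                raise ValueError("array is not broadcastable to correct shape")
            result.append(0)      # broadcast dimension: do not advance
        else:
            result.append(strides[k])
    return result

assert broadcast_strides((3, 1), (8, 8), (2, 3, 4)) == [0, 8, 0]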
Example 25
 def collect_nursery(self):
     if self.nursery_size > self.top_of_space - self.free:
         # the semispace is running out, do a full collect
         self.obtain_free_space(self.nursery_size)
         ll_assert(self.nursery_size <= self.top_of_space - self.free,
                      "obtain_free_space failed to do its job")
     if self.nursery:
         if DEBUG_PRINT:
             llop.debug_print(lltype.Void, "minor collect")
         # a nursery-only collection
         scan = beginning = self.free
         self.collect_oldrefs_to_nursery()
         self.collect_roots_in_nursery()
         scan = self.scan_objects_just_copied_out_of_nursery(scan)
         # at this point, all static and old objects have got their
         # GCFLAG_NO_YOUNG_PTRS set again by trace_and_drag_out_of_nursery
         if self.young_objects_with_weakrefs.non_empty():
             self.invalidate_young_weakrefs()
         self.notify_objects_just_moved()
         # mark the nursery as free and fill it with zeroes again
         llarena.arena_reset(self.nursery, self.nursery_size, True)
         if DEBUG_PRINT:
             llop.debug_print(lltype.Void, "percent survived:", float(scan - beginning) / self.nursery_size)
     else:
         # no nursery - this occurs after a full collect, triggered either
         # just above or by some previous non-nursery-based allocation.
         # Grab a piece of the current space for the nursery.
         self.nursery = self.free
         self.nursery_top = self.nursery + self.nursery_size
         self.free = self.nursery_top
     self.nursery_free = self.nursery
     return self.nursery_free
Example 26
 def f(i):
     b = B()
     lst = [i]
     lst[0] += 1
     b.y = lst
     ll_assert(b.y is lst, "copying when reading out the attr?")
     return b.y[0]
Example 27
 def realloc(self, ptr, newlength, fixedsize, itemsize, lengthofs, grow):
     size_gc_header = self.size_gc_header()
     addr = llmemory.cast_ptr_to_adr(ptr)
     ll_assert(
         self.header(addr).tid & GCFLAG_EXTERNAL,
         "realloc() on a non-external object")
     nonvarsize = size_gc_header + fixedsize
     try:
         varsize = ovfcheck(itemsize * newlength)
         tot_size = ovfcheck(nonvarsize + varsize)
     except OverflowError:
         raise MemoryError()
     oldlength = (addr + lengthofs).signed[0]
     old_tot_size = size_gc_header + fixedsize + oldlength * itemsize
     source_addr = addr - size_gc_header
     self.gen2_resizable_objects.remove(addr)
     if grow:
         result = llop.raw_realloc_grow(llmemory.Address, source_addr,
                                        old_tot_size, tot_size)
     else:
         result = llop.raw_realloc_shrink(llmemory.Address, source_addr,
                                          old_tot_size, tot_size)
     if not result:
         self.gen2_resizable_objects.append(addr)
         raise MemoryError()
     if grow:
         self.gen2_resizable_objects.append(result + size_gc_header)
     else:
         self.gen2_rawmalloced_objects.append(result + size_gc_header)
     self._check_rawsize_alloced(raw_malloc_usage(tot_size) -
                                 raw_malloc_usage(old_tot_size),
                                 can_collect=not grow)
     (result + size_gc_header + lengthofs).signed[0] = newlength
     return llmemory.cast_adr_to_ptr(result + size_gc_header,
                                     llmemory.GCREF)
Example 28
 def attach_handle_on_suspstack(self, handle):
     s = self.suspstack
     self.suspstack = NULL_SUSPSTACK
     ll_assert(bool(s.anchor), "s.anchor should not be null")
     s.handle = handle
     llop.gc_assume_young_pointers(lltype.Void, llmemory.cast_ptr_to_adr(s))
     return s
Example 29
def ll_array_inplace_binop(ITEM, it0, it1, binop):
    ll_assert(it0.size == it1.size, "it0.size == it1.size")
    while it0.index < it0.size:
        it0.dataptr[0] = cast_primitive(ITEM,
                                        binop(it0.dataptr[0], it1.dataptr[0]))
        it0.ll_next()
        it1.ll_next()
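
Behind the iterator machinery this is an elementwise in-place loop. A plain-Python model, without the ITER/dataptr plumbing (illustrative):

# Apply 'binop' elementwise, writing results back into the first array.
def array_inplace_binop_model(a0, a1, binop):
    assert len(a0) == len(a1), "it0.size == it1.size"
    for i in range(len(a0)):
        a0[i] = binop(a0[i], a1[i])

x = [1, 2, 3]
array_inplace_binop_model(x, [10, 20, 30], lambda a, b: a + b)
assert x == [11, 22, 33]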
Example 30
 def malloc_fixedsize_clear(self, typeid, size, can_collect,
                            has_finalizer=False, contains_weakptr=False):
     if (has_finalizer or not can_collect or
         (raw_malloc_usage(size) > self.lb_young_var_basesize and
          raw_malloc_usage(size) > self.largest_young_fixedsize)):
         # ^^^ we do two size comparisons; the first one appears redundant,
         #     but it can be constant-folded if 'size' is a constant; then
         #     it almost always folds down to False, which kills the
         #     second comparison as well.
         ll_assert(not contains_weakptr, "wrong case for mallocing weakref")
         # "non-simple" case or object too big: don't use the nursery
         return SemiSpaceGC.malloc_fixedsize_clear(self, typeid, size,
                                                   can_collect,
                                                   has_finalizer,
                                                   contains_weakptr)
     size_gc_header = self.gcheaderbuilder.size_gc_header
     totalsize = size_gc_header + size
     result = self.nursery_free
     if raw_malloc_usage(totalsize) > self.nursery_top - result:
         result = self.collect_nursery()
     llarena.arena_reserve(result, totalsize)
     # GCFLAG_NO_YOUNG_PTRS is never set on young objs
     self.init_gc_object(result, typeid, flags=0)
     self.nursery_free = result + totalsize
     if contains_weakptr:
         self.young_objects_with_weakrefs.append(result + size_gc_header)
     return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
Example 31
def ll_extend_with_str_slice(lst, s, getstrlen, getstritem, slice):
    start = slice.start
    stop = slice.stop
    len1 = lst.ll_length()
    len2 = getstrlen(s)
    ll_assert(start >= 0, "unexpectedly negative str slice start")
    ll_assert(start <= len2, "str slice start larger than str length")
    if stop > len2:
        stop = len2
    count2 = stop - start
    assert count2 >= 0, "str slice stop smaller than start"
    try:
        newlength = ovfcheck(len1 + count2)
    except OverflowError:
        raise MemoryError
    lst._ll_resize_ge(newlength)
    i = start
    j = len1
    while i < stop:
        c = getstritem(s, i)
        if listItemType(lst) is UniChar:
            c = unichr(ord(c))
        lst.ll_setitem_fast(j, c)
        i += 1
        j += 1
Example 32
 def _really_force(self):
     if self.source_op is None:
         # this case should not occur; I only managed to get it once
         # in pypy-c-jit and couldn't reproduce it.  The point is
         # that it relies on optimizefindnode.py computing exactly
         # the right level of specialization, and it seems that there
         # is still a corner case where it gets too specialized for
         # optimizeopt.py.  Let's not crash in release-built
         # pypy-c-jit's.  XXX find out when
         from pypy.rlib.debug import ll_assert
         ll_assert(False, "_really_force: source_op is None")
         raise InvalidLoop
     #
     newoperations = self.optimizer.newoperations
     newoperations.append(self.source_op)
     self.box = box = self.source_op.result
     #
     iteritems = self._fields.iteritems()
     if not we_are_translated():  #random order is fine, except for tests
         iteritems = list(iteritems)
         iteritems.sort(key=lambda (x, y): x.sort_key())
     for ofs, value in iteritems:
         if value.is_null():
             continue
         subbox = value.force_box()
         op = ResOperation(rop.SETFIELD_GC, [box, subbox], None, descr=ofs)
         newoperations.append(op)
     self._fields = None
Example 33
    def walk_stack_from(self):
        curframe = lltype.malloc(WALKFRAME, flavor='raw')
        otherframe = lltype.malloc(WALKFRAME, flavor='raw')

        # Walk over all the pieces of stack.  They are in a circular linked
        # list of structures of 7 words, the 2 first words being prev/next.
        # The anchor of this linked list is:
        anchor = llop.gc_asmgcroot_static(llmemory.Address, 3)
        initialframedata = anchor.address[1]
        stackscount = 0
        while initialframedata != anchor:     # while we have not looped back
            self.fill_initial_frame(curframe, initialframedata)
            # Loop over all the frames in the stack
            while self.walk_to_parent_frame(curframe, otherframe):
                swap = curframe
                curframe = otherframe    # caller becomes callee
                otherframe = swap
            # Then proceed to the next piece of stack
            initialframedata = initialframedata.address[1]
            stackscount += 1
        #
        expected = rffi.stackcounter.stacks_counter
        ll_assert(not (stackscount < expected), "non-closed stacks around")
        ll_assert(not (stackscount > expected), "stacks counter corruption?")
        lltype.free(otherframe, flavor='raw')
        lltype.free(curframe, flavor='raw')
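
The two ll_assert calls at the end are justified by the structure being walked: the stack pieces form a circular linked list threaded through an anchor sentinel, so the walk must visit exactly rffi.stackcounter.stacks_counter pieces before looping back. A plain-Python model of that traversal (illustrative):

# Walk a circular list from its anchor sentinel, counting the nodes
# visited before returning to the anchor.
class Node:
    def __init__(self):
        self.next = self

def count_pieces(anchor):
    count = 0
    node = anchor.next
    while node is not anchor:
        count += 1
        node = node.next
    return count

anchor = Node()
piece = Node()
piece.next = anchor
anchor.next = piece      # a ring with one piece plus the anchor
assert count_pieces(anchor) == 1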
Example 34
File: rpbc.py Project: ieure/pypy
 def ll_compress(fnptr):
     for c, p in unroll_table:
         if fnptr == p:
             return c
     else:
         ll_assert(fnptr == last_p, "unexpected function pointer")
         return last_c
Example 35
 def try_obtain_free_space(self, needed):
     # XXX for bonus points do big objects differently
     needed = raw_malloc_usage(needed)
     if (self.red_zone >= 2 and self.space_size < self.max_space_size and
         self.double_space_size()):
         pass    # collect was done during double_space_size()
     else:
         self.semispace_collect()
     missing = needed - (self.top_of_space - self.free)
     if missing <= 0:
         return True      # success
     else:
         # first check if the object could possibly fit
         proposed_size = self.space_size
         while missing > 0:
             if proposed_size >= self.max_space_size:
                 return False    # no way
             missing -= proposed_size
             proposed_size *= 2
         # For address space fragmentation reasons, we double the space
         # size possibly several times, moving the objects at each step,
         # instead of going directly for the final size.  We assume that
         # it's a rare case anyway.
         while self.space_size < proposed_size:
             if not self.double_space_size():
                 return False
         ll_assert(needed <= self.top_of_space - self.free,
                      "double_space_size() failed to do its job")
         return True
Example 36
 def ll_iter_broadcast_to_shape(ITER, ao, target_ao):
     "iterate over <ao> but broadcast to the shape of <target_ao>"
     ll_assert(target_ao.ndim == ndim, "target_ao.ndim == ndim")
     delta = j = ndim - ao.ndim
     shape = target_ao.shape
     for i in range(ao.ndim):
         if ao.shape[i] != 1 and ao.shape[i] != shape[j]:
             raise Exception("array is not broadcastable to correct shape")
         j += 1
     it = malloc(ITER)
     it.size = ll_mul_list(target_ao.shape, ndim)
     it.nd_m1 = ndim - 1
     #it.factors[ndim-1] = 1
     for i in unroll_ndim:
         it.dims_m1[i] = shape[i] - 1
         k = i - delta
         if k < 0 or ao.shape[k] != shape[i]:
             #it.contiguous = False
             it.strides[i] = 0
         else:
             it.strides[i] = ao.strides[k]
         it.backstrides[i] = it.strides[i] * it.dims_m1[i]
         #if i > 0:
         #it.factors[ndim-i-1] = it.factors[nd-i]*shape[ndim-i]
     it.ll_reset(ao.dataptr)
     return it
Example 37
    def walk_stack_from(self):
        curframe = lltype.malloc(WALKFRAME, flavor='raw')
        otherframe = lltype.malloc(WALKFRAME, flavor='raw')

        # Walk over all the pieces of stack.  They are in a circular linked
        # list of structures of 7 words, the 2 first words being prev/next.
        # The anchor of this linked list is:
        anchor = llmemory.cast_ptr_to_adr(gcrootanchor)
        initialframedata = anchor.address[1]
        stackscount = 0
        while initialframedata != anchor:     # while we have not looped back
            self.fill_initial_frame(curframe, initialframedata)
            # Loop over all the frames in the stack
            while self.walk_to_parent_frame(curframe, otherframe):
                swap = curframe
                curframe = otherframe    # caller becomes callee
                otherframe = swap
            # Then proceed to the next piece of stack
            initialframedata = initialframedata.address[1]
            stackscount += 1
        #
        expected = rffi.stackcounter.stacks_counter
        ll_assert(not (stackscount < expected), "non-closed stacks around")
        ll_assert(not (stackscount > expected), "stacks counter corruption?")
        lltype.free(otherframe, flavor='raw')
        lltype.free(curframe, flavor='raw')
Example 38
 def restore_state_from(self, shadowstackref):
     ll_assert(bool(shadowstackref.base), "empty shadowstackref!")
     ll_assert(shadowstackref.base <= shadowstackref.top,
               "restore_state_from: broken shadowstack")
     self.gcdata.root_stack_base = shadowstackref.base
     self.gcdata.root_stack_top  = shadowstackref.top
     self._cleanup(shadowstackref)
Example 39
 def try_obtain_free_space(self, needed):
     # XXX for bonus points do big objects differently
     needed = raw_malloc_usage(needed)
     if (self.red_zone >= 2 and self.space_size < self.max_space_size
             and self.double_space_size()):
         pass  # collect was done during double_space_size()
     else:
         self.semispace_collect()
     missing = needed - (self.top_of_space - self.free)
     if missing <= 0:
         return True  # success
     else:
         # first check if the object could possibly fit
         proposed_size = self.space_size
         while missing > 0:
             if proposed_size >= self.max_space_size:
                 return False  # no way
             missing -= proposed_size
             proposed_size *= 2
         # For address space fragmentation reasons, we double the space
         # size possibly several times, moving the objects at each step,
         # instead of going directly for the final size.  We assume that
         # it's a rare case anyway.
         while self.space_size < proposed_size:
             if not self.double_space_size():
                 return False
         ll_assert(needed <= self.top_of_space - self.free,
                   "double_space_size() failed to do its job")
         return True
Example 40
 def malloc_fixedsize_clear(self,
                            typeid,
                            size,
                            has_finalizer=False,
                            is_finalizer_light=False,
                            contains_weakptr=False):
     if (has_finalizer or
         (raw_malloc_usage(size) > self.lb_young_fixedsize
          and raw_malloc_usage(size) > self.largest_young_fixedsize)):
         # ^^^ we do two size comparisons; the first one appears redundant,
         #     but it can be constant-folded if 'size' is a constant; then
         #     it almost always folds down to False, which kills the
         #     second comparison as well.
         ll_assert(not contains_weakptr, "wrong case for mallocing weakref")
         # "non-simple" case or object too big: don't use the nursery
         return SemiSpaceGC.malloc_fixedsize_clear(self, typeid, size,
                                                   has_finalizer,
                                                   is_finalizer_light,
                                                   contains_weakptr)
     size_gc_header = self.gcheaderbuilder.size_gc_header
     totalsize = size_gc_header + size
     result = self.nursery_free
     if raw_malloc_usage(totalsize) > self.nursery_top - result:
         result = self.collect_nursery()
     llarena.arena_reserve(result, totalsize)
     # GCFLAG_NO_YOUNG_PTRS is never set on young objs
     self.init_gc_object(result, typeid, flags=0)
     self.nursery_free = result + totalsize
     if contains_weakptr:
         self.young_objects_with_weakrefs.append(result + size_gc_header)
     return llmemory.cast_adr_to_ptr(result + size_gc_header,
                                     llmemory.GCREF)
Example 41
    def double_space_size(self):
        self.red_zone = 0
        old_fromspace = self.fromspace
        newsize = self.space_size * 2
        newspace = llarena.arena_malloc(newsize, True)
        if not newspace:
            return False  # out of memory
        llarena.arena_free(old_fromspace)
        self.fromspace = newspace
        # now self.tospace contains the existing objects and
        # self.fromspace is the freshly allocated bigger space

        self.semispace_collect(size_changing=True)
        self.top_of_space = self.tospace + newsize
        # now self.tospace is the freshly allocated bigger space,
        # and self.fromspace is the old smaller space, now empty
        llarena.arena_free(self.fromspace)

        newspace = llarena.arena_malloc(newsize, True)
        if not newspace:
            # Complex failure case: we have in self.tospace a big chunk
            # of memory, and the two smaller original spaces are already gone.
            # Unsure if it's worth these efforts, but we can artificially
            # split self.tospace in two again...
            self.max_space_size = self.space_size  # don't try to grow again,
            #              because doing arena_free(self.fromspace) would crash
            self.fromspace = self.tospace + self.space_size
            self.top_of_space = self.fromspace
            ll_assert(self.free <= self.top_of_space,
                      "unexpected growth of GC space usage during collect")
            return False  # out of memory

        self.fromspace = newspace
        self.space_size = newsize
        return True  # success
Example 42
File: hybrid.py Project: ieure/pypy
    def make_a_nonmoving_copy(self, obj, objsize):
        # NB. the object can have a finalizer or be a weakref, but
        # it's not an issue.
        totalsize = self.size_gc_header() + objsize
        tid = self.header(obj).tid
        if tid & GCFLAG_HASHMASK:
            totalsize_incl_hash = totalsize + llmemory.sizeof(lltype.Signed)
        else:
            totalsize_incl_hash = totalsize
        newaddr = self.allocate_external_object(totalsize_incl_hash)
        if not newaddr:
            return llmemory.NULL   # can't raise MemoryError during a collect()
        self._nonmoving_copy_count += 1
        self._nonmoving_copy_size += raw_malloc_usage(totalsize)

        llmemory.raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
        if tid & GCFLAG_HASHMASK:
            hash = self._get_object_hash(obj, objsize, tid)
            (newaddr + totalsize).signed[0] = hash
            tid |= GC_HASH_HASFIELD
        #
        # GCFLAG_UNVISITED is not set
        # GCFLAG_NO_HEAP_PTRS is not set either, conservatively.  It may be
        # set by the next collection's collect_last_generation_roots().
        # This old object is immediately put at generation 3.
        newobj = newaddr + self.size_gc_header()
        hdr = self.header(newobj)
        hdr.tid = tid | self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS
        ll_assert(self.is_last_generation(newobj),
                  "make_a_nonmoving_copy: object too young")
        self.gen3_rawmalloced_objects.append(newobj)
        self.last_generation_root_objects.append(newobj)
        self.rawmalloced_objects_to_trace.append(newobj)   # visit me
        return newobj
Example 43
 def get_header_forwarded_addr(self, obj):
     tid = self.header_forwarded(obj).tid
     ll_assert(tid & GCFLAG_MARKBIT != 0, "dying object is not forwarded")
     GCFLAG_MASK = ~(GCFLAG_MARKBIT | 3)
     res = self.base_forwarding_addr + (tid & GCFLAG_MASK) + self.gcheaderbuilder.size_gc_header
     ll_assert(res < self.finaladdr, "forwarded address >= self.finaladdr")
     return res
Example 44
 def setup_root_walker(self):
     stackbase = self.allocate_stack()
     ll_assert(bool(stackbase), "could not allocate root stack")
     self.gcdata.root_stack_top  = stackbase
     self.gcdata.root_stack_base = stackbase
     if self.thread_setup is not None:
         self.thread_setup()
Example 45
    def make_a_nonmoving_copy(self, obj, objsize):
        # NB. the object can have a finalizer or be a weakref, but
        # it's not an issue.
        totalsize = self.size_gc_header() + objsize
        newaddr = self.allocate_external_object(totalsize)
        if not newaddr:
            return llmemory.NULL   # can't raise MemoryError during a collect()
        if self.config.gcconfig.debugprint:
            self._nonmoving_copy_count += 1
            self._nonmoving_copy_size += raw_malloc_usage(totalsize)

        llmemory.raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
        newobj = newaddr + self.size_gc_header()
        hdr = self.header(newobj)
        hdr.tid |= self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS
        # GCFLAG_UNVISITED is not set
        # GCFLAG_NO_HEAP_PTRS is not set either, conservatively.  It may be
        # set by the next collection's collect_last_generation_roots().
        # This old object is immediately put at generation 3.
        ll_assert(self.is_last_generation(newobj),
                  "make_a_nonmoving_copy: object too young")
        self.gen3_rawmalloced_objects.append(newobj)
        self.last_generation_root_objects.append(newobj)
        self.rawmalloced_objects_to_trace.append(newobj)   # visit me
        return newobj
Example 46
 def setup_root_walker(self):
     stackbase = self.allocate_stack()
     ll_assert(bool(stackbase), "could not allocate root stack")
     self.gcdata.root_stack_top  = stackbase
     self.gcdata.root_stack_base = stackbase
     if self.thread_setup is not None:
         self.thread_setup()
Example 47
 def malloc_varsize_collecting_nursery(self, totalsize):
     result = self.collect_nursery()
     ll_assert(
         raw_malloc_usage(totalsize) <= self.nursery_top - result,
         "not enough room in malloc_varsize_collecting_nursery()")
     llarena.arena_reserve(result, totalsize)
     self.nursery_free = result + llarena.round_up_for_allocation(totalsize)
     return result
Example 48
def ll_array_set(ITEM, it0, it1):
    if it0.size == 0:
        return  # empty LHS..
    ll_assert(it0.size == it1.size, "it0.size == it1.size")
    while it0.index < it0.size:
        it0.dataptr[0] = cast_primitive(ITEM, it1.dataptr[0])
        it0.ll_next()
        it1.ll_next()
Example 49
def ll_listslice_startonly(RESLIST, l1, start):
    len1 = l1.ll_length()
    ll_assert(start >= 0, "unexpectedly negative list slice start")
    ll_assert(start <= len1, "list slice start larger than list length")
    newlength = len1 - start
    l = RESLIST.ll_newlist(newlength)
    ll_arraycopy(l1, l, start, 0, newlength)
    return l
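
At the Python level this computes l1[start:]; the ll_assert calls pin down the precondition 0 <= start <= len(l1), under which newlength is guaranteed non-negative. A minimal model:

# Plain-Python model of ll_listslice_startonly.
def listslice_startonly_model(l1, start):
    assert start >= 0, "unexpectedly negative list slice start"
    assert start <= len(l1), "list slice start larger than list length"
    return l1[start:]

assert listslice_startonly_model([1, 2, 3], 1) == [2, 3]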
Example 50
 def pop(self):
     used = self.used_in_last_chunk - 1
     ll_assert(used >= 0, "pop on empty AddressStack")
     result = self.chunk.items[used]
     self.used_in_last_chunk = used
     if used == 0 and self.chunk.next:
         self.shrink()
     return result
Example 51
def ll_listslice_startonly(RESLIST, l1, start):
    len1 = l1.ll_length()
    ll_assert(start >= 0, "unexpectedly negative list slice start")
    ll_assert(start <= len1, "list slice start larger than list length")
    newlength = len1 - start
    l = RESLIST.ll_newlist(newlength)
    ll_arraycopy(l1, l, start, 0, newlength)
    return l
Example 52
def ll_setitem_nonneg(func, l, index, newitem):
    ll_assert(index >= 0, "unexpectedly negative list setitem index")
    if func is dum_checkidx:
        if index >= l.ll_length():
            raise IndexError
    else:
        ll_assert(index < l.ll_length(), "list setitem index out of bound")
    l.ll_setitem_fast(index, newitem)
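
The dum_checkidx marker selects the checked variant: with it, an out-of-bounds index raises IndexError; without it, being in bounds is a precondition that is merely asserted. A plain-Python model with a boolean in place of the marker (illustrative):

# Checked vs. unchecked setitem for a known non-negative index.
def setitem_nonneg_model(checkidx, l, index, newitem):
    assert index >= 0, "unexpectedly negative list setitem index"
    if checkidx:
        if index >= len(l):
            raise IndexError
    else:
        assert index < len(l), "list setitem index out of bound"
    l[index] = newitem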
Example 53
def ll_array_set(ITEM, it0, it1):
    if it0.size == 0:
        return # empty LHS..
    ll_assert(it0.size == it1.size, "it0.size == it1.size")
    while it0.index < it0.size:
        it0.dataptr[0] = cast_primitive(ITEM, it1.dataptr[0])
        it0.ll_next()
        it1.ll_next()
Example 54
File: hybrid.py Project: ieure/pypy
 def malloc_varsize_collecting_nursery(self, totalsize):
     result = self.collect_nursery()
     ll_assert(raw_malloc_usage(totalsize) <= self.nursery_top - result,
               "not enough room in malloc_varsize_collecting_nursery()")
     llarena.arena_reserve(result, totalsize)
     self.nursery_free = result + llarena.round_up_for_allocation(
         totalsize)
     return result
Example 55
 def pop(self):
     used = self.used_in_last_chunk - 1
     ll_assert(used >= 0, "pop on empty AddressStack")
     result = self.chunk.items[used]
     self.used_in_last_chunk = used
     if used == 0 and self.chunk.next:
         self.shrink()
     return result
Example 56
File: hybrid.py Project: ieure/pypy
 def debug_check_object(self, obj):
     """Check the invariants about 'obj' that should be true
     between collections."""
     GenerationGC.debug_check_object(self, obj)
     tid = self.header(obj).tid
     if tid & GCFLAG_UNVISITED:
         ll_assert(self._d_gen2ro.contains(obj),
                   "GCFLAG_UNVISITED on non-gen2 object")
Example 57
 def get_header_forwarded_addr(self, obj):
     tid = self.header_forwarded(obj).tid
     ll_assert(tid & GCFLAG_MARKBIT != 0, "dying object is not forwarded")
     GCFLAG_MASK = ~(GCFLAG_MARKBIT | 3)
     res = (self.base_forwarding_addr + (tid & GCFLAG_MASK) +
            self.gcheaderbuilder.size_gc_header)
     ll_assert(res < self.finaladdr, "forwarded address >= self.finaladdr")
     return res
Example 58
 def debug_check_object(self, obj):
     """Check the invariants about 'obj' that should be true
     between collections."""
     GenerationGC.debug_check_object(self, obj)
     tid = self.header(obj).tid
     if tid & GCFLAG_UNVISITED:
         ll_assert(self._d_gen2ro.contains(obj),
                   "GCFLAG_UNVISITED on non-gen2 object")
Example 59
def ll_setitem_nonneg(func, l, index, newitem):
    ll_assert(index >= 0, "unexpectedly negative list setitem index")
    if func is dum_checkidx:
        if index >= l.ll_length():
            raise IndexError
    else:
        ll_assert(index < l.ll_length(), "list setitem index out of bound")
    l.ll_setitem_fast(index, newitem)
Example 60
 def get_type_id(self, addr):
     tid = self.header(addr).tid
     ll_assert(tid & (GCFLAG_FORWARDED|GCFLAG_EXTERNAL) != GCFLAG_FORWARDED,
               "get_type_id on forwarded obj")
     # Non-prebuilt forwarded objects are overwritten with a FORWARDSTUB.
     # Although calling get_type_id() on a forwarded object works by itself,
     # we catch it as an error because it's likely that what is then
     # done with the typeid is bogus.
     return llop.extract_ushort(llgroup.HALFWORD, tid)
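
The bit test deserves unpacking: it fails exactly for objects whose GCFLAG_FORWARDED bit is set while GCFLAG_EXTERNAL is clear, since prebuilt (external) objects may carry the forwarded bit without having been overwritten by a forwarding stub. A plain-Python model with hypothetical flag values (only the bit logic matters):

# The assert in get_type_id rejects forwarded, non-external objects.
GCFLAG_FORWARDED = 1
GCFLAG_EXTERNAL = 2

def forwarded_and_not_external(tid):
    return tid & (GCFLAG_FORWARDED | GCFLAG_EXTERNAL) == GCFLAG_FORWARDED

assert forwarded_and_not_external(GCFLAG_FORWARDED)
assert not forwarded_and_not_external(GCFLAG_FORWARDED | GCFLAG_EXTERNAL)
assert not forwarded_and_not_external(GCFLAG_EXTERNAL)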