Example 1
 def command_print(cmd, extra):
     if extra == 'print-me':
         stuff = dbstate.stuff
     elif extra == '$0':
         stuff = dbstate.metavar
     elif extra == '2.35':
         val = rdtoa.strtod('2.35')
         valx, valy = math.modf(val)
         revdb.send_output(rdtoa.dtoa(valx) + '\n')
         revdb.send_output(rdtoa.dtoa(valy) + '\n')
         xx, yy = math.frexp(val)
         revdb.send_output(rdtoa.dtoa(xx) + '\n')
         revdb.send_output('%d\n' % yy)
         return
     elif extra == 'very-long-loop':
         i = 0
         total = 0
         while i < 2000000000:
             total += revdb.flag_io_disabled()
             i += 1
         revdb.send_output(str(total))
         return
     else:
         assert False
     uid = revdb.get_unique_id(stuff)
     ll_assert(uid > 0, "uid == 0")
     revdb.send_nextnid(uid)  # outputs '$NUM = '
     revdb.send_output('stuff\n')
     dbstate.printed_stuff = stuff
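Every snippet in this listing uses ll_assert from rpython.rlib.debug. For reference, a minimal sketch of such a helper, assuming that when running untranslated it behaves like a plain assertion (the real helper is also registered with the translator so the check survives in compiled code):

 def ll_assert(condition, msg):
     # minimal sketch only, not the actual rpython.rlib.debug implementation:
     # a plain assertion while running on top of CPython
     assert condition, msg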
Example 2
 def restore_state_from(self, shadowstackref):
     ll_assert(bool(shadowstackref.base), "empty shadowstackref!")
     ll_assert(shadowstackref.base <= shadowstackref.top,
               "restore_state_from: broken shadowstack")
     self.gcdata.root_stack_base = shadowstackref.base
     self.gcdata.root_stack_top = shadowstackref.top
     self._cleanup(shadowstackref)
Example 3
 def forget_current_state(self):
     ll_assert(self.gcdata.root_stack_base == self.gcdata.root_stack_top,
               "forget_current_state: shadowstack not empty!")
     if self.unused_full_stack:
         llmemory.raw_free(self.unused_full_stack)
     self.unused_full_stack = self.gcdata.root_stack_base
     self.gcdata.root_stack_top = llmemory.NULL  # to detect missing restore
Example 4
 def trace_partial(self, obj, start, stop, callback, arg):
     """Like trace(), but only walk the array part, for indices in
     range(start, stop).  Must only be called if has_gcptr_in_varsize().
     """
     length = stop - start
     typeid = self.get_type_id(obj)
     if self.is_gcarrayofgcptr(typeid):
         # a performance shortcut for GcArray(gcptr)
         item = obj + llmemory.gcarrayofptr_itemsoffset
         item += llmemory.gcarrayofptr_singleitemoffset * start
         while length > 0:
             if self.points_to_valid_gc_object(item):
                 callback(item, arg)
             item += llmemory.gcarrayofptr_singleitemoffset
             length -= 1
         return
     ll_assert(self.has_gcptr_in_varsize(typeid),
               "trace_partial() on object without has_gcptr_in_varsize()")
     item = obj + self.varsize_offset_to_variable_part(typeid)
     offsets = self.varsize_offsets_to_gcpointers_in_var_part(typeid)
     itemlength = self.varsize_item_sizes(typeid)
     item += itemlength * start
     while length > 0:
         j = 0
         while j < len(offsets):
             itemobj = item + offsets[j]
             if self.points_to_valid_gc_object(itemobj):
                 callback(itemobj, arg)
             j += 1
         item += itemlength
         length -= 1
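A hypothetical caller sketch for trace_partial(): following the docstring above, only the array part is walked, so a natural use is visiting just the items appended when a varsized object grows. The helper name and the oldlength/newlength parameters are assumptions for illustration:

 def trace_new_items(gc, obj, oldlength, newlength, callback, arg):
     # only meaningful when the variable-sized part can hold GC pointers
     typeid = gc.get_type_id(obj)
     if gc.has_gcptr_in_varsize(typeid):
         # visits exactly the indices in range(oldlength, newlength)
         gc.trace_partial(obj, oldlength, newlength, callback, arg)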
Example 5
def _ll_dict_setitem_lookup_done(d, key, value, hash, i):
    valid = (i & HIGHEST_BIT) == 0
    i = i & MASK
    ENTRY = lltype.typeOf(d.entries).TO.OF
    entry = d.entries[i]
    if not d.entries.everused(i):
        # a new entry that was never used before
        ll_assert(not valid, "valid but not everused")
        rc = d.resize_counter - 3
        if rc <= 0:  # if needed, resize the dict -- before the insertion
            ll_dict_resize(d)
            i = ll_dict_lookup_clean(d, hash)  # then redo the lookup for 'key'
            entry = d.entries[i]
            rc = d.resize_counter - 3
            ll_assert(rc > 0, "ll_dict_resize failed?")
        d.resize_counter = rc
        if hasattr(ENTRY, 'f_everused'): entry.f_everused = True
        entry.value = value
    else:
        # override an existing or deleted entry
        entry.value = value
        if valid:
            return
    entry.key = key
    if hasattr(ENTRY, 'f_hash'): entry.f_hash = hash
    if hasattr(ENTRY, 'f_valid'): entry.f_valid = True
    d.num_items += 1
Example 6
    def make_a_nonmoving_copy(self, obj, objsize):
        # NB. the object can have a finalizer or be a weakref, but
        # it's not an issue.
        totalsize = self.size_gc_header() + objsize
        tid = self.header(obj).tid
        if tid & GCFLAG_HASHMASK:
            totalsize_incl_hash = totalsize + llmemory.sizeof(lltype.Signed)
        else:
            totalsize_incl_hash = totalsize
        newaddr = self.allocate_external_object(totalsize_incl_hash)
        if not newaddr:
            return llmemory.NULL  # can't raise MemoryError during a collect()
        self._nonmoving_copy_count += 1
        self._nonmoving_copy_size += raw_malloc_usage(totalsize)

        llmemory.raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
        if tid & GCFLAG_HASHMASK:
            hash = self._get_object_hash(obj, objsize, tid)
            (newaddr + totalsize).signed[0] = hash
            tid |= GC_HASH_HASFIELD
        #
        # GCFLAG_UNVISITED is not set
        # GCFLAG_NO_HEAP_PTRS is not set either, conservatively.  It may be
        # set by the next collection's collect_last_generation_roots().
        # This old object is immediately put at generation 3.
        newobj = newaddr + self.size_gc_header()
        hdr = self.header(newobj)
        hdr.tid = tid | self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS
        ll_assert(self.is_last_generation(newobj),
                  "make_a_nonmoving_copy: object too young")
        self.gen3_rawmalloced_objects.append(newobj)
        self.last_generation_root_objects.append(newobj)
        self.rawmalloced_objects_to_trace.append(newobj)  # visit me
        return newobj
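A layout sketch of the nonmoving copy built above, reconstructed from the size arithmetic in the code (illustrative only):

 # newaddr                                        newaddr + totalsize
 # |                                              |
 # [ GC header ][ object body, 'objsize' bytes   ][ optional Signed hash slot,
 #                                                  present iff GCFLAG_HASHMASK ]
 # 'newobj' points just past the GC header; the optional hash slot is what
 # makes totalsize_incl_hash larger than totalsize.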
Example 7
    def walk_stack_from(self):
        curframe = lltype.malloc(WALKFRAME, flavor='raw')
        otherframe = lltype.malloc(WALKFRAME, flavor='raw')

        # Walk over all the pieces of stack.  They are in a circular linked
        # list of structures of 7 words, the 2 first words being prev/next.
        # The anchor of this linked list is:
        anchor = llmemory.cast_ptr_to_adr(gcrootanchor)
        initialframedata = anchor.address[1]
        stackscount = 0
        while initialframedata != anchor:     # while we have not looped back
            self.fill_initial_frame(curframe, initialframedata)
            # Loop over all the frames in the stack
            while self.walk_to_parent_frame(curframe, otherframe):
                swap = curframe
                curframe = otherframe    # caller becomes callee
                otherframe = swap
            # Then proceed to the next piece of stack
            initialframedata = initialframedata.address[1]
            stackscount += 1
        #
        expected = rffi.stackcounter.stacks_counter
        if NonConstant(0):
            rffi.stackcounter.stacks_counter += 42    # hack to force it
        ll_assert(not (stackscount < expected), "non-closed stacks around")
        ll_assert(not (stackscount > expected), "stacks counter corruption?")
        lltype.free(otherframe, flavor='raw')
        lltype.free(curframe, flavor='raw')
Example 8
    def double_space_size(self):
        self.red_zone = 0
        old_fromspace = self.fromspace
        newsize = self.space_size * 2
        newspace = llarena.arena_malloc(newsize, True)
        if not newspace:
            return False  # out of memory
        llarena.arena_free(old_fromspace)
        self.fromspace = newspace
        # now self.tospace contains the existing objects and
        # self.fromspace is the freshly allocated bigger space

        self.semispace_collect(size_changing=True)
        self.top_of_space = self.tospace + newsize
        # now self.tospace is the freshly allocated bigger space,
        # and self.fromspace is the old smaller space, now empty
        llarena.arena_free(self.fromspace)

        newspace = llarena.arena_malloc(newsize, True)
        if not newspace:
            # Complex failure case: we have in self.tospace a big chunk
            # of memory, and the two smaller original spaces are already gone.
            # Unsure if it's worth these efforts, but we can artificially
            # split self.tospace in two again...
            self.max_space_size = self.space_size  # don't try to grow again,
            #              because doing arena_free(self.fromspace) would crash
            self.fromspace = self.tospace + self.space_size
            self.top_of_space = self.fromspace
            ll_assert(self.free <= self.top_of_space,
                      "unexpected growth of GC space usage during collect")
            return False  # out of memory

        self.fromspace = newspace
        self.space_size = newsize
        return True  # success
Example 9
 def _prepare_unused_stack(self):
     ll_assert(self.unused_full_stack == llmemory.NULL,
               "already an unused_full_stack")
     root_stack_size = sizeofaddr * self.root_stack_depth
     self.unused_full_stack = llmemory.raw_malloc(root_stack_size)
     if self.unused_full_stack == llmemory.NULL:
         raise MemoryError
Example 10
 def ll_prepare_free_slot(_unused):
     """Free up a slot in the array of MAX entries, ready for storing
     a new shadowstackref.  Return the memory of the now-unused full
     shadowstack.
     """
     index = fullstack_cache[0]
     if index > 0:
         return llmemory.NULL     # there is already at least one free slot
     #
     # make a compact copy in one old entry and return the
     # original full-sized memory
     index = -index
     ll_assert(index > 0, "prepare_free_slot: cache[0] == 0")
     compacting = lltype.cast_int_to_ptr(SHADOWSTACKREFPTR,
                                         fullstack_cache[index])
     index += 1
     if index >= ShadowStackPool.MAX:
         index = 1
     fullstack_cache[0] = -index    # update to the next value in order
     #
     compacting.detach()
     original = compacting.base
     size = compacting.top - original
     new = llmemory.raw_malloc(size)
     if new == llmemory.NULL:
         return llmemory.NULL
     llmemory.raw_memcopy(original, new, size)
     compacting.base = new
     compacting.top = new + size
     return original
Example 11
 def save_current_state_away(self, shadowstackref, ncontext):
     """Save the current state away into 'shadowstackref'.
     This either works, or raises MemoryError and nothing is done.
     To do a switch, first call save_current_state_away() or
     forget_current_state(), and then call restore_state_from()
     or start_fresh_new_state().
     """
     fresh_free_fullstack = shadowstackref.prepare_free_slot()
     if self.unused_full_stack:
         if fresh_free_fullstack:
             llmemory.raw_free(fresh_free_fullstack)
     elif fresh_free_fullstack:
         self.unused_full_stack = fresh_free_fullstack
     else:
         self._prepare_unused_stack()
     #
     shadowstackref.base = self.gcdata.root_stack_base
     shadowstackref.top  = self.gcdata.root_stack_top
     shadowstackref.context = ncontext
     ll_assert(shadowstackref.base <= shadowstackref.top,
               "save_current_state_away: broken shadowstack")
     shadowstackref.attach()
     #
     # cannot use llop.gc_writebarrier() here, because
     # we are in a minimally-transformed GC helper :-/
     gc = self.gcdata.gc
     if hasattr(gc.__class__, 'write_barrier'):
         shadowstackadr = llmemory.cast_ptr_to_adr(shadowstackref)
         gc.write_barrier(shadowstackadr)
     #
     self.gcdata.root_stack_top = llmemory.NULL  # to detect missing restore
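A hypothetical sketch of the switch sequence described by the docstring above; the pool object and argument names are assumptions for illustration:

 def switch_to(pool, current_ref, target_ref, ncontext):
     # save the currently installed shadowstack into 'current_ref'
     # (may raise MemoryError, in which case nothing has been changed) ...
     pool.save_current_state_away(current_ref, ncontext)
     # ... then install the shadowstack saved in 'target_ref'
     pool.restore_state_from(target_ref)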
Example 12
 def ll_compress(fnptr):
     for c, p in unroll_table:
         if fnptr == p:
             return c
     else:
         ll_assert(fnptr == last_p, "unexpected function pointer")
         return last_c
Example 13
 def malloc_fixedsize_clear(self, typeid, size,
                            has_finalizer=False,
                            is_finalizer_light=False,
                            contains_weakptr=False):
     if (has_finalizer or
         (raw_malloc_usage(size) > self.lb_young_fixedsize and
          raw_malloc_usage(size) > self.largest_young_fixedsize)):
         # ^^^ we do two size comparisons; the first one appears redundant,
         #     but it can be constant-folded if 'size' is a constant; then
         #     it almost always folds down to False, which kills the
         #     second comparison as well.
         ll_assert(not contains_weakptr, "wrong case for mallocing weakref")
         # "non-simple" case or object too big: don't use the nursery
         return SemiSpaceGC.malloc_fixedsize_clear(self, typeid, size,
                                                   has_finalizer,
                                                   is_finalizer_light,
                                                   contains_weakptr)
     size_gc_header = self.gcheaderbuilder.size_gc_header
     totalsize = size_gc_header + size
     result = self.nursery_free
     if raw_malloc_usage(totalsize) > self.nursery_top - result:
         result = self.collect_nursery()
     llarena.arena_reserve(result, totalsize)
     # GCFLAG_NO_YOUNG_PTRS is never set on young objs
     self.init_gc_object(result, typeid, flags=0)
     self.nursery_free = result + totalsize
     if contains_weakptr:
         self.young_objects_with_weakrefs.append(result + size_gc_header)
     return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
Example 14
 def trace_partial(self, obj, start, stop, callback, arg):
     """Like trace(), but only walk the array part, for indices in
     range(start, stop).  Must only be called if has_gcptr_in_varsize().
     """
     length = stop - start
     typeid = self.get_type_id(obj)
     if self.is_gcarrayofgcptr(typeid):
         # a performance shortcut for GcArray(gcptr)
         item = obj + llmemory.gcarrayofptr_itemsoffset
         item += llmemory.gcarrayofptr_singleitemoffset * start
         while length > 0:
             if self.points_to_valid_gc_object(item):
                 callback(item, arg)
             item += llmemory.gcarrayofptr_singleitemoffset
             length -= 1
         return
     ll_assert(self.has_gcptr_in_varsize(typeid),
               "trace_partial() on object without has_gcptr_in_varsize()")
     item = obj + self.varsize_offset_to_variable_part(typeid)
     offsets = self.varsize_offsets_to_gcpointers_in_var_part(typeid)
     itemlength = self.varsize_item_sizes(typeid)
     item += itemlength * start
     while length > 0:
         j = 0
         while j < len(offsets):
             itemobj = item + offsets[j]
             if self.points_to_valid_gc_object(itemobj):
                 callback(itemobj, arg)
             j += 1
         item += itemlength
         length -= 1
Example 15
 def restore_state_from(self, shadowstackref):
     ll_assert(bool(shadowstackref.base), "empty shadowstackref!")
     ll_assert(shadowstackref.base <= shadowstackref.top,
               "restore_state_from: broken shadowstack")
     self.gcdata.root_stack_base = shadowstackref.base
     self.gcdata.root_stack_top  = shadowstackref.top
     self._cleanup(shadowstackref)
Example 16
 def f(i):
     b = B()
     lst = [i]
     lst[0] += 1
     b.y = lst
     ll_assert(b.y is lst, "copying when reading out the attr?")
     return b.y[0]
Example 17
def jitframe_trace(gc, obj_addr, callback, arg):
    gc._trace_callback(callback, arg, obj_addr + getofs('jf_descr'))
    gc._trace_callback(callback, arg, obj_addr + getofs('jf_force_descr'))
    gc._trace_callback(callback, arg, obj_addr + getofs('jf_savedata'))
    gc._trace_callback(callback, arg, obj_addr + getofs('jf_guard_exc'))
    gc._trace_callback(callback, arg, obj_addr + getofs('jf_forward'))

    if IS_32BIT:
        MAX = 32
    else:
        MAX = 64
    gcmap = (obj_addr + getofs('jf_gcmap')).address[0]
    if not gcmap:
        return      # done
    gcmap_lgt = (gcmap + GCMAPLENGTHOFS).signed[0]
    no = 0
    while no < gcmap_lgt:
        cur = (gcmap + GCMAPBASEOFS + UNSIGN_SIZE * no).unsigned[0]
        bitindex = 0
        while bitindex < MAX:
            if cur & (1 << bitindex):
                # the 'bitindex' is set in 'cur'
                index = no * SIZEOFSIGNED * 8 + bitindex
                # sanity check
                frame_lgt = (obj_addr + getofs('jf_frame') + LENGTHOFS) \
                    .signed[0]
                ll_assert(index < frame_lgt, "bogus frame field get")
                gc._trace_callback(callback, arg,
                                   obj_addr + getofs('jf_frame') +
                                   BASEITEMOFS + SIGN_SIZE * index)
            bitindex += 1
        no += 1
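A worked example of the bitmap arithmetic above, assuming a 64-bit build (SIZEOFSIGNED == 8, MAX == 64): if gcmap word no == 2 has bit 5 set, then index = 2 * 8 * 8 + 5 = 133, so the callback is invoked on obj_addr + getofs('jf_frame') + BASEITEMOFS + SIGN_SIZE * 133, i.e. on frame slot 133.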
Example 18
 def try_obtain_free_space(self, needed):
     # XXX for bonus points do big objects differently
     needed = raw_malloc_usage(needed)
     if (self.red_zone >= 2 and self.space_size < self.max_space_size
             and self.double_space_size()):
         pass  # collect was done during double_space_size()
     else:
         self.semispace_collect()
     missing = needed - (self.top_of_space - self.free)
     if missing <= 0:
         return True  # success
     else:
         # first check if the object could possibly fit
         proposed_size = self.space_size
         while missing > 0:
             if proposed_size >= self.max_space_size:
                 return False  # no way
             missing -= proposed_size
             proposed_size *= 2
         # For address space fragmentation reasons, we double the space
         # size possibly several times, moving the objects at each step,
         # instead of going directly for the final size.  We assume that
         # it's a rare case anyway.
         while self.space_size < proposed_size:
             if not self.double_space_size():
                 return False
         ll_assert(needed <= self.top_of_space - self.free,
                   "double_space_size() failed to do its job")
         return True
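A worked example of the sizing loop above: with space_size == 4 and missing == 10 (arbitrary units), proposed_size grows 4 -> 8 -> 16 while missing drops to 6 and then -2, so the second loop calls double_space_size() twice (4 -> 8 -> 16) instead of jumping directly to the final size.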
Example 19
 def f(i):
     b = B()
     lst = [i]
     lst[0] += 1
     b.y = lst
     ll_assert(b.y is lst, "copying when reading out the attr?")
     return b.y[0]
Example 20
    def double_space_size(self):
        self.red_zone = 0
        old_fromspace = self.fromspace
        newsize = self.space_size * 2
        newspace = llarena.arena_malloc(newsize, True)
        if not newspace:
            return False    # out of memory
        llarena.arena_free(old_fromspace)
        self.fromspace = newspace
        # now self.tospace contains the existing objects and
        # self.fromspace is the freshly allocated bigger space

        self.semispace_collect(size_changing=True)
        self.top_of_space = self.tospace + newsize
        # now self.tospace is the freshly allocated bigger space,
        # and self.fromspace is the old smaller space, now empty
        llarena.arena_free(self.fromspace)

        newspace = llarena.arena_malloc(newsize, True)
        if not newspace:
            # Complex failure case: we have in self.tospace a big chunk
            # of memory, and the two smaller original spaces are already gone.
            # Unsure if it's worth these efforts, but we can artificially
            # split self.tospace in two again...
            self.max_space_size = self.space_size    # don't try to grow again,
            #              because doing arena_free(self.fromspace) would crash
            self.fromspace = self.tospace + self.space_size
            self.top_of_space = self.fromspace
            ll_assert(self.free <= self.top_of_space,
                         "unexpected growth of GC space usage during collect")
            return False     # out of memory

        self.fromspace = newspace
        self.space_size = newsize
        return True    # success
Example 21
 def try_obtain_free_space(self, needed):
     # XXX for bonus points do big objects differently
     needed = raw_malloc_usage(needed)
     if (self.red_zone >= 2 and self.space_size < self.max_space_size and
         self.double_space_size()):
         pass    # collect was done during double_space_size()
     else:
         self.semispace_collect()
     missing = needed - (self.top_of_space - self.free)
     if missing <= 0:
         return True      # success
     else:
         # first check if the object could possibly fit
         proposed_size = self.space_size
         while missing > 0:
             if proposed_size >= self.max_space_size:
                 return False    # no way
             missing -= proposed_size
             proposed_size *= 2
         # For address space fragmentation reasons, we double the space
         # size possibly several times, moving the objects at each step,
         # instead of going directly for the final size.  We assume that
         # it's a rare case anyway.
         while self.space_size < proposed_size:
             if not self.double_space_size():
                 return False
         ll_assert(needed <= self.top_of_space - self.free,
                      "double_space_size() failed to do its job")
         return True
Example 22
 def attach_handle_on_suspstack(self, handle):
     s = self.suspstack
     self.suspstack = NULL_SUSPSTACK
     ll_assert(bool(s.anchor), "s.anchor should not be null")
     s.handle = handle
     llop.gc_writebarrier(lltype.Void, llmemory.cast_ptr_to_adr(s))
     return s
Example 23
def jitframe_trace(gc, obj_addr, callback, arg):
    gc._trace_callback(callback, arg, obj_addr + getofs('jf_descr'))
    gc._trace_callback(callback, arg, obj_addr + getofs('jf_force_descr'))
    gc._trace_callback(callback, arg, obj_addr + getofs('jf_savedata'))
    gc._trace_callback(callback, arg, obj_addr + getofs('jf_guard_exc'))
    gc._trace_callback(callback, arg, obj_addr + getofs('jf_forward'))

    if IS_32BIT:
        MAX = 32
    else:
        MAX = 64
    gcmap = (obj_addr + getofs('jf_gcmap')).address[0]
    if not gcmap:
        return  # done
    gcmap_lgt = (gcmap + GCMAPLENGTHOFS).signed[0]
    no = 0
    while no < gcmap_lgt:
        cur = (gcmap + GCMAPBASEOFS + UNSIGN_SIZE * no).unsigned[0]
        bitindex = 0
        while bitindex < MAX:
            if cur & (1 << bitindex):
                # the 'bitindex' is set in 'cur'
                index = no * SIZEOFSIGNED * 8 + bitindex
                # sanity check
                frame_lgt = (obj_addr + getofs('jf_frame') + LENGTHOFS) \
                    .signed[0]
                ll_assert(index < frame_lgt, "bogus frame field get")
                gc._trace_callback(
                    callback, arg, obj_addr + getofs('jf_frame') +
                    BASEITEMOFS + SIGN_SIZE * index)
            bitindex += 1
        no += 1
Example 24
 def attach_handle_on_suspstack(self, handle):
     s = self.suspstack
     self.suspstack = NULL_SUSPSTACK
     ll_assert(bool(s.anchor), "s.anchor should not be null")
     s.handle = handle
     llop.gc_writebarrier(lltype.Void, llmemory.cast_ptr_to_adr(s))
     return s
Example 25
 def forget_current_state(self):
     ll_assert(self.gcdata.root_stack_base == self.gcdata.root_stack_top,
               "forget_current_state: shadowstack not empty!")
     if self.unused_full_stack:
         llmemory.raw_free(self.unused_full_stack)
     self.unused_full_stack = self.gcdata.root_stack_base
     self.gcdata.root_stack_top = llmemory.NULL  # to detect missing restore
Example 26
def _ll_dict_setitem_lookup_done(d, key, value, hash, i):
    valid = (i & HIGHEST_BIT) == 0
    i = i & MASK
    ENTRY = lltype.typeOf(d.entries).TO.OF
    entry = d.entries[i]
    if not d.entries.everused(i):
        # a new entry that was never used before
        ll_assert(not valid, "valid but not everused")
        rc = d.resize_counter - 3
        if rc <= 0:       # if needed, resize the dict -- before the insertion
            ll_dict_resize(d)
            i = ll_dict_lookup_clean(d, hash)  # then redo the lookup for 'key'
            entry = d.entries[i]
            rc = d.resize_counter - 3
            ll_assert(rc > 0, "ll_dict_resize failed?")
        d.resize_counter = rc
        if hasattr(ENTRY, 'f_everused'): entry.f_everused = True
        entry.value = value
    else:
        # override an existing or deleted entry
        entry.value = value
        if valid:
            return
    entry.key = key
    if hasattr(ENTRY, 'f_hash'):  entry.f_hash = hash
    if hasattr(ENTRY, 'f_valid'): entry.f_valid = True
    d.num_items += 1
Example 27
    def make_a_nonmoving_copy(self, obj, objsize):
        # NB. the object can have a finalizer or be a weakref, but
        # it's not an issue.
        totalsize = self.size_gc_header() + objsize
        tid = self.header(obj).tid
        if tid & GCFLAG_HASHMASK:
            totalsize_incl_hash = totalsize + llmemory.sizeof(lltype.Signed)
        else:
            totalsize_incl_hash = totalsize
        newaddr = self.allocate_external_object(totalsize_incl_hash)
        if not newaddr:
            return llmemory.NULL   # can't raise MemoryError during a collect()
        self._nonmoving_copy_count += 1
        self._nonmoving_copy_size += raw_malloc_usage(totalsize)

        llmemory.raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
        if tid & GCFLAG_HASHMASK:
            hash = self._get_object_hash(obj, objsize, tid)
            (newaddr + totalsize).signed[0] = hash
            tid |= GC_HASH_HASFIELD
        #
        # GCFLAG_UNVISITED is not set
        # GCFLAG_NO_HEAP_PTRS is not set either, conservatively.  It may be
        # set by the next collection's collect_last_generation_roots().
        # This old object is immediately put at generation 3.
        newobj = newaddr + self.size_gc_header()
        hdr = self.header(newobj)
        hdr.tid = tid | self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS
        ll_assert(self.is_last_generation(newobj),
                  "make_a_nonmoving_copy: object too young")
        self.gen3_rawmalloced_objects.append(newobj)
        self.last_generation_root_objects.append(newobj)
        self.rawmalloced_objects_to_trace.append(newobj)   # visit me
        return newobj
Example 28
 def malloc_fixedsize_clear(self, typeid, size,
                            has_finalizer=False,
                            is_finalizer_light=False,
                            contains_weakptr=False):
     if (has_finalizer or
         (raw_malloc_usage(size) > self.lb_young_fixedsize and
          raw_malloc_usage(size) > self.largest_young_fixedsize)):
         # ^^^ we do two size comparisons; the first one appears redundant,
         #     but it can be constant-folded if 'size' is a constant; then
         #     it almost always folds down to False, which kills the
         #     second comparison as well.
         ll_assert(not contains_weakptr, "wrong case for mallocing weakref")
         # "non-simple" case or object too big: don't use the nursery
         return SemiSpaceGC.malloc_fixedsize_clear(self, typeid, size,
                                                   has_finalizer,
                                                   is_finalizer_light,
                                                   contains_weakptr)
     size_gc_header = self.gcheaderbuilder.size_gc_header
     totalsize = size_gc_header + size
     result = self.nursery_free
     if raw_malloc_usage(totalsize) > self.nursery_top - result:
         result = self.collect_nursery()
     llarena.arena_reserve(result, totalsize)
     # GCFLAG_NO_YOUNG_PTRS is never set on young objs
     self.init_gc_object(result, typeid, flags=0)
     self.nursery_free = result + totalsize
     if contains_weakptr:
         self.young_objects_with_weakrefs.append(result + size_gc_header)
     return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
Example 29
def get_result_suspstack(h):
    # Now we are in the target, after the switch() or the new().
    # Note that this whole module was carefully written in such a way as
    # not to invoke pushing/popping things off the shadowstack at
    # unexpected moments...
    oldsuspstack = gcrootfinder.oldsuspstack
    newsuspstack = gcrootfinder.newsuspstack
    gcrootfinder.oldsuspstack = NULL_SUSPSTACK
    gcrootfinder.newsuspstack = NULL_SUSPSTACK
    if not h:
        raise MemoryError
    # We still have the old shadowstack active at this point; save it
    # away, and restore the new one
    if oldsuspstack:
        ll_assert(not _c.is_empty_handle(h),"unexpected empty stacklet handle")
        h = llmemory.cast_ptr_to_adr(h)
        llop.gc_save_current_state_away(lltype.Void, oldsuspstack, h)
    else:
        ll_assert(_c.is_empty_handle(h),"unexpected non-empty stacklet handle")
        llop.gc_forget_current_state(lltype.Void)
    #
    llop.gc_restore_state_from(lltype.Void, newsuspstack)
    #
    # From this point on, 'newsuspstack' is consumed and done, its
    # shadow stack installed as the current one.  It should not be
    # used any more.  For performance, we avoid it being deallocated
    # by letting it be reused on the next switch.
    gcrootfinder.oldsuspstack = newsuspstack
    # Return.
    return oldsuspstack
Example 30
 def debug_check_object(self, obj):
     """Check the invariants about 'obj' that should be true
     between collections."""
     GenerationGC.debug_check_object(self, obj)
     tid = self.header(obj).tid
     if tid & GCFLAG_UNVISITED:
         ll_assert(self._d_gen2ro.contains(obj),
                   "GCFLAG_UNVISITED on non-gen2 object")
Example 31
 def malloc_varsize_collecting_nursery(self, totalsize):
     result = self.collect_nursery()
     ll_assert(raw_malloc_usage(totalsize) <= self.nursery_top - result,
               "not enough room in malloc_varsize_collecting_nursery()")
     llarena.arena_reserve(result, totalsize)
     self.nursery_free = result + llarena.round_up_for_allocation(
         totalsize)
     return result
Example 32
def ll_listslice_startonly(RESLIST, l1, start):
    len1 = l1.ll_length()
    ll_assert(start >= 0, "unexpectedly negative list slice start")
    ll_assert(start <= len1, "list slice start larger than list length")
    newlength = len1 - start
    l = RESLIST.ll_newlist(newlength)
    ll_arraycopy(l1, l, start, 0, newlength)
    return l
Example 33
def ll_record_exact_class(ll_value, ll_cls):
    from rpython.rlib.debug import ll_assert
    from rpython.rtyper.lltypesystem.lloperation import llop
    from rpython.rtyper.lltypesystem import lltype
    from rpython.rtyper.rclass import ll_type
    ll_assert(ll_value != lltype.nullptr(lltype.typeOf(ll_value).TO), "record_exact_class called with None argument")
    ll_assert(ll_type(ll_value) is ll_cls, "record_exact_class called with invalid arguments")
    llop.jit_record_exact_class(lltype.Void, ll_value, ll_cls)
Example 34
 def mass_free(self, ok_to_free_func):
     """For each object, if ok_to_free_func(obj) returns True, then free
     the object.
     """
     self.mass_free_prepare()
     #
     res = self.mass_free_incremental(ok_to_free_func, sys.maxint)
     ll_assert(res, "non-incremental mass_free_in_pages() returned False")
Example 35
 def ll_attach(shadowstackref):
     """After prepare_free_slot(), store a shadowstackref in that slot."""
     index = fullstack_cache[0]
     ll_assert(index > 0, "fullstack attach: no free slot")
     fullstack_cache[0] = fullstack_cache[index]
     fullstack_cache[index] = lltype.cast_ptr_to_int(shadowstackref)
     ll_assert(shadowstackref.fsindex == 0, "fullstack attach: already one?")
     shadowstackref.fsindex = index    # > 0
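A hypothetical sketch of how the two cache helpers combine, following their docstrings; the surrounding pool bookkeeping and error handling are omitted:

 def cache_shadowstackref(shadowstackref):
     # possibly compact one cached entry and get back its full-sized memory
     # block (llmemory.NULL if a slot was already free, or if the compact
     # copy could not be allocated)
     unused = ll_prepare_free_slot(None)
     # store the new reference in the slot that is now free
     ll_attach(shadowstackref)
     return unused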
Example 36
def ll_shrink_final(ll_builder):
    final_size = ll_builder.current_pos
    ll_assert(final_size <= ll_builder.total_size,
              "final_size > ll_builder.total_size?")
    buf = rgc.ll_shrink_array(ll_builder.current_buf, final_size)
    ll_builder.current_buf = buf
    ll_builder.current_end = final_size
    ll_builder.total_size = final_size
Example 37
 def malloc_varsize_collecting_nursery(self, totalsize):
     result = self.collect_nursery()
     ll_assert(
         raw_malloc_usage(totalsize) <= self.nursery_top - result,
         "not enough room in malloc_varsize_collecting_nursery()")
     llarena.arena_reserve(result, totalsize)
     self.nursery_free = result + llarena.round_up_for_allocation(totalsize)
     return result
Example 38
def ll_shrink_final(ll_builder):
    final_size = ll_builder.current_pos
    ll_assert(final_size <= ll_builder.total_size,
              "final_size > ll_builder.total_size?")
    buf = rgc.ll_shrink_array(ll_builder.current_buf, final_size)
    ll_builder.current_buf = buf
    ll_builder.current_end = final_size
    ll_builder.total_size = final_size
Example 39
 def debug_check_object(self, obj):
     """Check the invariants about 'obj' that should be true
     between collections."""
     GenerationGC.debug_check_object(self, obj)
     tid = self.header(obj).tid
     if tid & GCFLAG_UNVISITED:
         ll_assert(self._d_gen2ro.contains(obj),
                   "GCFLAG_UNVISITED on non-gen2 object")
Example 40
def ll_listslice_startonly(RESLIST, l1, start):
    len1 = l1.ll_length()
    ll_assert(start >= 0, "unexpectedly negative list slice start")
    ll_assert(start <= len1, "list slice start larger than list length")
    newlength = len1 - start
    l = RESLIST.ll_newlist(newlength)
    ll_arraycopy(l1, l, start, 0, newlength)
    return l
Example 41
File: jit.py Project: sota/pypy-old
def ll_record_exact_class(ll_value, ll_cls):
    from rpython.rlib.debug import ll_assert
    from rpython.rtyper.lltypesystem.lloperation import llop
    from rpython.rtyper.lltypesystem import lltype
    from rpython.rtyper.rclass import ll_type
    ll_assert(ll_value != lltype.nullptr(lltype.typeOf(ll_value).TO), "record_exact_class called with None argument")
    ll_assert(ll_type(ll_value) is ll_cls, "record_exact_class called with invalid arguments")
    llop.jit_record_exact_class(lltype.Void, ll_value, ll_cls)
Example 42
 def pop(self):
     used = self.used_in_last_chunk - 1
     ll_assert(used >= 0, "pop on empty AddressStack")
     result = self.chunk.items[used]
     self.used_in_last_chunk = used
     if used == 0 and self.chunk.next:
         self.shrink()
     return result
Example 43
 def command_attachid(cmd, extra):
     index_metavar = cmd.c_arg1
     uid = cmd.c_arg2
     ll_assert(index_metavar == 0, "index_metavar != 0")  # in this test
     dbstate.metavar = dbstate.printed_stuff
     if dbstate.metavar is None:
         # uid not found, probably a future object
         dbstate.watch_future = uid
Example 44
 def mass_free(self, ok_to_free_func):
     """For each object, if ok_to_free_func(obj) returns True, then free
     the object.
     """
     self.mass_free_prepare()
     #
     res = self.mass_free_incremental(ok_to_free_func, sys.maxint)
     ll_assert(res, "non-incremental mass_free_in_pages() returned False")
Example 45
 def pop(self):
     used = self.used_in_last_chunk - 1
     ll_assert(used >= 0, "pop on empty AddressStack")
     result = self.chunk.items[used]
     self.used_in_last_chunk = used
     if used == 0 and self.chunk.next:
         self.shrink()
     return result
Example 46
    def deal_with_objects_with_finalizers(self, scan):
        # walk over list of objects with finalizers
        # if it is not copied, add it to the list of to-be-called finalizers
        # and copy it, to make the finalizer runnable
        # We try to run the finalizers in a "reasonable" order, like
        # CPython does.  The details of this algorithm are in
        # pypy/doc/discussion/finalizer-order.txt.
        new_with_finalizer = self.AddressDeque()
        marked = self.AddressDeque()
        pending = self.AddressStack()
        self.tmpstack = self.AddressStack()
        while self.objects_with_finalizers.non_empty():
            x = self.objects_with_finalizers.popleft()
            fq_nr = self.objects_with_finalizers.popleft()
            ll_assert(
                self._finalization_state(x) != 1, "bad finalization state 1")
            if self.surviving(x):
                new_with_finalizer.append(self.get_forwarding_address(x))
                new_with_finalizer.append(fq_nr)
                continue
            marked.append(x)
            marked.append(fq_nr)
            pending.append(x)
            while pending.non_empty():
                y = pending.pop()
                state = self._finalization_state(y)
                if state == 0:
                    self._bump_finalization_state_from_0_to_1(y)
                    self.trace(y, self._append_if_nonnull, pending)
                elif state == 2:
                    self._recursively_bump_finalization_state_from_2_to_3(y)
            scan = self._recursively_bump_finalization_state_from_1_to_2(
                x, scan)

        while marked.non_empty():
            x = marked.popleft()
            fq_nr = marked.popleft()
            state = self._finalization_state(x)
            ll_assert(state >= 2, "unexpected finalization state < 2")
            newx = self.get_forwarding_address(x)
            if state == 2:
                from rpython.rtyper.lltypesystem import rffi
                fq_index = rffi.cast(lltype.Signed, fq_nr)
                self.mark_finalizer_to_run(fq_index, newx)
                # we must also fix the state from 2 to 3 here, otherwise
                # we leave the GCFLAG_FINALIZATION_ORDERING bit behind
                # which will confuse the next collection
                self._recursively_bump_finalization_state_from_2_to_3(x)
            else:
                new_with_finalizer.append(newx)
                new_with_finalizer.append(fq_nr)

        self.tmpstack.delete()
        pending.delete()
        marked.delete()
        self.objects_with_finalizers.delete()
        self.objects_with_finalizers = new_with_finalizer
        return scan
Example 47
    def deal_with_objects_with_finalizers(self, scan):
        # walk over list of objects with finalizers
        # if it is not copied, add it to the list of to-be-called finalizers
        # and copy it, to make the finalizer runnable
        # We try to run the finalizers in a "reasonable" order, like
        # CPython does.  The details of this algorithm are in
        # pypy/doc/discussion/finalizer-order.txt.
        new_with_finalizer = self.AddressDeque()
        marked = self.AddressDeque()
        pending = self.AddressStack()
        self.tmpstack = self.AddressStack()
        while self.objects_with_finalizers.non_empty():
            x = self.objects_with_finalizers.popleft()
            fq_nr = self.objects_with_finalizers.popleft()
            ll_assert(self._finalization_state(x) != 1, 
                      "bad finalization state 1")
            if self.surviving(x):
                new_with_finalizer.append(self.get_forwarding_address(x))
                new_with_finalizer.append(fq_nr)
                continue
            marked.append(x)
            marked.append(fq_nr)
            pending.append(x)
            while pending.non_empty():
                y = pending.pop()
                state = self._finalization_state(y)
                if state == 0:
                    self._bump_finalization_state_from_0_to_1(y)
                    self.trace(y, self._append_if_nonnull, pending)
                elif state == 2:
                    self._recursively_bump_finalization_state_from_2_to_3(y)
            scan = self._recursively_bump_finalization_state_from_1_to_2(
                       x, scan)

        while marked.non_empty():
            x = marked.popleft()
            fq_nr = marked.popleft()
            state = self._finalization_state(x)
            ll_assert(state >= 2, "unexpected finalization state < 2")
            newx = self.get_forwarding_address(x)
            if state == 2:
                from rpython.rtyper.lltypesystem import rffi
                fq_index = rffi.cast(lltype.Signed, fq_nr)
                self.mark_finalizer_to_run(fq_index, newx)
                # we must also fix the state from 2 to 3 here, otherwise
                # we leave the GCFLAG_FINALIZATION_ORDERING bit behind
                # which will confuse the next collection
                self._recursively_bump_finalization_state_from_2_to_3(x)
            else:
                new_with_finalizer.append(newx)
                new_with_finalizer.append(fq_nr)

        self.tmpstack.delete()
        pending.delete()
        marked.delete()
        self.objects_with_finalizers.delete()
        self.objects_with_finalizers = new_with_finalizer
        return scan
Example 48
 def get_type_id(self, addr):
     tid = self.header(addr).tid
     ll_assert(tid & (GCFLAG_FORWARDED|GCFLAG_EXTERNAL) != GCFLAG_FORWARDED,
               "get_type_id on forwarded obj")
     # Non-prebuilt forwarded objects are overwritten with a FORWARDSTUB.
     # Although calling get_type_id() on a forwarded object works by itself,
     # we catch it as an error because it's likely that what is then
     # done with the typeid is bogus.
     return llop.extract_ushort(llgroup.HALFWORD, tid)
Example 49
 def command_allocating(uid, gcref):
     stuff = cast_gcref_to_instance(Stuff, gcref)
     # 'stuff' is just allocated; 'stuff.x' is not yet initialized
     dbstate.printed_stuff = stuff
     if dbstate.watch_future != -1:
         ll_assert(dbstate.watch_future == uid,
                   "watch_future out of sync")
         dbstate.watch_future = -1
         dbstate.metavar = stuff
Example 50
def sscopy_attach_shadow_stack(sscopy):
    base = llop.gc_adr_of_root_stack_base(llmemory.Address).address[0]
    ll_assert(llop.gc_adr_of_root_stack_top(llmemory.Address).address[0]==base,
              "attach_shadow_stack: ss is not empty?")
    length_bytes = sscopy.signed[0]
    llmemory.raw_memcopy(sscopy + SIZEADDR, base, length_bytes)
    llop.gc_adr_of_root_stack_top(llmemory.Address).address[0] = (
        base + length_bytes)
    llmemory.raw_free(sscopy)
Example 51
 def popleft(self):
     ll_assert(self.non_empty(), "pop on empty AddressDeque")
     index = self.index_in_oldest
     if index == chunk_size:
         self.shrink()
         index = 0
     result = self.oldest_chunk.items[index]
     self.index_in_oldest = index + 1
     return result
Example 52
 def belongs_to_current_thread(framedata):
     # xxx obscure: the answer is Yes if, as a pointer, framedata
     # lies between the start of the current stack and the top of it.
     stack_start = gcdata.aid2stack.get(get_aid(), llmemory.NULL)
     ll_assert(stack_start != llmemory.NULL,
               "current thread not found in gcdata.aid2stack!")
     stack_stop  = llop.stack_current(llmemory.Address)
     return (stack_start <= framedata <= stack_stop or
             stack_start >= framedata >= stack_stop)
Example 53
 def popleft(self):
     ll_assert(self.non_empty(), "pop on empty AddressDeque")
     index = self.index_in_oldest
     if index == chunk_size:
         self.shrink()
         index = 0
     result = self.oldest_chunk.items[index]
     self.index_in_oldest = index + 1
     return result
Example 54
 def get_type_id(self, addr):
     tid = self.header(addr).tid
     ll_assert(
         tid & (GCFLAG_FORWARDED | GCFLAG_EXTERNAL) != GCFLAG_FORWARDED,
         "get_type_id on forwarded obj")
     # Non-prebuilt forwarded objects are overwritten with a FORWARDSTUB.
     # Although calling get_type_id() on a forwarded object works by itself,
     # we catch it as an error because it's likely that what is then
     # done with the typeid is bogus.
     return llop.extract_ushort(llgroup.HALFWORD, tid)
Example 55
def ll_pop_nonneg(func, l, index):
    ll_assert(index >= 0, "unexpectedly negative list pop index")
    if func is dum_checkidx:
        if index >= l.ll_length():
            raise IndexError
    else:
        ll_assert(index < l.ll_length(), "list pop index out of bound")
    res = l.ll_getitem_fast(index)
    ll_delitem_nonneg(dum_nocheck, l, index)
    return res