Example #1
 def allocate_object(self, offset, size, letter='x'):
     self.check()
     bytes = llmemory.raw_malloc_usage(size)
     if offset + bytes > self.nbytes:
         raise ArenaError("object overflows beyond the end of the arena")
     zero = True
     for c in self.usagemap[offset:offset+bytes]:
         if c == '0':
             pass
         elif c == '#':
             zero = False
         else:
             raise ArenaError("new object overlaps a previous object")
     assert offset not in self.objectptrs
     addr2 = size._raw_malloc([], zero=zero)
     pattern = letter.upper() + letter*(bytes-1)
     self.usagemap[offset:offset+bytes] = array.array('c', pattern)
     self.setobject(addr2, offset, bytes)
     # common case: 'size' starts with a GCHeaderOffset.  In this case
     # we can also remember that the real object starts after the header.
     while isinstance(size, RoundedUpForAllocation):
         size = size.basesize
     if (isinstance(size, llmemory.CompositeOffset) and
         isinstance(size.offsets[0], llmemory.GCHeaderOffset)):
         objaddr = addr2 + size.offsets[0]
         hdrbytes = llmemory.raw_malloc_usage(size.offsets[0])
         objoffset = offset + hdrbytes
         self.setobject(objaddr, objoffset, bytes - hdrbytes)
     return addr2
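A minimal pure-Python model of the usage-map convention used above (an editorial sketch, not part of the PyPy API): '0' marks a zeroed free byte, '#' a dirty free byte, and an uppercase letter followed by lowercase copies marks the first and following bytes of a reserved object.

import array

usagemap = array.array('c', '0' * 8)              # 8 zeroed free bytes
usagemap[2:6] = array.array('c', 'X' + 'x' * 3)   # 4-byte object at offset 2
assert usagemap.tostring() == '00Xxxx00'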
Example #2
 def malloc_fixedsize_clear(self, typeid, size,
                            has_finalizer=False,
                            is_finalizer_light=False,
                            contains_weakptr=False):
     if (has_finalizer or
         (raw_malloc_usage(size) > self.lb_young_fixedsize and
          raw_malloc_usage(size) > self.largest_young_fixedsize)):
         # ^^^ we do two size comparisons; the first one appears redundant,
         #     but it can be constant-folded if 'size' is a constant; then
         #     it almost always folds down to False, which kills the
         #     second comparison as well.
         ll_assert(not contains_weakptr, "wrong case for mallocing weakref")
         # "non-simple" case or object too big: don't use the nursery
         return SemiSpaceGC.malloc_fixedsize_clear(self, typeid, size,
                                                   has_finalizer,
                                                   is_finalizer_light,
                                                   contains_weakptr)
     size_gc_header = self.gcheaderbuilder.size_gc_header
     totalsize = size_gc_header + size
     result = self.nursery_free
     if raw_malloc_usage(totalsize) > self.nursery_top - result:
         result = self.collect_nursery()
     llarena.arena_reserve(result, totalsize)
     # GCFLAG_NO_YOUNG_PTRS is never set on young objs
     self.init_gc_object(result, typeid, flags=0)
     self.nursery_free = result + totalsize
     if contains_weakptr:
         self.young_objects_with_weakrefs.append(result + size_gc_header)
     return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
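A quick pure-Python check of the claim in the comment above (the bounds are made up; the invariant that makes the first comparison redundant is largest_young_fixedsize >= lb_young_fixedsize):

lb_young_fixedsize, largest_young_fixedsize = 32, 56
for size in range(0, 128, 8):
    both = (size > lb_young_fixedsize and size > largest_young_fixedsize)
    assert both == (size > largest_young_fixedsize)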
Example #3
 def malloc_varsize_slowpath(self, typeid, length, force_nonmovable=False):
     # For objects that are too large, or when the nursery is exhausted.
     # In order to keep malloc_varsize_clear() as compact as possible,
     # we recompute what we need in this slow path instead of passing
     # it all as function arguments.
     size_gc_header = self.gcheaderbuilder.size_gc_header
     nonvarsize = size_gc_header + self.fixed_size(typeid)
     itemsize = self.varsize_item_sizes(typeid)
     offset_to_length = self.varsize_offset_to_length(typeid)
     try:
         varsize = ovfcheck(itemsize * length)
         totalsize = ovfcheck(nonvarsize + varsize)
     except OverflowError:
         raise MemoryError()
     if self.has_gcptr_in_varsize(typeid):
         nonlarge_max = self.nonlarge_gcptrs_max
     else:
         nonlarge_max = self.nonlarge_max
     if force_nonmovable or raw_malloc_usage(totalsize) > nonlarge_max:
         result = self.malloc_varsize_marknsweep(totalsize)
         flags = self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS | GCFLAG_UNVISITED
     else:
         result = self.malloc_varsize_collecting_nursery(totalsize)
         flags = self.GCFLAGS_FOR_NEW_YOUNG_OBJECTS
     self.init_gc_object(result, typeid, flags)
     (result + size_gc_header + offset_to_length).signed[0] = length
     return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
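ovfcheck() raises OverflowError when a result no longer fits a machine word; a rough untranslated model of the pattern above (sys.maxint is the Python 2 word bound, and the real ovfcheck also guards the negative side):

import sys

def ovfcheck_model(value):
    if value > sys.maxint:         # would overflow a Signed
        raise OverflowError
    return value

try:
    totalsize = ovfcheck_model((sys.maxint // 8 + 1) * 8)
except OverflowError:
    totalsize = -1                 # the slow path raises MemoryError instead
assert totalsize == -1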
Example #4
 def try_obtain_free_space(self, needed):
     # XXX for bonus points do big objects differently
     needed = raw_malloc_usage(needed)
     if (self.red_zone >= 2 and self.space_size < self.max_space_size and
         self.double_space_size()):
         pass    # collect was done during double_space_size()
     else:
         self.semispace_collect()
     missing = needed - (self.top_of_space - self.free)
     if missing <= 0:
         return True      # success
     else:
         # first check if the object could possibly fit
         proposed_size = self.space_size
         while missing > 0:
             if proposed_size >= self.max_space_size:
                 return False    # no way
             missing -= proposed_size
             proposed_size *= 2
         # For address space fragmentation reasons, we double the space
         # size possibly several times, moving the objects at each step,
         # instead of going directly for the final size.  We assume that
         # it's a rare case anyway.
         while self.space_size < proposed_size:
             if not self.double_space_size():
                 return False
         ll_assert(needed <= self.top_of_space - self.free,
                      "double_space_size() failed to do its job")
         return True
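The "could it possibly fit" loop above can be read as: keep doubling until the accumulated new space covers what is missing. A standalone sketch with illustrative numbers:

def final_size_needed(missing, space_size, max_space_size):
    proposed_size = space_size
    while missing > 0:
        if proposed_size >= max_space_size:
            return -1              # no way: the object can never fit
        missing -= proposed_size
        proposed_size *= 2
    return proposed_size

assert final_size_needed(100, 64, 1024) == 256    # two doublings suffice
assert final_size_needed(10000, 64, 1024) == -1   # give up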
Example #5
    def make_a_nonmoving_copy(self, obj, objsize):
        # NB. the object can have a finalizer or be a weakref, but
        # it's not an issue.
        totalsize = self.size_gc_header() + objsize
        tid = self.header(obj).tid
        if tid & GCFLAG_HASHMASK:
            totalsize_incl_hash = totalsize + llmemory.sizeof(lltype.Signed)
        else:
            totalsize_incl_hash = totalsize
        newaddr = self.allocate_external_object(totalsize_incl_hash)
        if not newaddr:
            return llmemory.NULL   # can't raise MemoryError during a collect()
        self._nonmoving_copy_count += 1
        self._nonmoving_copy_size += raw_malloc_usage(totalsize)

        llmemory.raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
        if tid & GCFLAG_HASHMASK:
            hash = self._get_object_hash(obj, objsize, tid)
            (newaddr + totalsize).signed[0] = hash
            tid |= GC_HASH_HASFIELD
        #
        # GCFLAG_UNVISITED is not set
        # GCFLAG_NO_HEAP_PTRS is not set either, conservatively.  It may be
        # set by the next collection's collect_last_generation_roots().
        # This old object is immediately put at generation 3.
        newobj = newaddr + self.size_gc_header()
        hdr = self.header(newobj)
        hdr.tid = tid | self.GCFLAGS_FOR_NEW_EXTERNAL_OBJECTS
        ll_assert(self.is_last_generation(newobj),
                  "make_a_nonmoving_copy: object too young")
        self.gen3_rawmalloced_objects.append(newobj)
        self.last_generation_root_objects.append(newobj)
        self.rawmalloced_objects_to_trace.append(newobj)   # visit me
        return newobj
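Layout sketch for the hash handling above (editorial, assuming an 8-byte word): when GCFLAG_HASHMASK is set, one extra Signed word is reserved past the object and the hash is written at newaddr + totalsize.

WORD = 8                                    # assumed sizeof(lltype.Signed)
size_gc_header, objsize = WORD, 3 * WORD
totalsize = size_gc_header + objsize
totalsize_incl_hash = totalsize + WORD      # room for the trailing hash
hash_offset = totalsize                     # (newaddr + totalsize).signed[0]
assert hash_offset == totalsize_incl_hash - WORD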
Example #6
 def malloc_varsize_collecting_nursery(self, totalsize):
     result = self.collect_nursery()
     ll_assert(raw_malloc_usage(totalsize) <= self.nursery_top - result,
               "not enough room in malloc_varsize_collecting_nursery()")
     llarena.arena_reserve(result, totalsize)
     self.nursery_free = result + llarena.round_up_for_allocation(
         totalsize)
     return result
Example #7
def allocate(ffi, nbytes):
    nbytes = llmemory.raw_malloc_usage(nbytes)
    if not we_are_translated():
        nbytes *= 2   # hack to account for the fact that raw_malloc_usage()
                      # returns an approximation, ignoring padding and alignment
    p = lltype.malloc(rffi.CCHARP.TO, nbytes, flavor='raw', zero=True)
    ffi._finalizer.free_mems.append(p)
    return p
Example #8
def test_partial_arena_reset():
    a = arena_malloc(72, False)
    def reserve(i):
        b = a + i * llmemory.raw_malloc_usage(precomputed_size)
        arena_reserve(b, precomputed_size)
        return b
    blist = []
    plist = []
    for i in range(4):
        b = reserve(i)
        (b + llmemory.offsetof(SX, 'x')).signed[0] = 100 + i
        blist.append(b)
        plist.append(llmemory.cast_adr_to_ptr(b, SPTR))
    # clear blist[1] and blist[2] but not blist[0] nor blist[3]
    arena_reset(blist[1], llmemory.raw_malloc_usage(precomputed_size)*2, False)
    py.test.raises(RuntimeError, "plist[1].x")     # marked as freed
    py.test.raises(RuntimeError, "plist[2].x")     # marked as freed
    # re-reserve object at index 1 and 2
    blist[1] = reserve(1)
    blist[2] = reserve(2)
    # check via object pointers
    assert plist[0].x == 100
    assert plist[3].x == 103
    py.test.raises(RuntimeError, "plist[1].x")     # marked as freed
    py.test.raises(RuntimeError, "plist[2].x")     # marked as freed
    # but we can still cast the old ptrs to addresses, which compare equal
    # to the new ones we got
    assert llmemory.cast_ptr_to_adr(plist[1]) == blist[1]
    assert llmemory.cast_ptr_to_adr(plist[2]) == blist[2]
    # check via addresses
    assert (blist[0] + llmemory.offsetof(SX, 'x')).signed[0] == 100
    assert (blist[3] + llmemory.offsetof(SX, 'x')).signed[0] == 103
    py.test.raises(lltype.UninitializedMemoryAccess,
          "(blist[1] + llmemory.offsetof(SX, 'x')).signed[0]")
    py.test.raises(lltype.UninitializedMemoryAccess,
          "(blist[2] + llmemory.offsetof(SX, 'x')).signed[0]")
    # clear and zero-fill the area over blist[0] and blist[1]
    arena_reset(blist[0], llmemory.raw_malloc_usage(precomputed_size)*2, True)
    # re-reserve and check it's zero
    blist[0] = reserve(0)
    blist[1] = reserve(1)
    assert (blist[0] + llmemory.offsetof(SX, 'x')).signed[0] == 0
    assert (blist[1] + llmemory.offsetof(SX, 'x')).signed[0] == 0
    assert (blist[3] + llmemory.offsetof(SX, 'x')).signed[0] == 103
    py.test.raises(lltype.UninitializedMemoryAccess,
          "(blist[2] + llmemory.offsetof(SX, 'x')).signed[0]")
Example #9
File: ffi.py Project: kidaa/pixie
 def fb_alloc(self, size):
     size = llmemory.raw_malloc_usage(size)
     if not self.bufferp:
         self.nb_bytes += size
         return lltype.nullptr(rffi.CCHARP.TO)
     else:
         result = self.bufferp
         self.bufferp = rffi.ptradd(result, size)
         return result
Example #10
 def track_heap(self, adr):
     if self._tracked_dict.contains(adr):
         return
     self._tracked_dict.add(adr)
     idx = llop.get_member_index(lltype.Signed, self.get_type_id(adr))
     self._ll_typeid_map[idx].count += 1
     totsize = self.get_size(adr) + self.size_gc_header()
     self._ll_typeid_map[idx].size += llmemory.raw_malloc_usage(totsize)
     self.trace(adr, self.track_heap_parent, adr)
Example #11
    def malloc_varsize_clear(self, typeid, length, size, itemsize,
                             offset_to_length):
        # Only use the nursery if there are not too many items.
        if not raw_malloc_usage(itemsize):
            too_many_items = False
        else:
            # The following line is usually constant-folded because both
            # min_nursery_size and itemsize are constants (the latter
            # due to inlining).
            maxlength_for_minimal_nursery = (self.min_nursery_size // 4 //
                                             raw_malloc_usage(itemsize))
            
            # The actual maximum length for our nursery depends on how
            # many times our nursery is bigger than the minimal size.
            # The computation is done in this roundabout way so that
            # the only remaining run-time computation is the following
            # shift.
            maxlength = maxlength_for_minimal_nursery << self.nursery_scale
            too_many_items = length > maxlength

        if (too_many_items or
            (raw_malloc_usage(size) > self.lb_young_var_basesize and
             raw_malloc_usage(size) > self.largest_young_var_basesize)):
            # ^^^ we do two size comparisons; the first one appears redundant,
            #     but it can be constant-folded if 'size' is a constant; then
            #     it almost always folds down to False, which kills the
            #     second comparison as well.
            return SemiSpaceGC.malloc_varsize_clear(self, typeid, length, size,
                                                    itemsize, offset_to_length)
        # with the above checks we know now that totalsize cannot be more
        # than about half of the nursery size; in particular, the + and *
        # cannot overflow
        size_gc_header = self.gcheaderbuilder.size_gc_header
        totalsize = size_gc_header + size + itemsize * length
        result = self.nursery_free
        if raw_malloc_usage(totalsize) > self.nursery_top - result:
            result = self.collect_nursery()
        llarena.arena_reserve(result, totalsize)
        # GCFLAG_NO_YOUNG_PTRS is never set on young objs
        self.init_gc_object(result, typeid, flags=0)
        (result + size_gc_header + offset_to_length).signed[0] = length
        self.nursery_free = result + llarena.round_up_for_allocation(totalsize)
        return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
Example #12
 def __sub__(self, other):
     if isinstance(other, llmemory.AddressOffset):
         other = llmemory.raw_malloc_usage(other)
     if is_valid_int(other):
         return self.arena.getaddr(self.offset - other)
     if isinstance(other, fakearenaaddress):
         if self.arena is not other.arena:
             raise ArenaError("The two addresses are from different arenas")
         return self.offset - other.offset
     return NotImplemented
Example #13
 def alignof(self):
     align = self._alignof()
     if not we_are_translated():
         # obscure hack when untranslated, maybe, approximate, don't use
         if isinstance(align, llmemory.FieldOffset):
             align = rffi.sizeof(align.TYPE.y)
     else:
         # a different hack when translated, to avoid seeing constants
         # of a symbolic integer type
         align = llmemory.raw_malloc_usage(align)
     return align
Example #14
 def fill_nursery_with_pinned_objects(self):
     typeid = self.get_type_id(T)
     size = self.gc.fixed_size(typeid) + self.gc.gcheaderbuilder.size_gc_header
     raw_size = llmemory.raw_malloc_usage(size)
     object_mallocs = self.gc.nursery_size // raw_size
     for instance_nr in xrange(object_mallocs):
         ptr = self.malloc(T)
         adr = llmemory.cast_ptr_to_adr(ptr)
         ptr.someInt = 100 + instance_nr
         self.stackroots.append(ptr)
         self.gc.pin(adr)
Example #15
 def __init__(self, arena_size, page_size, small_request_threshold):
     # 'small_request_threshold' is the largest size that we
     # can ask with self.malloc().
     self.arena_size = arena_size
     self.page_size = page_size
     self.small_request_threshold = small_request_threshold
     #
     # 'page_for_size': for each size N between WORD and
     # small_request_threshold (included), contains either NULL or
     # a pointer to a page that has room for at least one more
     # allocation of the given size.
     length = small_request_threshold / WORD + 1
     self.page_for_size = lltype.malloc(rffi.CArray(PAGE_PTR), length,
                                        flavor='raw', zero=True,
                                        immortal=True)
     self.full_page_for_size = lltype.malloc(rffi.CArray(PAGE_PTR), length,
                                             flavor='raw', zero=True,
                                             immortal=True)
     self.nblocks_for_size = lltype.malloc(rffi.CArray(lltype.Signed),
                                           length, flavor='raw',
                                           immortal=True)
     self.hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER))
     assert page_size > self.hdrsize
     self.nblocks_for_size[0] = 0    # unused
     for i in range(1, length):
         self.nblocks_for_size[i] = (page_size - self.hdrsize) // (WORD * i)
     #
     self.max_pages_per_arena = arena_size // page_size
     self.arenas_lists = lltype.malloc(rffi.CArray(ARENA_PTR),
                                       self.max_pages_per_arena,
                                       flavor='raw', zero=True,
                                       immortal=True)
     # this is used in mass_free() only
     self.old_arenas_lists = lltype.malloc(rffi.CArray(ARENA_PTR),
                                           self.max_pages_per_arena,
                                           flavor='raw', zero=True,
                                           immortal=True)
     #
     # the arena currently consumed; it must have at least one page
     # available, or be NULL.  The arena object that we point to is
     # not in any 'arenas_lists'.  We will consume all its pages before
     # we choose a next arena, even if there is a major collection
     # in-between.
     self.current_arena = ARENA_NULL
     #
     # guarantee that 'arenas_lists[1:min_empty_nfreepages]' are all empty
     self.min_empty_nfreepages = self.max_pages_per_arena
     #
     # part of current_arena might still contain uninitialized pages
     self.num_uninitialized_pages = 0
     #
     # the total memory used, counting every block in use, without
     # the additional bookkeeping stuff.
     self.total_memory_used = r_uint(0)
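A worked instance of the nblocks_for_size computation above, with illustrative parameters (WORD = 8, a 4096-byte page, a hypothetical 24-byte PAGE_HEADER):

WORD, page_size, hdrsize = 8, 4096, 24
nblocks_for_size = [0]                            # index 0 unused
for i in range(1, 4):
    nblocks_for_size.append((page_size - hdrsize) // (WORD * i))
assert nblocks_for_size[1:] == [509, 254, 169]    # blocks of 1, 2, 3 words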
Example #16
 def malloc(self, size):
     nsize = raw_malloc_usage(size)
     ll_assert(nsize > 0, "malloc: size is null or negative")
     ll_assert(nsize <= self.small_request_threshold,"malloc: size too big")
     ll_assert((nsize & (WORD-1)) == 0, "malloc: size is not aligned")
     #
     result = llarena.arena_malloc(nsize, False)
     llarena.arena_reserve(result, size)
     self.all_objects.append((result, nsize))
     self.total_memory_used += nsize
     return result
Example #17
    def malloc_varsize_clear(self, typeid, length, size, itemsize,
                             offset_to_length):
        size_gc_header = self.gcheaderbuilder.size_gc_header
        nonvarsize = size_gc_header + size

        # Compute the maximal length that makes the object still
        # below 'nonlarge_max'.  All the following logic is usually
        # constant-folded because self.nonlarge_max, size and itemsize
        # are all constants (the arguments are constant due to
        # inlining) and self.has_gcptr_in_varsize() is constant-folded.
        if self.has_gcptr_in_varsize(typeid):
            nonlarge_max = self.nonlarge_gcptrs_max
        else:
            nonlarge_max = self.nonlarge_max

        if not raw_malloc_usage(itemsize):
            too_many_items = raw_malloc_usage(nonvarsize) > nonlarge_max
        else:
            maxlength = nonlarge_max - raw_malloc_usage(nonvarsize)
            maxlength = maxlength // raw_malloc_usage(itemsize)
            too_many_items = length > maxlength

        if not too_many_items:
            # With the above checks we know now that totalsize cannot be more
            # than 'nonlarge_max'; in particular, the + and * cannot overflow.
            # Let's try to fit the object in the nursery.
            totalsize = nonvarsize + itemsize * length
            result = self.nursery_free
            if raw_malloc_usage(totalsize) <= self.nursery_top - result:
                llarena.arena_reserve(result, totalsize)
                # GCFLAG_NO_YOUNG_PTRS is never set on young objs
                self.init_gc_object(result, typeid, flags=0)
                (result + size_gc_header + offset_to_length).signed[0] = length
                self.nursery_free = result + llarena.round_up_for_allocation(
                    totalsize)
                return llmemory.cast_adr_to_ptr(result+size_gc_header,
                                                llmemory.GCREF)
        return self.malloc_varsize_slowpath(typeid, length)
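A worked instance of the 'too_many_items' bound above (made-up constants): with nonlarge_max = 264, a 24-byte fixed part, and 8-byte items, at most 30 items keep the total within the bound, which is why the + and * cannot overflow.

nonlarge_max, nonvarsize, itemsize = 264, 24, 8
maxlength = (nonlarge_max - nonvarsize) // itemsize
assert maxlength == 30
assert nonvarsize + itemsize * maxlength <= nonlarge_max
assert nonvarsize + itemsize * (maxlength + 1) > nonlarge_max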
Example #18
 def malloc_varsize_marknsweep(self, totalsize):
     # In order to free the large objects from time to time, we
     # arbitrarily force a full collect() if none occurs when we have
     # allocated self.space_size + rawmalloced bytes of large objects.
     self._check_rawsize_alloced(raw_malloc_usage(totalsize))
     result = self.allocate_external_object(totalsize)
     if not result:
         raise MemoryError()
     # The parent classes guarantee zero-filled allocations, so we
     # need to follow suit.
     llmemory.raw_memclear(result, totalsize)
     size_gc_header = self.gcheaderbuilder.size_gc_header
     self.gen2_rawmalloced_objects.append(result + size_gc_header)
     return result
Example #19
def arena_reserve(addr, size, check_alignment=True):
    """Mark some bytes in an arena as reserved, and returns addr.
    For debugging this can check that reserved ranges of bytes don't
    overlap.  The size must be symbolic; in non-translated version
    this is used to know what type of lltype object to allocate."""
    from rpython.memory.lltypelayout import memory_alignment
    addr = getfakearenaaddress(addr)
    letter = 'x'
    if llmemory.raw_malloc_usage(size) == 1:
        letter = 'b'    # for Byte-aligned allocations
    elif check_alignment and (addr.offset & (memory_alignment-1)) != 0:
        raise ArenaError("object at offset %d would not be correctly aligned"
                         % (addr.offset,))
    addr.arena.allocate_object(addr.offset, size, letter)
Example #20
 def shrink_obj(self, offset, newsize):
     oldbytes = self.objectsizes[offset]
     newbytes = llmemory.raw_malloc_usage(newsize)
     assert newbytes <= oldbytes
     # fix self.objectsizes
     for i in range(newbytes):
         adr = offset + i
         if adr in self.objectsizes:
             assert self.objectsizes[adr] == oldbytes - i
             self.objectsizes[adr] = newbytes - i
     # fix self.usagemap
     for i in range(offset + newbytes, offset + oldbytes):
         assert self.usagemap[i] == 'x'
         self.usagemap[i] = '#'
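The usage-map effect of the shrink, in the same toy notation as the sketch after Example #1: bytes past the new end revert from object data ('x') to dirty free bytes ('#').

import array

usagemap = array.array('c', 'X' + 'x' * 7)        # an 8-byte object
newbytes, oldbytes = 4, 8
usagemap[newbytes:oldbytes] = array.array('c', '#' * (oldbytes - newbytes))
assert usagemap.tostring() == 'Xxxx####'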
Example #21
def ll_write_final_null_char(s):
    """'s' is a low-level STR; writes a terminating NULL character after
    the other characters in 's'.  Warning, this only works because of
    the 'extra_item_after_alloc' hack inside the definition of STR.
    """
    from rpython.rtyper.lltypesystem import rffi
    PSTR = lltype.typeOf(s)
    assert has_final_null_char(PSTR) == 1
    n = llmemory.offsetof(PSTR.TO, 'chars')
    n += llmemory.itemoffsetof(PSTR.TO.chars, 0)
    n = llmemory.raw_malloc_usage(n)
    n += len(s.chars)
    # no GC operation from here!
    ptr = rffi.cast(rffi.CCHARP, s)
    ptr[n] = '\x00'
Example #22
 def __add__(self, other):
     if is_valid_int(other):
         position = self.offset + other
     elif isinstance(other, llmemory.AddressOffset):
         # this is really some Do What I Mean logic.  There are two
         # possible meanings: either we want to go past the current
         # object in the arena, or we want to take the address inside
         # the current object.  Try to guess...
         bytes = llmemory.raw_malloc_usage(other)
         if (self.offset in self.arena.objectsizes and
                 bytes < self.arena.objectsizes[self.offset]):
             # looks like we mean "inside the object"
             return llmemory.fakeaddress.__add__(self, other)
         position = self.offset + bytes
     else:
         return NotImplemented
     return self.arena.getaddr(position)
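The guessing rule above, restated as a standalone predicate over a toy offset-to-size table (hypothetical values, not the real fakearenaaddress API):

objectsizes = {0: 16, 16: 16}          # arena offset -> reserved object size

def offset_means_inside(offset, nbytes):
    return offset in objectsizes and nbytes < objectsizes[offset]

assert offset_means_inside(0, 8)        # "inside the current object"
assert not offset_means_inside(0, 16)   # "past it": next object's start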
Example #23
 def malloc_varsize_clear(self, typeid16, length, size, itemsize,
                          offset_to_length):
     size_gc_header = self.gcheaderbuilder.size_gc_header
     nonvarsize = size_gc_header + size
     try:
         varsize = ovfcheck(itemsize * length)
         totalsize = ovfcheck(nonvarsize + varsize)
     except OverflowError:
         raise memoryError   # 'memoryError' is a prebuilt MemoryError() instance
     result = self.free
     if raw_malloc_usage(totalsize) > self.top_of_space - result:
         result = self.obtain_free_space(totalsize)
     llarena.arena_reserve(result, totalsize)
     self.init_gc_object(result, typeid16)
     (result + size_gc_header + offset_to_length).signed[0] = length
     self.free = result + llarena.round_up_for_allocation(totalsize)
     return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
Example #24
 def test_full_pinned_nursery_pin_fail(self):
     typeid = self.get_type_id(T)
     size = self.gc.fixed_size(typeid) + self.gc.gcheaderbuilder.size_gc_header
     raw_size = llmemory.raw_malloc_usage(size)
     object_mallocs = self.gc.nursery_size // raw_size
     # make sure we do not run into the pin limit, as we are testing not the
     # limiter but the case of a nursery full of pinned objects.
     assert object_mallocs < self.gc.max_number_of_pinned_objects
     for instance_nr in xrange(object_mallocs):
         ptr = self.malloc(T)
         adr = llmemory.cast_ptr_to_adr(ptr)
         ptr.someInt = 100 + instance_nr
         self.stackroots.append(ptr)
         self.gc.pin(adr)
     #
     # nursery should be full now, at least no space for another `T`.
     # Next malloc should fail.
     py.test.raises(Exception, self.malloc, T)
Example #25
 def malloc_fixedsize_clear(self, typeid16, size,
                            has_finalizer=False,
                            is_finalizer_light=False,
                            contains_weakptr=False):
     size_gc_header = self.gcheaderbuilder.size_gc_header
     totalsize = size_gc_header + size
     result = self.free
     if raw_malloc_usage(totalsize) > self.top_of_space - result:
         result = self.obtain_free_space(totalsize)
     llarena.arena_reserve(result, totalsize)
     self.init_gc_object(result, typeid16)
     self.free = result + totalsize
     #if is_finalizer_light:
     #    self.objects_with_light_finalizers.append(result + size_gc_header)
     #else:
     if has_finalizer:
         self.objects_with_finalizers.append(result + size_gc_header)
     if contains_weakptr:
         self.objects_with_weakrefs.append(result + size_gc_header)
     return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
Example #26
def test_invoke(space):
    size = llmemory.raw_malloc_usage(llmemory.sizeof(CIF_DESCRIPTION, 2))
    cif_descr = lltype.malloc(CIF_DESCRIPTION_P.TO, size, flavor='raw')
    p_arg1 = lltype.malloc(rffi.CCHARP.TO, 1, flavor='raw')
    p_arg2 = lltype.malloc(rffi.CCHARP.TO, 1, flavor='raw')
    p_args = lltype.malloc(rffi.CCHARPP.TO, 2, flavor='raw')
    p_res = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
    w_proc_mul = space.execute("proc { |x, y| x * y }")
    w_proc_diff = space.execute("proc { |x, y| (x - y).abs }")
    w_callback_info = space.execute("""
    int32 = FFI::Type::INT32
    func_type = FFI::FunctionType.new(int32,
                [int32, int32])
    """)
    data_mul_w = _callback.Data(space, w_proc_mul, w_callback_info)
    data_diff_w = _callback.Data(space, w_proc_diff, w_callback_info)
    id_mul = compute_unique_id(data_mul_w)
    id_diff = compute_unique_id(data_diff_w)
    _callback.registration[id_mul] = _callback.Closure(data_mul_w)
    _callback.registration[id_diff] = _callback.Closure(data_diff_w)
    try:
        p_arg1[0] = rffi.cast(rffi.CHAR, 6)
        p_arg2[0] = rffi.cast(rffi.CHAR, 7)
        p_args[0] = p_arg1
        p_args[1] = p_arg2
        _callback.invoke(cif_descr,
                         rffi.cast(rffi.VOIDP, p_res),
                         rffi.cast(rffi.VOIDPP, p_args),
                         rffi.cast(rffi.VOIDP, id_mul))
        assert p_res[0] == 42
        _callback.invoke(cif_descr,
                         rffi.cast(rffi.VOIDP, p_res),
                         rffi.cast(rffi.VOIDPP, p_args),
                         rffi.cast(rffi.VOIDP, id_diff))
        assert p_res[0] == 1
    finally:
        lltype.free(cif_descr, flavor='raw')
        lltype.free(p_arg1, flavor='raw')
        lltype.free(p_arg2, flavor='raw')
        lltype.free(p_args, flavor='raw')
        lltype.free(p_res, flavor='raw')
Example #27
 def malloc(self, size):
     """Allocate a block from a page in an arena."""
     nsize = llmemory.raw_malloc_usage(size)
     ll_assert(nsize > 0, "malloc: size is null or negative")
     ll_assert(nsize <= self.small_request_threshold,"malloc: size too big")
     ll_assert((nsize & (WORD-1)) == 0, "malloc: size is not aligned")
     self.total_memory_used += r_uint(nsize)
     #
     # Get the page to use from the size
     size_class = nsize >> WORD_POWER_2
     page = self.page_for_size[size_class]
     if page == PAGE_NULL:
         page = self.allocate_new_page(size_class)
     #
     # The result is simply 'page.freeblock'
     result = page.freeblock
     if page.nfree > 0:
         #
         # The 'result' was part of the chained list; read the next.
         page.nfree -= 1
         freeblock = result.address[0]
         llarena.arena_reset(result,
                             llmemory.sizeof(llmemory.Address),
                             0)
         #
     else:
         # The 'result' is part of the uninitialized blocks.
         freeblock = result + nsize
     #
     page.freeblock = freeblock
     #
     pageaddr = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
     if freeblock - pageaddr > self.page_size - nsize:
         # This was the last free block, so unlink the page from the
         # chained list and put it in the 'full_page_for_size' list.
         self.page_for_size[size_class] = page.nextpage
         page.nextpage = self.full_page_for_size[size_class]
         self.full_page_for_size[size_class] = page
     #
     llarena.arena_reserve(result, _dummy_size(size))
     return result
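The size-class lookup above in isolation, assuming WORD = 8 and hence WORD_POWER_2 = 3: a request of N words goes to size class N.

WORD, WORD_POWER_2 = 8, 3
for nwords in range(1, 5):
    nsize = nwords * WORD               # already WORD-aligned, as asserted
    assert (nsize >> WORD_POWER_2) == nwords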
Example #28
def test_replace_object_with_stub():
    from rpython.memory.gcheader import GCHeaderBuilder
    HDR = lltype.Struct('HDR', ('x', lltype.Signed))
    S = lltype.GcStruct('S', ('y', lltype.Signed), ('z', lltype.Signed))
    STUB = lltype.GcStruct('STUB', ('t', lltype.Char))
    gcheaderbuilder = GCHeaderBuilder(HDR)
    size_gc_header = gcheaderbuilder.size_gc_header
    ssize = llmemory.raw_malloc_usage(llmemory.sizeof(S))

    a = arena_malloc(13*ssize, True)
    hdraddr = a + 3*ssize
    arena_reserve(hdraddr, size_gc_header + llmemory.sizeof(S))
    hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(HDR))
    hdr.x = 42
    obj = llmemory.cast_adr_to_ptr(hdraddr + size_gc_header, lltype.Ptr(S))
    obj.y = -5
    obj.z = -6

    hdraddr = llmemory.cast_ptr_to_adr(obj) - size_gc_header
    arena_reset(hdraddr, size_gc_header + llmemory.sizeof(S), False)
    arena_reserve(hdraddr, size_gc_header + llmemory.sizeof(STUB))

    # check that it is possible to reach the newly reserved HDR+STUB
    # via the header of the old 'obj' pointer, both via the existing
    # 'hdraddr':
    hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(HDR))
    hdr.x = 46
    stub = llmemory.cast_adr_to_ptr(hdraddr + size_gc_header, lltype.Ptr(STUB))
    stub.t = '!'

    # and via a (now-invalid) pointer to the old 'obj': (this is needed
    # because during a garbage collection there are still pointers to
    # the old 'obj' around to be fixed)
    hdraddr = llmemory.cast_ptr_to_adr(obj) - size_gc_header
    hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(HDR))
    assert hdr.x == 46
    stub = llmemory.cast_adr_to_ptr(hdraddr + size_gc_header,
                                    lltype.Ptr(STUB))
    assert stub.t == '!'
Example #29
 def reset(self, zero, start=0, size=None):
     self.check()
     if size is None:
         stop = self.nbytes
     else:
         stop = start + llmemory.raw_malloc_usage(size)
     assert 0 <= start <= stop <= self.nbytes
     for offset, ptr in self.objectptrs.items():
         size = self.objectsizes[offset]
         if offset < start:   # object is before the cleared area
             assert offset + size <= start, "object overlaps cleared area"
         elif offset + size > stop:  # object is after the cleared area
             assert offset >= stop, "object overlaps cleared area"
         else:
             obj = ptr._obj
             obj.__arena_location__[0] = False   # no longer valid
             del self.objectptrs[offset]
             del self.objectsizes[offset]
             obj._free()
     if zero in (1, 2):
         initialbyte = "0"
     else:
         initialbyte = "#"
     self.usagemap[start:stop] = array.array('c', initialbyte*(stop-start))
Example #30
def ffiobj_init(ffi, module_name, version, types, w_globals, w_struct_unions,
                w_enums, w_typenames, w_includes):
    space = ffi.space

    # xxx force ll2ctypes conversion here.  This appears to be needed,
    # otherwise ll2ctypes explodes.  I don't want to know :-(
    rffi.cast(lltype.Signed, ffi.ctxobj)

    if version == -1 and not types:
        return
    if not (cffi1_module.VERSION_MIN <= version <= cffi1_module.VERSION_MAX):
        raise oefmt(
            space.w_ImportError,
            "cffi out-of-line Python module '%s' has unknown version %s",
            module_name, hex(version))

    if types:
        # unpack a string of 4-byte entries into an array of _cffi_opcode_t
        n = len(types) // 4
        ntypes = allocate_array(ffi, _CFFI_OPCODE_T, n)
        decoder = StringDecoder(ffi, types)
        for i in range(n):
            ntypes[i] = decoder.next_opcode()
        ffi.ctxobj.ctx.c_types = ntypes
        rffi.setintfield(ffi.ctxobj.ctx, 'c_num_types', n)
        ffi.cached_types = [None] * n

    if w_globals is not None:
        # unpack a tuple alternating strings and ints, each two together
        # describing one global_s entry with no specified address or size.
        # The int is only used with integer constants.
        globals_w = space.fixedview(w_globals)
        n = len(globals_w) // 2
        size = n * rffi.sizeof(GLOBAL_S) + n * rffi.sizeof(CDL_INTCONST_S)
        p = allocate(ffi, size)
        nglobs = rffi.cast(rffi.CArrayPtr(GLOBAL_S), p)
        p = rffi.ptradd(p,
                        llmemory.raw_malloc_usage(n * rffi.sizeof(GLOBAL_S)))
        nintconsts = rffi.cast(rffi.CArrayPtr(CDL_INTCONST_S), p)
        for i in range(n):
            decoder = StringDecoder(ffi, space.bytes_w(globals_w[i * 2]))
            nglobs[i].c_type_op = decoder.next_opcode()
            nglobs[i].c_name = decoder.next_name()
            op = getop(nglobs[i].c_type_op)
            if op == cffi_opcode.OP_CONSTANT_INT or op == cffi_opcode.OP_ENUM:
                w_integer = globals_w[i * 2 + 1]
                ll_set_cdl_realize_global_int(nglobs[i])
                bigint = space.bigint_w(w_integer)
                ullvalue = bigint.ulonglongmask()
                rffi.setintfield(nintconsts[i], 'neg', int(bigint.sign <= 0))
                rffi.setintfield(nintconsts[i], 'value', ullvalue)
        ffi.ctxobj.ctx.c_globals = nglobs
        rffi.setintfield(ffi.ctxobj.ctx, 'c_num_globals', n)

    if w_struct_unions is not None:
        # unpack a tuple of struct/unions, each described as a sub-tuple;
        # the item 0 of each sub-tuple describes the struct/union, and
        # the items 1..N-1 describe the fields, if any
        struct_unions_w = space.fixedview(w_struct_unions)
        n = len(struct_unions_w)
        nftot = 0  # total number of fields
        for i in range(n):
            nftot += space.len_w(struct_unions_w[i]) - 1
        nstructs = allocate_array(ffi, STRUCT_UNION_S, n)
        nfields = allocate_array(ffi, FIELD_S, nftot)
        nf = 0
        for i in range(n):
            # 'desc' is the tuple of strings (desc_struct, desc_field_1, ..)
            desc = space.fixedview(struct_unions_w[i])
            nf1 = len(desc) - 1
            decoder = StringDecoder(ffi, space.bytes_w(desc[0]))
            rffi.setintfield(nstructs[i], 'c_type_index',
                             decoder.next_4bytes())
            flags = decoder.next_4bytes()
            rffi.setintfield(nstructs[i], 'c_flags', flags)
            nstructs[i].c_name = decoder.next_name()
            if flags & (cffi_opcode.F_OPAQUE | cffi_opcode.F_EXTERNAL):
                rffi.setintfield(nstructs[i], 'c_size', -1)
                rffi.setintfield(nstructs[i], 'c_alignment', -1)
                rffi.setintfield(nstructs[i], 'c_first_field_index', -1)
                rffi.setintfield(nstructs[i], 'c_num_fields', 0)
                assert nf1 == 0
            else:
                rffi.setintfield(nstructs[i], 'c_size', -2)
                rffi.setintfield(nstructs[i], 'c_alignment', -2)
                rffi.setintfield(nstructs[i], 'c_first_field_index', nf)
                rffi.setintfield(nstructs[i], 'c_num_fields', nf1)
            for j in range(nf1):
                decoder = StringDecoder(ffi, space.bytes_w(desc[j + 1]))
                # this 'decoder' is for one of the other strings beyond
                # the first one, describing one field each
                type_op = decoder.next_opcode()
                nfields[nf].c_field_type_op = type_op
                rffi.setintfield(nfields[nf], 'c_field_offset', -1)
                if getop(type_op) != cffi_opcode.OP_NOOP:
                    field_size = decoder.next_4bytes()
                else:
                    field_size = -1
                rffi.setintfield(nfields[nf], 'c_field_size', field_size)
                nfields[nf].c_name = decoder.next_name()
                nf += 1
        assert nf == nftot
        ffi.ctxobj.ctx.c_struct_unions = nstructs
        ffi.ctxobj.ctx.c_fields = nfields
        rffi.setintfield(ffi.ctxobj.ctx, 'c_num_struct_unions', n)

    if w_enums:
        # unpack a tuple of strings, each of which describes one enum_s entry
        enums_w = space.fixedview(w_enums)
        n = len(enums_w)
        nenums = allocate_array(ffi, ENUM_S, n)
        for i in range(n):
            decoder = StringDecoder(ffi, space.bytes_w(enums_w[i]))
            rffi.setintfield(nenums[i], 'c_type_index', decoder.next_4bytes())
            rffi.setintfield(nenums[i], 'c_type_prim', decoder.next_4bytes())
            nenums[i].c_name = decoder.next_name()
            nenums[i].c_enumerators = decoder.next_name()
        ffi.ctxobj.ctx.c_enums = nenums
        rffi.setintfield(ffi.ctxobj.ctx, 'c_num_enums', n)

    if w_typenames:
        # unpack a tuple of strings, each of which describes one typename_s
        # entry
        typenames_w = space.fixedview(w_typenames)
        n = len(typenames_w)
        ntypenames = allocate_array(ffi, TYPENAME_S, n)
        for i in range(n):
            decoder = StringDecoder(ffi, space.bytes_w(typenames_w[i]))
            rffi.setintfield(ntypenames[i], 'c_type_index',
                             decoder.next_4bytes())
            ntypenames[i].c_name = decoder.next_name()
        ffi.ctxobj.ctx.c_typenames = ntypenames
        rffi.setintfield(ffi.ctxobj.ctx, 'c_num_typenames', n)

    if w_includes:
        from pypy.module._cffi_backend.ffi_obj import W_FFIObject
        #
        for w_parent_ffi in space.fixedview(w_includes):
            parent_ffi = space.interp_w(W_FFIObject, w_parent_ffi)
            ffi.included_ffis_libs.append((parent_ffi, None))
Example #31
 def setup_method(self, m):
     BaseDirectGCTest.setup_method(self, m)
     size = llmemory.sizeof(S) + self.gc.gcheaderbuilder.size_gc_header
     self.size_of_S = llmemory.raw_malloc_usage(size)
Example #32
 def op_raw_malloc_usage(self, size):
     assert lltype.typeOf(size) == lltype.Signed
     return llmemory.raw_malloc_usage(size)
Example #33
def allocate_array(ffi, OF, nitems):
    nbytes = llmemory.raw_malloc_usage(rffi.sizeof(OF))
    p = allocate(ffi, nitems * nbytes)
    return rffi.cast(rffi.CArrayPtr(OF), p)
Example #34
import py
from rpython.memory.gc.minimarkpage import ArenaCollection
from rpython.memory.gc.minimarkpage import PAGE_HEADER, PAGE_PTR
from rpython.memory.gc.minimarkpage import PAGE_NULL, WORD
from rpython.memory.gc.minimarkpage import _dummy_size
from rpython.rtyper.lltypesystem import lltype, llmemory, llarena
from rpython.rtyper.lltypesystem.llmemory import cast_ptr_to_adr

NULL = llmemory.NULL
SHIFT = WORD
hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER))


def test_allocate_arena():
    ac = ArenaCollection(SHIFT + 64 * 20, 64, 1)
    ac.allocate_new_arena()
    assert ac.num_uninitialized_pages == 20
    upages = ac.current_arena.freepages
    upages + 64 * 20  # does not raise
    py.test.raises(llarena.ArenaError, "upages + 64*20 + 1")
    #
    ac = ArenaCollection(SHIFT + 64 * 20 + 7, 64, 1)
    ac.allocate_new_arena()
    assert ac.num_uninitialized_pages == 20
    upages = ac.current_arena.freepages
    upages + 64 * 20 + 7  # does not raise
    py.test.raises(llarena.ArenaError, "upages + 64*20 + 64")


def test_allocate_new_page():
    pagesize = hdrsize + 16
Example #35
    def sweep_rawmalloced_objects(self, generation):
        # free all the rawmalloced objects of the specified generation
        # that have not been marked
        if generation == 2:
            objects = self.gen2_rawmalloced_objects
            # generation 2 sweep: if A points to an object object B that
            # moves from gen2 to gen3, it's possible that A no longer points
            # to any gen2 object.  In this case, A remains a bit too long in
            # last_generation_root_objects, but this will be fixed by the
            # next collect_last_generation_roots().
        elif generation == 3:
            objects = self.gen3_rawmalloced_objects
            # generation 3 sweep: remove from last_generation_root_objects
            # all the objects that we are about to free
            gen3roots = self.last_generation_root_objects
            newgen3roots = self.AddressStack()
            while gen3roots.non_empty():
                obj = gen3roots.pop()
                if not (self.header(obj).tid & GCFLAG_UNVISITED):
                    newgen3roots.append(obj)
            gen3roots.delete()
            self.last_generation_root_objects = newgen3roots
        else:
            ll_assert(False, "bogus 'generation'")
            return 0  # to please the flowspace

        surviving_objects = self.AddressStack()
        # Help the flow space
        alive_count = alive_size = dead_count = dead_size = 0
        debug = have_debug_prints()
        while objects.non_empty():
            obj = objects.pop()
            tid = self.header(obj).tid
            if tid & GCFLAG_UNVISITED:
                if debug:
                    dead_count += 1
                    dead_size += raw_malloc_usage(self.get_size_incl_hash(obj))
                addr = obj - self.gcheaderbuilder.size_gc_header
                llmemory.raw_free(addr)
            else:
                if debug:
                    alive_count += 1
                alive_size += raw_malloc_usage(self.get_size_incl_hash(obj))
                if generation == 3:
                    surviving_objects.append(obj)
                elif generation == 2:
                    ll_assert((tid & GCFLAG_AGE_MASK) < GCFLAG_AGE_MAX,
                              "wrong age for generation 2 object")
                    tid += GCFLAG_AGE_ONE
                    if (tid & GCFLAG_AGE_MASK) == GCFLAG_AGE_MAX:
                        # the object becomes part of generation 3
                        self.gen3_rawmalloced_objects.append(obj)
                        # GCFLAG_NO_HEAP_PTRS not set yet, conservatively
                        self.last_generation_root_objects.append(obj)
                    else:
                        # the object stays in generation 2
                        tid |= GCFLAG_UNVISITED
                        surviving_objects.append(obj)
                    self.header(obj).tid = tid
        objects.delete()
        if generation == 2:
            self.gen2_rawmalloced_objects = surviving_objects
        elif generation == 3:
            self.gen3_rawmalloced_objects = surviving_objects
        debug_print("| [hyb] gen", generation, "nonmoving now alive: ",
                    alive_size, "bytes in", alive_count, "objs")
        debug_print("| [hyb] gen", generation, "nonmoving freed:     ",
                    dead_size, "bytes in", dead_count, "objs")
        return alive_size
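The aging scheme in the generation-2 branch above, run standalone with illustrative flag values (the real GCFLAG_* bit layout lives in hybrid.py and may differ):

GCFLAG_AGE_ONE = 1 << 8                 # assumed bit layout
GCFLAG_AGE_MAX = 3 << 8
GCFLAG_AGE_MASK = 3 << 8

tid, survived = 0, 0
while (tid & GCFLAG_AGE_MASK) != GCFLAG_AGE_MAX:
    tid += GCFLAG_AGE_ONE               # one more gen-2 collection survived
    survived += 1
assert survived == 3                    # then promoted to generation 3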
Example #36
 def reserve(i):
     b = a + i * llmemory.raw_malloc_usage(precomputed_size)
     arena_reserve(b, precomputed_size)
     return b
Example #37
def test_arena():
    S = lltype.Struct('S', ('x', lltype.Signed))
    SPTR = lltype.Ptr(S)
    ssize = llmemory.raw_malloc_usage(llmemory.sizeof(S))
    myarenasize = 2 * ssize + 1
    a = arena_malloc(myarenasize, False)
    assert a != llmemory.NULL
    assert a + 3 != llmemory.NULL

    arena_reserve(a, llmemory.sizeof(S))
    s1_ptr1 = cast_adr_to_ptr(a, SPTR)
    s1_ptr1.x = 1
    s1_ptr2 = cast_adr_to_ptr(a, SPTR)
    assert s1_ptr2.x == 1
    assert s1_ptr1 == s1_ptr2

    py.test.raises(
        ArenaError,
        arena_reserve,
        a + ssize + 1,  # misaligned
        llmemory.sizeof(S))
    arena_reserve(a + ssize + 1, llmemory.sizeof(S), check_alignment=False)
    s2_ptr1 = cast_adr_to_ptr(a + ssize + 1, SPTR)
    py.test.raises(lltype.UninitializedMemoryAccess, 's2_ptr1.x')
    s2_ptr1.x = 2
    s2_ptr2 = cast_adr_to_ptr(a + ssize + 1, SPTR)
    assert s2_ptr2.x == 2
    assert s2_ptr1 == s2_ptr2
    assert s1_ptr1 != s2_ptr1
    assert not (s2_ptr2 == s1_ptr2)
    assert s1_ptr1 == cast_adr_to_ptr(a, SPTR)

    S2 = lltype.Struct('S2', ('y', lltype.Char))
    S2PTR = lltype.Ptr(S2)
    py.test.raises(lltype.InvalidCast, cast_adr_to_ptr, a, S2PTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a + 1, SPTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a + ssize, SPTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a + 2 * ssize, SPTR)
    py.test.raises(ArenaError, cast_adr_to_ptr, a + 2 * ssize + 1, SPTR)
    py.test.raises(ArenaError, arena_reserve, a + 1, llmemory.sizeof(S), False)
    py.test.raises(ArenaError, arena_reserve, a + ssize, llmemory.sizeof(S),
                   False)
    py.test.raises(ArenaError, arena_reserve, a + 2 * ssize,
                   llmemory.sizeof(S), False)
    py.test.raises(ArenaError, arena_reserve, a + 2 * ssize + 1,
                   llmemory.sizeof(S), False)

    arena_reset(a, myarenasize, True)
    py.test.raises(ArenaError, cast_adr_to_ptr, a, SPTR)
    arena_reserve(a, llmemory.sizeof(S))
    s1_ptr1 = cast_adr_to_ptr(a, SPTR)
    assert s1_ptr1.x == 0
    s1_ptr1.x = 5

    arena_reserve(a + ssize, llmemory.sizeof(S2), check_alignment=False)
    s2_ptr1 = cast_adr_to_ptr(a + ssize, S2PTR)
    assert s2_ptr1.y == '\x00'
    s2_ptr1.y = 'X'

    assert cast_adr_to_ptr(a + 0, SPTR).x == 5
    assert cast_adr_to_ptr((a + ssize + 1) - 1, S2PTR).y == 'X'

    assert (a + 4) - (a + 1) == 3
Example #38
    def build_cif_descr(self, space):
        arg_types_w = self.arg_types_w
        w_ret_type = self.w_ret_type
        assert isinstance(w_ret_type, W_TypeObject)

        ffi_arg_types = []
        for w_arg_type in arg_types_w:
            assert isinstance(w_arg_type, W_TypeObject)
            ffi_arg_type = ffitype.ffi_types[w_arg_type.typeindex]
            ffi_arg_types.append(ffi_arg_type)
        ffi_ret_type = ffitype.ffi_types[w_ret_type.typeindex]

        nargs = len(ffi_arg_types)
        # XXX combine both mallocs with alignment
        size = llmemory.raw_malloc_usage(
            llmemory.sizeof(CIF_DESCRIPTION, nargs))
        if we_are_translated():
            cif_descr = lltype.malloc(rffi.CCHARP.TO, size, flavor='raw')
            cif_descr = rffi.cast(CIF_DESCRIPTION_P, cif_descr)
        else:
            # gross overestimation of the length below, but too bad
            cif_descr = lltype.malloc(CIF_DESCRIPTION_P.TO, size, flavor='raw')
        assert cif_descr
        #
        size = rffi.sizeof(FFI_TYPE_P) * nargs
        atypes = lltype.malloc(rffi.CCHARP.TO, size, flavor='raw')
        atypes = rffi.cast(FFI_TYPE_PP, atypes)
        assert atypes
        #
        cif_descr.abi = clibffi.FFI_DEFAULT_ABI
        cif_descr.nargs = nargs
        cif_descr.rtype = ffi_ret_type
        cif_descr.atypes = atypes
        #
        # first, enough room for an array of 'nargs' pointers
        exchange_offset = rffi.sizeof(rffi.CCHARP) * nargs
        exchange_offset = self.align_arg(exchange_offset)
        cif_descr.exchange_result = exchange_offset
        # cif_descr.exchange_result_libffi = exchange_offset
        #
        if BIG_ENDIAN:
            assert 0, 'missing support'
            # see _cffi_backend in pypy
        # then enough room for the result, rounded up to sizeof(ffi_arg)
        exchange_offset += max(rffi.getintfield(ffi_ret_type, 'c_size'),
                               SIZE_OF_FFI_ARG)

        # loop over args
        for i, ffi_arg in enumerate(ffi_arg_types):
            # XXX do we need the "must free" logic?
            exchange_offset = self.align_arg(exchange_offset)
            cif_descr.exchange_args[i] = exchange_offset
            atypes[i] = ffi_arg
            exchange_offset += rffi.getintfield(ffi_arg, 'c_size')

        # store the exchange data size
        cif_descr.exchange_size = exchange_offset
        #
        status = jit_ffi_prep_cif(cif_descr)
        #
        if status != clibffi.FFI_OK:
            raise space.error(space.w_RuntimeError,
                              "libffi failed to build this function type")
        #
        return cif_descr
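A worked layout for the exchange buffer computed above, under editorial assumptions (8-byte pointers, two int32 arguments, SIZE_OF_FFI_ARG = 8, little endian, and ignoring the align_arg() rounding for brevity):

ptr_size, ffi_arg_size, int32_size = 8, 8, 4
exchange_offset = ptr_size * 2                 # array of 2 argument pointers
exchange_result = exchange_offset              # result written right after
exchange_offset += max(int32_size, ffi_arg_size)
exchange_args = []
for _ in range(2):
    exchange_args.append(exchange_offset)
    exchange_offset += int32_size
assert (exchange_result, exchange_args, exchange_offset) == (16, [24, 28], 32)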