def g():
    """Return four rounded-up allocation sizes: S and M alone, then each
    with the other's size passed as the second (minimal-size) argument."""
    ssize = llarena.round_up_for_allocation(llmemory.sizeof(S))
    msize = llarena.round_up_for_allocation(llmemory.sizeof(M))
    smsize = llarena.round_up_for_allocation(llmemory.sizeof(S),
                                             llmemory.sizeof(M))
    mssize = llarena.round_up_for_allocation(llmemory.sizeof(M),
                                             llmemory.sizeof(S))
    return ssize, msize, smsize, mssize
def test_look_inside_object():
    """Reserve an object inside an arena and read/write its 'x' field.

    This code is also used in translation tests below.
    """
    myarenasize = 50
    a = arena_malloc(myarenasize, False)
    # 'b' is misaligned by one char-sized (rounded-up) unit on purpose
    b = a + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(b, precomputed_size)
    (b + llmemory.offsetof(SX, 'x')).signed[0] = 123
    assert llmemory.cast_adr_to_ptr(b, SPTR).x == 123
    llmemory.cast_adr_to_ptr(b, SPTR).x += 1
    assert (b + llmemory.offsetof(SX, 'x')).signed[0] == 124
    # resetting with zero=True must clear the re-reserved object
    arena_reset(a, myarenasize, True)
    arena_reserve(b, round_up_for_allocation(llmemory.sizeof(SX)))
    assert llmemory.cast_adr_to_ptr(b, SPTR).x == 0
    arena_free(a)
    return 42
def test_gc_offsets():
    """Check that compiled symbolic offsets decode to plausible values."""
    STRUCT = lltype.GcStruct("S1", ("x", lltype.Signed), ("y", lltype.Char))
    ARRAY = lltype.GcArray(lltype.Signed)
    s1 = llarena.round_up_for_allocation(llmemory.sizeof(STRUCT))
    s2 = llmemory.offsetof(STRUCT, "x")
    s3 = llmemory.ArrayLengthOffset(ARRAY)
    s4 = llmemory.sizeof(ARRAY, 0)
    s5 = llmemory.ArrayItemsOffset(ARRAY)

    def fn():
        # pack the five values into one integer, two decimal digits each
        return s1 * 100000000 + s2 * 1000000 + s3 * 10000 + s4 * 100 + s5

    mod, f = compile_test(fn, [], gcpolicy="semispace")
    res = f()
    i1 = (res // 100000000) % 100
    i2 = (res // 1000000) % 100
    i3 = (res // 10000) % 100
    i4 = (res // 100) % 100
    i5 = (res // 1) % 100
    assert i1 % 4 == 0
    assert 12 <= i1 <= 24
    assert 4 <= i2 <= i1 - 8
    assert 4 <= i3 <= 12
    assert i4 == i5
    assert i3 + 4 <= i5
def encode_type_shape(builder, info, TYPE):
    """Encode the shape of the TYPE into the TYPE_INFO structure 'info'."""
    offsets = offsets_to_gc_pointers(TYPE)
    info.ofstoptrs = builder.offsets2table(offsets, TYPE)
    info.finalizer = builder.make_finalizer_funcptr_for_type(TYPE)
    info.weakptrofs = weakpointer_offset(TYPE)
    if not TYPE._is_varsize():
        #info.isvarsize = False
        #info.gcptrinvarsize = False
        info.fixedsize = llarena.round_up_for_allocation(
            llmemory.sizeof(TYPE))
        info.ofstolength = -1
        # note about round_up_for_allocation(): in the 'info' table
        # we put a rounded-up size only for fixed-size objects.  For
        # varsize ones, the GC must anyway compute the size at run-time
        # and round up that result.
    else:
        #info.isvarsize = True
        info.fixedsize = llmemory.sizeof(TYPE, 0)
        if isinstance(TYPE, lltype.Struct):
            # varsize struct: the array is an inlined field
            ARRAY = TYPE._flds[TYPE._arrayfld]
            ofs1 = llmemory.offsetof(TYPE, TYPE._arrayfld)
            info.ofstolength = ofs1 + llmemory.ArrayLengthOffset(ARRAY)
            info.ofstovar = ofs1 + llmemory.itemoffsetof(ARRAY, 0)
        else:
            ARRAY = TYPE
            info.ofstolength = llmemory.ArrayLengthOffset(ARRAY)
            info.ofstovar = llmemory.itemoffsetof(TYPE, 0)
        assert isinstance(ARRAY, lltype.Array)
        if ARRAY.OF != lltype.Void:
            offsets = offsets_to_gc_pointers(ARRAY.OF)
        else:
            offsets = ()
        info.varofstoptrs = builder.offsets2table(offsets, ARRAY.OF)
        info.varitemsize = llmemory.sizeof(ARRAY.OF)
def test_address_order():
    """Addresses within and across arenas must compare consistently."""
    a = arena_malloc(24, False)
    assert eq(a, a)
    assert lt(a, a + 1)
    assert lt(a + 5, a + 20)
    b = arena_malloc(24, False)
    if a > b:
        a, b = b, a        # normalize so that a < b
    assert lt(a, b)
    assert lt(a + 19, b)
    assert lt(a, b + 19)
    c = b + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(c, precomputed_size)
    assert lt(b, c)
    assert lt(a, c)
    assert lt(llmemory.NULL, c)
    d = c + llmemory.offsetof(SX, 'x')
    assert lt(c, d)
    assert lt(b, d)
    assert lt(a, d)
    assert lt(llmemory.NULL, d)
    e = c + precomputed_size
    assert lt(d, e)
    assert lt(c, e)
    assert lt(b, e)
    assert lt(a, e)
    assert lt(llmemory.NULL, e)
def malloc_varsize_clear(self, typeid, length, size, itemsize,
                         offset_to_length, can_collect,
                         has_finalizer=False):
    """Allocate a zero-filled varsize object of 'length' items.

    Raises memoryError on overflow, or when out of space and
    'can_collect' is false.
    """
    size_gc_header = self.gcheaderbuilder.size_gc_header
    nonvarsize = size_gc_header + size
    try:
        varsize = ovfcheck(itemsize * length)
        totalsize = ovfcheck(nonvarsize + varsize)
    except OverflowError:
        raise memoryError
    result = self.free
    if raw_malloc_usage(totalsize) > self.top_of_space - result:
        if not can_collect:
            raise memoryError
        result = self.obtain_free_space(totalsize)
    llarena.arena_reserve(result, totalsize)
    self.init_gc_object(result, typeid)
    (result + size_gc_header + offset_to_length).signed[0] = length
    self.free = result + llarena.round_up_for_allocation(totalsize)
    if has_finalizer:
        self.objects_with_finalizers.append(result + size_gc_header)
    return llmemory.cast_adr_to_ptr(result + size_gc_header,
                                    llmemory.GCREF)
def test_address_order():
    """Addresses within and across arenas must compare consistently."""
    a = arena_malloc(20, False)
    assert eq(a, a)
    assert lt(a, a+1)
    assert lt(a+5, a+20)
    b = arena_malloc(20, False)
    if a > b:
        a, b = b, a        # normalize so that a < b
    assert lt(a, b)
    assert lt(a+19, b)
    assert lt(a, b+19)
    c = b + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(c, precomputed_size)
    assert lt(b, c)
    assert lt(a, c)
    assert lt(llmemory.NULL, c)
    d = c + llmemory.offsetof(SX, 'x')
    assert lt(c, d)
    assert lt(b, d)
    assert lt(a, d)
    assert lt(llmemory.NULL, d)
    e = c + precomputed_size
    assert lt(d, e)
    assert lt(c, e)
    assert lt(b, e)
    assert lt(a, e)
    assert lt(llmemory.NULL, e)
def test_gc_offsets():
    """Check that compiled symbolic offsets decode to plausible values."""
    STRUCT = lltype.GcStruct('S1', ('x', lltype.Signed), ('y', lltype.Char))
    ARRAY = lltype.GcArray(lltype.Signed)
    s1 = llarena.round_up_for_allocation(llmemory.sizeof(STRUCT))
    s2 = llmemory.offsetof(STRUCT, 'x')
    s3 = llmemory.ArrayLengthOffset(ARRAY)
    s4 = llmemory.sizeof(ARRAY, 0)
    s5 = llmemory.ArrayItemsOffset(ARRAY)

    def fn():
        # pack the five values into one integer, two decimal digits each
        return (s1 * 100000000 +
                s2 * 1000000 +
                s3 * 10000 +
                s4 * 100 +
                s5)

    mod, f = compile_test(fn, [], gcpolicy="semispace")
    res = f()
    i1 = (res // 100000000) % 100
    i2 = (res // 1000000) % 100
    i3 = (res // 10000) % 100
    i4 = (res // 100) % 100
    i5 = (res // 1) % 100
    assert i1 % 4 == 0
    assert 12 <= i1 <= 24
    assert 4 <= i2 <= i1 - 8
    assert 4 <= i3 <= 12
    assert i4 == i5
    assert i3 + 4 <= i5
def encode_type_shape(builder, info, TYPE):
    """Encode the shape of the TYPE into the TYPE_INFO structure 'info'."""
    offsets = offsets_to_gc_pointers(TYPE)
    info.ofstoptrs = builder.offsets2table(offsets, TYPE)
    info.finalizer = builder.make_finalizer_funcptr_for_type(TYPE)
    info.weakptrofs = weakpointer_offset(TYPE)
    if not TYPE._is_varsize():
        #info.isvarsize = False
        #info.gcptrinvarsize = False
        info.fixedsize = llarena.round_up_for_allocation(llmemory.sizeof(TYPE))
        info.ofstolength = -1
        # note about round_up_for_allocation(): in the 'info' table
        # we put a rounded-up size only for fixed-size objects.  For
        # varsize ones, the GC must anyway compute the size at run-time
        # and round up that result.
    else:
        #info.isvarsize = True
        info.fixedsize = llmemory.sizeof(TYPE, 0)
        if isinstance(TYPE, lltype.Struct):
            # varsize struct: the array is an inlined field
            ARRAY = TYPE._flds[TYPE._arrayfld]
            ofs1 = llmemory.offsetof(TYPE, TYPE._arrayfld)
            info.ofstolength = ofs1 + llmemory.ArrayLengthOffset(ARRAY)
            info.ofstovar = ofs1 + llmemory.itemoffsetof(ARRAY, 0)
        else:
            ARRAY = TYPE
            info.ofstolength = llmemory.ArrayLengthOffset(ARRAY)
            info.ofstovar = llmemory.itemoffsetof(TYPE, 0)
        assert isinstance(ARRAY, lltype.Array)
        if ARRAY.OF != lltype.Void:
            offsets = offsets_to_gc_pointers(ARRAY.OF)
        else:
            offsets = ()
        info.varofstoptrs = builder.offsets2table(offsets, ARRAY.OF)
        info.varitemsize = llmemory.sizeof(ARRAY.OF)
def malloc_varsize_collecting_nursery(self, totalsize):
    """Collect the nursery, then reserve 'totalsize' bytes at its start."""
    result = self.collect_nursery()
    ll_assert(
        raw_malloc_usage(totalsize) <= self.nursery_top - result,
        "not enough room in malloc_varsize_collecting_nursery()")
    llarena.arena_reserve(result, totalsize)
    self.nursery_free = result + llarena.round_up_for_allocation(totalsize)
    return result
def malloc_varsize_collecting_nursery(self, totalsize):
    """Collect the nursery, then reserve 'totalsize' bytes at its start."""
    result = self.collect_nursery()
    ll_assert(raw_malloc_usage(totalsize) <= self.nursery_top - result,
              "not enough room in malloc_varsize_collecting_nursery()")
    llarena.arena_reserve(result, totalsize)
    self.nursery_free = result + llarena.round_up_for_allocation(
        totalsize)
    return result
def encode_type_shape(builder, info, TYPE, index):
    """Encode the shape of the TYPE into the TYPE_INFO structure 'info'."""
    offsets = offsets_to_gc_pointers(TYPE)
    infobits = index
    info.ofstoptrs = builder.offsets2table(offsets, TYPE)
    #
    kind_and_fptr = builder.special_funcptr_for_type(TYPE)
    if kind_and_fptr is not None:
        kind, fptr = kind_and_fptr
        info.finalizer_or_customtrace = fptr
        if kind == "finalizer":
            infobits |= T_HAS_FINALIZER
        elif kind == 'light_finalizer':
            infobits |= T_HAS_FINALIZER | T_HAS_LIGHTWEIGHT_FINALIZER
        elif kind == "custom_trace":
            infobits |= T_HAS_CUSTOM_TRACE
        else:
            assert 0, kind
    #
    if not TYPE._is_varsize():
        info.fixedsize = llarena.round_up_for_allocation(
            llmemory.sizeof(TYPE), builder.GCClass.object_minimal_size)
        # note about round_up_for_allocation(): in the 'info' table
        # we put a rounded-up size only for fixed-size objects.  For
        # varsize ones, the GC must anyway compute the size at run-time
        # and round up that result.
    else:
        infobits |= T_IS_VARSIZE
        varinfo = lltype.cast_pointer(GCData.VARSIZE_TYPE_INFO_PTR, info)
        info.fixedsize = llmemory.sizeof(TYPE, 0)
        if isinstance(TYPE, lltype.Struct):
            # varsize struct: the array is an inlined field
            ARRAY = TYPE._flds[TYPE._arrayfld]
            ofs1 = llmemory.offsetof(TYPE, TYPE._arrayfld)
            varinfo.ofstolength = ofs1 + llmemory.ArrayLengthOffset(ARRAY)
            varinfo.ofstovar = ofs1 + llmemory.itemoffsetof(ARRAY, 0)
        else:
            assert isinstance(TYPE, lltype.GcArray)
            ARRAY = TYPE
            if (isinstance(ARRAY.OF, lltype.Ptr)
                and ARRAY.OF.TO._gckind == 'gc'):
                infobits |= T_IS_GCARRAY_OF_GCPTR
            varinfo.ofstolength = llmemory.ArrayLengthOffset(ARRAY)
            varinfo.ofstovar = llmemory.itemoffsetof(TYPE, 0)
        assert isinstance(ARRAY, lltype.Array)
        if ARRAY.OF != lltype.Void:
            offsets = offsets_to_gc_pointers(ARRAY.OF)
        else:
            offsets = ()
        if len(offsets) > 0:
            infobits |= T_HAS_GCPTR_IN_VARSIZE
        varinfo.varofstoptrs = builder.offsets2table(offsets, ARRAY.OF)
        varinfo.varitemsize = llmemory.sizeof(ARRAY.OF)
    if builder.is_weakref_type(TYPE):
        infobits |= T_IS_WEAKREF
    if is_subclass_of_object(TYPE):
        infobits |= T_IS_RPYTHON_INSTANCE
    info.infobits = infobits | T_KEY_VALUE
def get_size(self, obj):
    """Return the total allocation size of the object at address 'obj'.

    For varsize objects the size is computed from the stored length and
    rounded up for allocation; fixed-size objects use the precomputed
    (already rounded) fixed size.
    """
    typeid = self.get_type_id(obj)
    size = self.fixed_size(typeid)
    if self.is_varsize(typeid):
        lenaddr = obj + self.varsize_offset_to_length(typeid)
        length = lenaddr.signed[0]
        size += length * self.varsize_item_sizes(typeid)
        size = llarena.round_up_for_allocation(size)
    return size
def encode_type_shape(builder, info, TYPE, index):
    """Encode the shape of the TYPE into the TYPE_INFO structure 'info'."""
    offsets = offsets_to_gc_pointers(TYPE)
    infobits = index
    info.ofstoptrs = builder.offsets2table(offsets, TYPE)
    #
    kind_and_fptr = builder.special_funcptr_for_type(TYPE)
    if kind_and_fptr is not None:
        kind, fptr = kind_and_fptr
        info.finalizer_or_customtrace = fptr
        if kind == "finalizer":
            infobits |= T_HAS_FINALIZER
        elif kind == "light_finalizer":
            infobits |= T_HAS_FINALIZER | T_HAS_LIGHTWEIGHT_FINALIZER
        elif kind == "custom_trace":
            infobits |= T_HAS_CUSTOM_TRACE
        else:
            assert 0, kind
    #
    if not TYPE._is_varsize():
        info.fixedsize = llarena.round_up_for_allocation(llmemory.sizeof(TYPE),
                                       builder.GCClass.object_minimal_size)
        # note about round_up_for_allocation(): in the 'info' table
        # we put a rounded-up size only for fixed-size objects.  For
        # varsize ones, the GC must anyway compute the size at run-time
        # and round up that result.
    else:
        infobits |= T_IS_VARSIZE
        varinfo = lltype.cast_pointer(GCData.VARSIZE_TYPE_INFO_PTR, info)
        info.fixedsize = llmemory.sizeof(TYPE, 0)
        if isinstance(TYPE, lltype.Struct):
            # varsize struct: the array is an inlined field
            ARRAY = TYPE._flds[TYPE._arrayfld]
            ofs1 = llmemory.offsetof(TYPE, TYPE._arrayfld)
            varinfo.ofstolength = ofs1 + llmemory.ArrayLengthOffset(ARRAY)
            varinfo.ofstovar = ofs1 + llmemory.itemoffsetof(ARRAY, 0)
        else:
            assert isinstance(TYPE, lltype.GcArray)
            ARRAY = TYPE
            if isinstance(ARRAY.OF, lltype.Ptr) and ARRAY.OF.TO._gckind == "gc":
                infobits |= T_IS_GCARRAY_OF_GCPTR
            varinfo.ofstolength = llmemory.ArrayLengthOffset(ARRAY)
            varinfo.ofstovar = llmemory.itemoffsetof(TYPE, 0)
        assert isinstance(ARRAY, lltype.Array)
        if ARRAY.OF != lltype.Void:
            offsets = offsets_to_gc_pointers(ARRAY.OF)
        else:
            offsets = ()
        if len(offsets) > 0:
            infobits |= T_HAS_GCPTR_IN_VARSIZE
        varinfo.varofstoptrs = builder.offsets2table(offsets, ARRAY.OF)
        varinfo.varitemsize = llmemory.sizeof(ARRAY.OF)
    if builder.is_weakref_type(TYPE):
        infobits |= T_IS_WEAKREF
    if is_subclass_of_object(TYPE):
        infobits |= T_IS_RPYTHON_INSTANCE
    info.infobits = infobits | T_KEY_VALUE
def _get_size_for_typeid(self, obj, typeid):
    """Return the total allocation size of 'obj', given its 'typeid'."""
    size = self.fixed_size(typeid)
    if self.is_varsize(typeid):
        lenaddr = obj + self.varsize_offset_to_length(typeid)
        length = lenaddr.signed[0]
        size += length * self.varsize_item_sizes(typeid)
        size = llarena.round_up_for_allocation(size)
        # XXX maybe we should parametrize round_up_for_allocation()
        # per GC; if we do, we also need to fix the call in
        # gctypelayout.encode_type_shape()
    return size
def round_up_for_allocation(self, size):
    """Round 'size' up the same way the GC's allocator would."""
    if not self.gc_ll_descr.round_up:
        return size
    if self.gc_ll_descr.translate_support_code:
        from pypy.rpython.lltypesystem import llarena
        return llarena.round_up_for_allocation(
            size, self.gc_ll_descr.minimal_size_in_nursery)
    else:
        # non-translated: do it manually
        # assume that "self.gc_ll_descr.minimal_size_in_nursery" is 2 WORDs
        size = max(size, 2 * WORD)
        return (size + WORD - 1) & ~(WORD - 1)     # round up
def round_up_for_allocation(self, size):
    """Round 'size' up the same way the GC's allocator would."""
    if not self.gc_ll_descr.round_up:
        return size
    if self.gc_ll_descr.translate_support_code:
        from pypy.rpython.lltypesystem import llarena
        return llarena.round_up_for_allocation(
            size, self.gc_ll_descr.minimal_size_in_nursery)
    else:
        # non-translated: do it manually
        # assume that "self.gc_ll_descr.minimal_size_in_nursery" is 2 WORDs
        size = max(size, 2 * WORD)
        return (size + WORD-1) & ~(WORD-1)     # round up
def _get_totalsize_var(self, nonvarsize, itemsize, length):
    """Return the rounded-up total size of a varsize object, or raise
    MemoryError on (near-)overflow."""
    try:
        varsize = ovfcheck(itemsize * length)
    except OverflowError:
        raise MemoryError
    # Careful to detect overflows.  The following works even if varsize
    # is almost equal to sys.maxint; morever, self.space_size is known
    # to be at least 4095 bytes smaller than sys.maxint, so this function
    # always raises instead of returning an integer >= sys.maxint-4095.
    if raw_malloc_usage(varsize) > self.space_size - raw_malloc_usage(nonvarsize):
        raise MemoryError
    return llarena.round_up_for_allocation(nonvarsize + varsize)
def _get_totalsize_var(self, nonvarsize, itemsize, length):
    """Return the rounded-up total size of a varsize object, or raise
    MemoryError on (near-)overflow."""
    try:
        varsize = ovfcheck(itemsize * length)
    except OverflowError:
        raise MemoryError
    # Careful to detect overflows.  The following works even if varsize
    # is almost equal to sys.maxint; morever, self.space_size is known
    # to be at least 4095 bytes smaller than sys.maxint, so this function
    # always raises instead of returning an integer >= sys.maxint-4095.
    if (raw_malloc_usage(varsize) >
        self.space_size - raw_malloc_usage(nonvarsize)):
        raise MemoryError
    return llarena.round_up_for_allocation(nonvarsize + varsize)
def malloc_varsize_clear(self, typeid, length, size, itemsize,
                         offset_to_length, can_collect,
                         has_finalizer=False):
    """Allocate a zero-filled varsize object, using the nursery when the
    object is small enough; otherwise fall back to the semispace path."""
    # Only use the nursery if there are not too many items.
    if not raw_malloc_usage(itemsize):
        too_many_items = False
    else:
        # The following line is usually constant-folded because both
        # min_nursery_size and itemsize are constants (the latter
        # due to inlining).
        maxlength_for_minimal_nursery = (self.min_nursery_size // 4 //
                                         raw_malloc_usage(itemsize))
        # The actual maximum length for our nursery depends on how
        # many times our nursery is bigger than the minimal size.
        # The computation is done in this roundabout way so that
        # only the only remaining computation is the following
        # shift.
        maxlength = maxlength_for_minimal_nursery << self.nursery_scale
        too_many_items = length > maxlength

    if (has_finalizer or not can_collect or
        too_many_items or
        (raw_malloc_usage(size) > self.lb_young_var_basesize and
         raw_malloc_usage(size) > self.largest_young_var_basesize)):
        # ^^^ we do two size comparisons; the first one appears redundant,
        #     but it can be constant-folded if 'size' is a constant; then
        #     it almost always folds down to False, which kills the
        #     second comparison as well.
        return SemiSpaceGC.malloc_varsize_clear(self, typeid, length, size,
                                                itemsize, offset_to_length,
                                                can_collect, has_finalizer)
    # with the above checks we know now that totalsize cannot be more
    # than about half of the nursery size; in particular, the + and *
    # cannot overflow
    size_gc_header = self.gcheaderbuilder.size_gc_header
    totalsize = size_gc_header + size + itemsize * length
    result = self.nursery_free
    if raw_malloc_usage(totalsize) > self.nursery_top - result:
        result = self.collect_nursery()
    llarena.arena_reserve(result, totalsize)
    # GCFLAG_NO_YOUNG_PTRS is never set on young objs
    self.init_gc_object(result, typeid, flags=0)
    (result + size_gc_header + offset_to_length).signed[0] = length
    self.nursery_free = result + llarena.round_up_for_allocation(totalsize)
    return llmemory.cast_adr_to_ptr(result + size_gc_header,
                                    llmemory.GCREF)
def malloc_varsize_clear(self, typeid, length, size, itemsize,
                         offset_to_length, can_collect,
                         has_finalizer=False):
    """Allocate a zero-filled varsize object, preferring the nursery for
    objects below 'nonlarge_max'; otherwise take the slow path."""
    if has_finalizer or not can_collect:
        return SemiSpaceGC.malloc_varsize_clear(self, typeid, length, size,
                                                itemsize, offset_to_length,
                                                can_collect, has_finalizer)
    size_gc_header = self.gcheaderbuilder.size_gc_header
    nonvarsize = size_gc_header + size

    # Compute the maximal length that makes the object still
    # below 'nonlarge_max'.  All the following logic is usually
    # constant-folded because self.nonlarge_max, size and itemsize
    # are all constants (the arguments are constant due to
    # inlining) and self.has_gcptr_in_varsize() is constant-folded.
    if self.has_gcptr_in_varsize(typeid):
        nonlarge_max = self.nonlarge_gcptrs_max
    else:
        nonlarge_max = self.nonlarge_max

    if not raw_malloc_usage(itemsize):
        too_many_items = raw_malloc_usage(nonvarsize) > nonlarge_max
    else:
        maxlength = nonlarge_max - raw_malloc_usage(nonvarsize)
        maxlength = maxlength // raw_malloc_usage(itemsize)
        too_many_items = length > maxlength

    if not too_many_items:
        # With the above checks we know now that totalsize cannot be more
        # than 'nonlarge_max'; in particular, the + and * cannot overflow.
        # Let's try to fit the object in the nursery.
        totalsize = nonvarsize + itemsize * length
        result = self.nursery_free
        if raw_malloc_usage(totalsize) <= self.nursery_top - result:
            llarena.arena_reserve(result, totalsize)
            # GCFLAG_NO_YOUNG_PTRS is never set on young objs
            self.init_gc_object(result, typeid, flags=0)
            (result + size_gc_header + offset_to_length).signed[0] = length
            self.nursery_free = result + llarena.round_up_for_allocation(
                totalsize)
            return llmemory.cast_adr_to_ptr(result + size_gc_header,
                                            llmemory.GCREF)
    return self.malloc_varsize_slowpath(typeid, length)
def malloc_varsize_clear(self, typeid16, length, size, itemsize,
                         offset_to_length):
    """Allocate a zero-filled varsize object of 'length' items; raise
    memoryError on size overflow."""
    size_gc_header = self.gcheaderbuilder.size_gc_header
    nonvarsize = size_gc_header + size
    try:
        varsize = ovfcheck(itemsize * length)
        totalsize = ovfcheck(nonvarsize + varsize)
    except OverflowError:
        raise memoryError
    result = self.free
    if raw_malloc_usage(totalsize) > self.top_of_space - result:
        result = self.obtain_free_space(totalsize)
    llarena.arena_reserve(result, totalsize)
    self.init_gc_object(result, typeid16)
    (result + size_gc_header + offset_to_length).signed[0] = length
    self.free = result + llarena.round_up_for_allocation(totalsize)
    return llmemory.cast_adr_to_ptr(result + size_gc_header,
                                    llmemory.GCREF)
def malloc_varsize_clear(self, typeid, length, size, itemsize,
                         offset_to_length, can_collect,
                         has_finalizer=False):
    """Allocate a zero-filled varsize object, using the nursery when the
    object is small enough; otherwise fall back to the semispace path."""
    # Only use the nursery if there are not too many items.
    if not raw_malloc_usage(itemsize):
        too_many_items = False
    else:
        # The following line is usually constant-folded because both
        # min_nursery_size and itemsize are constants (the latter
        # due to inlining).
        maxlength_for_minimal_nursery = (self.min_nursery_size // 4 //
                                         raw_malloc_usage(itemsize))
        # The actual maximum length for our nursery depends on how
        # many times our nursery is bigger than the minimal size.
        # The computation is done in this roundabout way so that
        # only the only remaining computation is the following
        # shift.
        maxlength = maxlength_for_minimal_nursery << self.nursery_scale
        too_many_items = length > maxlength

    if (has_finalizer or not can_collect or
        too_many_items or
        (raw_malloc_usage(size) > self.lb_young_var_basesize and
         raw_malloc_usage(size) > self.largest_young_var_basesize)):
        # ^^^ we do two size comparisons; the first one appears redundant,
        #     but it can be constant-folded if 'size' is a constant; then
        #     it almost always folds down to False, which kills the
        #     second comparison as well.
        return SemiSpaceGC.malloc_varsize_clear(self, typeid, length, size,
                                                itemsize, offset_to_length,
                                                can_collect, has_finalizer)
    # with the above checks we know now that totalsize cannot be more
    # than about half of the nursery size; in particular, the + and *
    # cannot overflow
    size_gc_header = self.gcheaderbuilder.size_gc_header
    totalsize = size_gc_header + size + itemsize * length
    result = self.nursery_free
    if raw_malloc_usage(totalsize) > self.nursery_top - result:
        result = self.collect_nursery()
    llarena.arena_reserve(result, totalsize)
    # GCFLAG_NO_YOUNG_PTRS is never set on young objs
    self.init_gc_object(result, typeid, flags=0)
    (result + size_gc_header + offset_to_length).signed[0] = length
    self.nursery_free = result + llarena.round_up_for_allocation(totalsize)
    return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
def malloc_varsize_clear(self, typeid, length, size, itemsize,
                         offset_to_length, can_collect,
                         has_finalizer=False):
    """Allocate a zero-filled varsize object, preferring the nursery for
    objects below 'nonlarge_max'; otherwise take the slow path."""
    if has_finalizer or not can_collect:
        return SemiSpaceGC.malloc_varsize_clear(self, typeid, length, size,
                                                itemsize, offset_to_length,
                                                can_collect, has_finalizer)
    size_gc_header = self.gcheaderbuilder.size_gc_header
    nonvarsize = size_gc_header + size

    # Compute the maximal length that makes the object still
    # below 'nonlarge_max'.  All the following logic is usually
    # constant-folded because self.nonlarge_max, size and itemsize
    # are all constants (the arguments are constant due to
    # inlining) and self.has_gcptr_in_varsize() is constant-folded.
    if self.has_gcptr_in_varsize(typeid):
        nonlarge_max = self.nonlarge_gcptrs_max
    else:
        nonlarge_max = self.nonlarge_max

    if not raw_malloc_usage(itemsize):
        too_many_items = raw_malloc_usage(nonvarsize) > nonlarge_max
    else:
        maxlength = nonlarge_max - raw_malloc_usage(nonvarsize)
        maxlength = maxlength // raw_malloc_usage(itemsize)
        too_many_items = length > maxlength

    if not too_many_items:
        # With the above checks we know now that totalsize cannot be more
        # than 'nonlarge_max'; in particular, the + and * cannot overflow.
        # Let's try to fit the object in the nursery.
        totalsize = nonvarsize + itemsize * length
        result = self.nursery_free
        if raw_malloc_usage(totalsize) <= self.nursery_top - result:
            llarena.arena_reserve(result, totalsize)
            # GCFLAG_NO_YOUNG_PTRS is never set on young objs
            self.init_gc_object(result, typeid, flags=0)
            (result + size_gc_header + offset_to_length).signed[0] = length
            self.nursery_free = result + llarena.round_up_for_allocation(
                totalsize)
            return llmemory.cast_adr_to_ptr(result+size_gc_header,
                                            llmemory.GCREF)
    return self.malloc_varsize_slowpath(typeid, length)
def encode_type_shape(builder, info, TYPE, index):
    """Encode the shape of the TYPE into the TYPE_INFO structure 'info'."""
    offsets = offsets_to_gc_pointers(TYPE)
    infobits = index
    info.ofstoptrs = builder.offsets2table(offsets, TYPE)
    info.finalizer = builder.make_finalizer_funcptr_for_type(TYPE)
    if not TYPE._is_varsize():
        info.fixedsize = llarena.round_up_for_allocation(
            llmemory.sizeof(TYPE), builder.GCClass.object_minimal_size)
        # note about round_up_for_allocation(): in the 'info' table
        # we put a rounded-up size only for fixed-size objects.  For
        # varsize ones, the GC must anyway compute the size at run-time
        # and round up that result.
    else:
        infobits |= T_IS_VARSIZE
        varinfo = lltype.cast_pointer(GCData.VARSIZE_TYPE_INFO_PTR, info)
        info.fixedsize = llmemory.sizeof(TYPE, 0)
        if isinstance(TYPE, lltype.Struct):
            # varsize struct: the array is an inlined field
            ARRAY = TYPE._flds[TYPE._arrayfld]
            ofs1 = llmemory.offsetof(TYPE, TYPE._arrayfld)
            varinfo.ofstolength = ofs1 + llmemory.ArrayLengthOffset(ARRAY)
            varinfo.ofstovar = ofs1 + llmemory.itemoffsetof(ARRAY, 0)
        else:
            assert isinstance(TYPE, lltype.GcArray)
            ARRAY = TYPE
            if (isinstance(ARRAY.OF, lltype.Ptr)
                and ARRAY.OF.TO._gckind == 'gc'):
                infobits |= T_IS_GCARRAY_OF_GCPTR
            varinfo.ofstolength = llmemory.ArrayLengthOffset(ARRAY)
            varinfo.ofstovar = llmemory.itemoffsetof(TYPE, 0)
        assert isinstance(ARRAY, lltype.Array)
        if ARRAY.OF != lltype.Void:
            offsets = offsets_to_gc_pointers(ARRAY.OF)
        else:
            offsets = ()
        if len(offsets) > 0:
            infobits |= T_HAS_GCPTR_IN_VARSIZE
        varinfo.varofstoptrs = builder.offsets2table(offsets, ARRAY.OF)
        varinfo.varitemsize = llmemory.sizeof(ARRAY.OF)
    if TYPE == WEAKREF:
        infobits |= T_IS_WEAKREF
    info.infobits = infobits
def encode_type_shape(builder, info, TYPE):
    """Encode the shape of the TYPE into the TYPE_INFO structure 'info'."""
    offsets = offsets_to_gc_pointers(TYPE)
    infobits = 0
    info.ofstoptrs = builder.offsets2table(offsets, TYPE)
    info.finalizer = builder.make_finalizer_funcptr_for_type(TYPE)
    if not TYPE._is_varsize():
        info.fixedsize = llarena.round_up_for_allocation(
            llmemory.sizeof(TYPE), builder.GCClass.object_minimal_size)
        # note about round_up_for_allocation(): in the 'info' table
        # we put a rounded-up size only for fixed-size objects.  For
        # varsize ones, the GC must anyway compute the size at run-time
        # and round up that result.
    else:
        infobits |= T_IS_VARSIZE
        varinfo = lltype.cast_pointer(GCData.VARSIZE_TYPE_INFO_PTR, info)
        info.fixedsize = llmemory.sizeof(TYPE, 0)
        if isinstance(TYPE, lltype.Struct):
            # varsize struct: the array is an inlined field
            ARRAY = TYPE._flds[TYPE._arrayfld]
            ofs1 = llmemory.offsetof(TYPE, TYPE._arrayfld)
            varinfo.ofstolength = ofs1 + llmemory.ArrayLengthOffset(ARRAY)
            varinfo.ofstovar = ofs1 + llmemory.itemoffsetof(ARRAY, 0)
        else:
            assert isinstance(TYPE, lltype.GcArray)
            ARRAY = TYPE
            if (isinstance(ARRAY.OF, lltype.Ptr)
                and ARRAY.OF.TO._gckind == 'gc'):
                infobits |= T_IS_GCARRAY_OF_GCPTR
            varinfo.ofstolength = llmemory.ArrayLengthOffset(ARRAY)
            varinfo.ofstovar = llmemory.itemoffsetof(TYPE, 0)
        assert isinstance(ARRAY, lltype.Array)
        if ARRAY.OF != lltype.Void:
            offsets = offsets_to_gc_pointers(ARRAY.OF)
        else:
            offsets = ()
        if len(offsets) > 0:
            infobits |= T_HAS_GCPTR_IN_VARSIZE
        varinfo.varofstoptrs = builder.offsets2table(offsets, ARRAY.OF)
        varinfo.varitemsize = llmemory.sizeof(ARRAY.OF)
    if TYPE == WEAKREF:
        infobits |= T_IS_WEAKREF
    info.infobits = infobits
d = c + llmemory.offsetof(SX, 'x') assert lt(c, d) assert lt(b, d) assert lt(a, d) assert lt(llmemory.NULL, d) e = c + precomputed_size assert lt(d, e) assert lt(c, e) assert lt(b, e) assert lt(a, e) assert lt(llmemory.NULL, e) SX = lltype.Struct('S', ('foo', lltype.Signed), ('x', lltype.Signed)) SPTR = lltype.Ptr(SX) precomputed_size = round_up_for_allocation(llmemory.sizeof(SX)) def test_look_inside_object(): # this code is also used in translation tests below myarenasize = 50 a = arena_malloc(myarenasize, False) b = a + round_up_for_allocation(llmemory.sizeof(lltype.Char)) arena_reserve(b, precomputed_size) (b + llmemory.offsetof(SX, 'x')).signed[0] = 123 assert llmemory.cast_adr_to_ptr(b, SPTR).x == 123 llmemory.cast_adr_to_ptr(b, SPTR).x += 1 assert (b + llmemory.offsetof(SX, 'x')).signed[0] == 124 arena_reset(a, myarenasize, True) arena_reserve(b, round_up_for_allocation(llmemory.sizeof(SX))) assert llmemory.cast_adr_to_ptr(b, SPTR).x == 0
d = c + llmemory.offsetof(SX, 'x') assert lt(c, d) assert lt(b, d) assert lt(a, d) assert lt(llmemory.NULL, d) e = c + precomputed_size assert lt(d, e) assert lt(c, e) assert lt(b, e) assert lt(a, e) assert lt(llmemory.NULL, e) SX = lltype.Struct('S', ('foo',lltype.Signed), ('x',lltype.Signed)) SPTR = lltype.Ptr(SX) precomputed_size = round_up_for_allocation(llmemory.sizeof(SX)) def test_look_inside_object(): # this code is also used in translation tests below myarenasize = 50 a = arena_malloc(myarenasize, False) b = a + round_up_for_allocation(llmemory.sizeof(lltype.Char)) arena_reserve(b, precomputed_size) (b + llmemory.offsetof(SX, 'x')).signed[0] = 123 assert llmemory.cast_adr_to_ptr(b, SPTR).x == 123 llmemory.cast_adr_to_ptr(b, SPTR).x += 1 assert (b + llmemory.offsetof(SX, 'x')).signed[0] == 124 arena_reset(a, myarenasize, True) arena_reserve(b, round_up_for_allocation(llmemory.sizeof(SX))) assert llmemory.cast_adr_to_ptr(b, SPTR).x == 0 arena_free(a)