def g():
    ssize = llarena.round_up_for_allocation(llmemory.sizeof(S))
    msize = llarena.round_up_for_allocation(llmemory.sizeof(M))
    smsize = llarena.round_up_for_allocation(llmemory.sizeof(S),
                                             llmemory.sizeof(M))
    mssize = llarena.round_up_for_allocation(llmemory.sizeof(M),
                                             llmemory.sizeof(S))
    return ssize, msize, smsize, mssize
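# Illustrative pure-Python sketch of the semantics exercised by g() above,
# assuming a word size of 8 bytes and that the optional second argument of
# round_up_for_allocation() acts as a minimal result size (as the callers
# further down suggest).  'demo_round_up' and 'DEMO_WORD' are hypothetical
# names for this sketch, not part of llarena.
DEMO_WORD = 8

def demo_round_up(size, minsize=0):
    size = max(size, minsize)
    return (size + DEMO_WORD - 1) & ~(DEMO_WORD - 1)

assert demo_round_up(1) == DEMO_WORD       # tiny sizes grow to one word
assert demo_round_up(9) == 2 * DEMO_WORD   # otherwise round up to a multiple
assert demo_round_up(1, minsize=24) == 24  # the minimal size takes precedence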
def test_look_inside_object():
    # this code is also used in translation tests below
    myarenasize = 50
    a = arena_malloc(myarenasize, False)
    b = a + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(b, precomputed_size)
    (b + llmemory.offsetof(SX, 'x')).signed[0] = 123
    assert llmemory.cast_adr_to_ptr(b, SPTR).x == 123
    llmemory.cast_adr_to_ptr(b, SPTR).x += 1
    assert (b + llmemory.offsetof(SX, 'x')).signed[0] == 124
    arena_reset(a, myarenasize, True)
    arena_reserve(b, round_up_for_allocation(llmemory.sizeof(SX)))
    assert llmemory.cast_adr_to_ptr(b, SPTR).x == 0
    arena_free(a)
    return 42
def test_address_order():
    a = arena_malloc(24, False)
    assert eq(a, a)
    assert lt(a, a + 1)
    assert lt(a + 5, a + 20)

    b = arena_malloc(24, False)
    if a > b:
        a, b = b, a
    assert lt(a, b)
    assert lt(a + 19, b)
    assert lt(a, b + 19)

    c = b + round_up_for_allocation(llmemory.sizeof(lltype.Char))
    arena_reserve(c, precomputed_size)
    assert lt(b, c)
    assert lt(a, c)
    assert lt(llmemory.NULL, c)
    d = c + llmemory.offsetof(SX, 'x')
    assert lt(c, d)
    assert lt(b, d)
    assert lt(a, d)
    assert lt(llmemory.NULL, d)
    e = c + precomputed_size
    assert lt(d, e)
    assert lt(c, e)
    assert lt(b, e)
    assert lt(a, e)
    assert lt(llmemory.NULL, e)
def malloc_varsize_collecting_nursery(self, totalsize):
    result = self.collect_nursery()
    ll_assert(raw_malloc_usage(totalsize) <= self.nursery_top - result,
              "not enough room in malloc_varsize_collecting_nursery()")
    llarena.arena_reserve(result, totalsize)
    self.nursery_free = result + llarena.round_up_for_allocation(totalsize)
    return result
def _get_size_for_typeid(self, obj, typeid):
    size = self.fixed_size(typeid)
    if self.is_varsize(typeid):
        lenaddr = obj + self.varsize_offset_to_length(typeid)
        length = lenaddr.signed[0]
        size += length * self.varsize_item_sizes(typeid)
        size = llarena.round_up_for_allocation(size)
        # XXX maybe we should parametrize round_up_for_allocation()
        # per GC; if we do, we also need to fix the call in
        # gctypelayout.encode_type_shape()
    return size
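# Worked example of the varsize computation in _get_size_for_typeid(),
# with purely illustrative numbers (not taken from a real GC): a type with
# a 16-byte fixed part and 4-byte items, at length 5, needs
# 16 + 5 * 4 = 36 bytes, which rounds up to 40 on a 64-bit machine (WORD = 8).
assert 16 + 5 * 4 == 36
assert (36 + 8 - 1) & ~(8 - 1) == 40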
def encode_type_shape(builder, info, TYPE, index):
    """Encode the shape of the TYPE into the TYPE_INFO structure 'info'."""
    offsets = offsets_to_gc_pointers(TYPE)
    infobits = index
    info.ofstoptrs = builder.offsets2table(offsets, TYPE)
    if len(offsets) > 0:
        infobits |= T_HAS_GCPTR
    #
    fptrs = builder.special_funcptr_for_type(TYPE)
    if fptrs:
        if "finalizer" in fptrs:
            info.finalizer = fptrs["finalizer"]
        if "light_finalizer" in fptrs:
            info.finalizer = fptrs["light_finalizer"]
            infobits |= T_HAS_LIGHTWEIGHT_FINALIZER
    #
    if not TYPE._is_varsize():
        info.fixedsize = llarena.round_up_for_allocation(
            llmemory.sizeof(TYPE), builder.GCClass.object_minimal_size)
        # note about round_up_for_allocation(): in the 'info' table
        # we put a rounded-up size only for fixed-size objects.  For
        # varsize ones, the GC must anyway compute the size at run-time
        # and round up that result.
    else:
        infobits |= T_IS_VARSIZE
        varinfo = lltype.cast_pointer(GCData.VARSIZE_TYPE_INFO_PTR, info)
        info.fixedsize = llmemory.sizeof(TYPE, 0)
        if isinstance(TYPE, lltype.Struct):
            ARRAY = TYPE._flds[TYPE._arrayfld]
            ofs1 = llmemory.offsetof(TYPE, TYPE._arrayfld)
            varinfo.ofstolength = ofs1 + llmemory.ArrayLengthOffset(ARRAY)
            varinfo.ofstovar = ofs1 + llmemory.itemoffsetof(ARRAY, 0)
        else:
            assert isinstance(TYPE, lltype.GcArray)
            ARRAY = TYPE
            if (isinstance(ARRAY.OF, lltype.Ptr)
                and ARRAY.OF.TO._gckind == 'gc'):
                infobits |= T_IS_GCARRAY_OF_GCPTR
            varinfo.ofstolength = llmemory.ArrayLengthOffset(ARRAY)
            varinfo.ofstovar = llmemory.itemoffsetof(TYPE, 0)
        assert isinstance(ARRAY, lltype.Array)
        if ARRAY.OF != lltype.Void:
            offsets = offsets_to_gc_pointers(ARRAY.OF)
        else:
            offsets = ()
        if len(offsets) > 0:
            infobits |= T_HAS_GCPTR_IN_VARSIZE | T_HAS_GCPTR
        varinfo.varofstoptrs = builder.offsets2table(offsets, ARRAY.OF)
        varinfo.varitemsize = llmemory.sizeof(ARRAY.OF)
    if builder.is_weakref_type(TYPE):
        infobits |= T_IS_WEAKREF
    if is_subclass_of_object(TYPE):
        infobits |= T_IS_RPYTHON_INSTANCE
    info.infobits = infobits | T_KEY_VALUE
def encode_type_shape(builder, info, TYPE, index):
    """Encode the shape of the TYPE into the TYPE_INFO structure 'info'."""
    offsets = offsets_to_gc_pointers(TYPE)
    infobits = index
    info.ofstoptrs = builder.offsets2table(offsets, TYPE)
    if len(offsets) > 0:
        infobits |= T_HAS_GCPTR
    #
    fptrs = builder.special_funcptr_for_type(TYPE)
    if fptrs:
        if "destructor" in fptrs:
            info.customfunc = fptrs["destructor"]
        if "old_style_finalizer" in fptrs:
            info.customfunc = fptrs["old_style_finalizer"]
            infobits |= T_HAS_OLDSTYLE_FINALIZER
    #
    if not TYPE._is_varsize():
        info.fixedsize = llarena.round_up_for_allocation(
            llmemory.sizeof(TYPE), builder.GCClass.object_minimal_size)
        # note about round_up_for_allocation(): in the 'info' table
        # we put a rounded-up size only for fixed-size objects.  For
        # varsize ones, the GC must anyway compute the size at run-time
        # and round up that result.
    else:
        infobits |= T_IS_VARSIZE
        varinfo = lltype.cast_pointer(GCData.VARSIZE_TYPE_INFO_PTR, info)
        info.fixedsize = llmemory.sizeof(TYPE, 0)
        if isinstance(TYPE, lltype.Struct):
            ARRAY = TYPE._flds[TYPE._arrayfld]
            ofs1 = llmemory.offsetof(TYPE, TYPE._arrayfld)
            varinfo.ofstolength = ofs1 + llmemory.ArrayLengthOffset(ARRAY)
            varinfo.ofstovar = ofs1 + llmemory.itemoffsetof(ARRAY, 0)
        else:
            assert isinstance(TYPE, lltype.GcArray)
            ARRAY = TYPE
            if (isinstance(ARRAY.OF, lltype.Ptr)
                and ARRAY.OF.TO._gckind == 'gc'):
                infobits |= T_IS_GCARRAY_OF_GCPTR
            varinfo.ofstolength = llmemory.ArrayLengthOffset(ARRAY)
            varinfo.ofstovar = llmemory.itemoffsetof(TYPE, 0)
        assert isinstance(ARRAY, lltype.Array)
        if ARRAY.OF != lltype.Void:
            offsets = offsets_to_gc_pointers(ARRAY.OF)
        else:
            offsets = ()
        if len(offsets) > 0:
            infobits |= T_HAS_GCPTR_IN_VARSIZE | T_HAS_GCPTR
        varinfo.varofstoptrs = builder.offsets2table(offsets, ARRAY.OF)
        varinfo.varitemsize = llmemory.sizeof(ARRAY.OF)
    if builder.is_weakref_type(TYPE):
        infobits |= T_IS_WEAKREF
    if is_subclass_of_object(TYPE):
        infobits |= T_IS_RPYTHON_INSTANCE
    info.infobits = infobits | T_KEY_VALUE
def round_up_for_allocation(self, size):
    if not self.gc_ll_descr.round_up:
        return size
    if self.gc_ll_descr.translate_support_code:
        from rpython.rtyper.lltypesystem import llarena
        return llarena.round_up_for_allocation(
            size, self.gc_ll_descr.minimal_size_in_nursery)
    else:
        # non-translated: do it manually
        # assume that "self.gc_ll_descr.minimal_size_in_nursery" is 2 WORDs
        size = max(size, 2 * WORD)
        return (size + WORD - 1) & ~(WORD - 1)     # round up
def round_up_for_allocation(self, size):
    if not self.round_up:
        return size
    if self.translate_support_code:
        from rpython.rtyper.lltypesystem import llarena
        return llarena.round_up_for_allocation(
            size, self.minimal_size_in_nursery)
    else:
        # non-translated: do it manually
        # assume that "self.minimal_size_in_nursery" is 2 WORDs
        size = max(size, 2 * WORD)
        return (size + WORD - 1) & ~(WORD - 1)     # round up
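# Sanity check of the bit trick in the non-translated path above: adding
# WORD - 1 and masking off the low bits rounds any positive size up to the
# next multiple of WORD.  Self-contained illustrative check, assuming
# WORD = 8; '_check_round_up_trick' is a name invented for this sketch:
def _check_round_up_trick(word=8):
    for size in range(1, 4 * word + 1):
        rounded = (size + word - 1) & ~(word - 1)
        assert rounded % word == 0 and size <= rounded < size + word

_check_round_up_trick()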
def malloc_varsize_clear(self, typeid16, length, size, itemsize,
                         offset_to_length):
    size_gc_header = self.gcheaderbuilder.size_gc_header
    nonvarsize = size_gc_header + size
    try:
        varsize = ovfcheck(itemsize * length)
        totalsize = ovfcheck(nonvarsize + varsize)
    except OverflowError:
        raise memoryError
    result = self.free
    if raw_malloc_usage(totalsize) > self.top_of_space - result:
        result = self.obtain_free_space(totalsize)
    llarena.arena_reserve(result, totalsize)
    self.init_gc_object(result, typeid16)
    (result + size_gc_header + offset_to_length).signed[0] = length
    self.free = result + llarena.round_up_for_allocation(totalsize)
    return llmemory.cast_adr_to_ptr(result + size_gc_header,
                                    llmemory.GCREF)
def malloc_varsize_clear(self, typeid, length, size, itemsize,
                         offset_to_length):
    # Only use the nursery if there are not too many items.
    if not raw_malloc_usage(itemsize):
        too_many_items = False
    else:
        # The following line is usually constant-folded because both
        # min_nursery_size and itemsize are constants (the latter
        # due to inlining).
        maxlength_for_minimal_nursery = (self.min_nursery_size // 4 //
                                         raw_malloc_usage(itemsize))
        # The actual maximum length for our nursery depends on how
        # many times our nursery is bigger than the minimal size.
        # The computation is done in this roundabout way so that the
        # only remaining computation is the following shift.
        maxlength = maxlength_for_minimal_nursery << self.nursery_scale
        too_many_items = length > maxlength

    if (too_many_items or
        (raw_malloc_usage(size) > self.lb_young_var_basesize and
         raw_malloc_usage(size) > self.largest_young_var_basesize)):
        # ^^^ we do two size comparisons; the first one appears redundant,
        #     but it can be constant-folded if 'size' is a constant; then
        #     it almost always folds down to False, which kills the
        #     second comparison as well.
        return SemiSpaceGC.malloc_varsize_clear(self, typeid, length,
                                                size, itemsize,
                                                offset_to_length)
    # with the above checks we know now that totalsize cannot be more
    # than about half of the nursery size; in particular, the + and *
    # cannot overflow
    size_gc_header = self.gcheaderbuilder.size_gc_header
    totalsize = size_gc_header + size + itemsize * length
    result = self.nursery_free
    if raw_malloc_usage(totalsize) > self.nursery_top - result:
        result = self.collect_nursery()
    llarena.arena_reserve(result, totalsize)
    # GCFLAG_NO_YOUNG_PTRS is never set on young objs
    self.init_gc_object(result, typeid, flags=0)
    (result + size_gc_header + offset_to_length).signed[0] = length
    self.nursery_free = result + llarena.round_up_for_allocation(totalsize)
    return llmemory.cast_adr_to_ptr(result + size_gc_header,
                                    llmemory.GCREF)
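# The "roundabout way" above is equivalent, up to integer-division rounding,
# to taking a quarter of the *actual* nursery size divided by itemsize,
# where the actual nursery size is min_nursery_size << nursery_scale.
# Quick check with assumed, purely illustrative values:
_min_nursery_size, _nursery_scale, _itemsize = 4096, 3, 8
assert ((_min_nursery_size // 4 // _itemsize) << _nursery_scale ==
        (_min_nursery_size << _nursery_scale) // 4 // _itemsize)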
def malloc_varsize_clear(self, typeid, length, size, itemsize,
                         offset_to_length):
    size_gc_header = self.gcheaderbuilder.size_gc_header
    nonvarsize = size_gc_header + size

    # Compute the maximal length that makes the object still
    # below 'nonlarge_max'.  All the following logic is usually
    # constant-folded because self.nonlarge_max, size and itemsize
    # are all constants (the arguments are constant due to
    # inlining) and self.has_gcptr_in_varsize() is constant-folded.
    if self.has_gcptr_in_varsize(typeid):
        nonlarge_max = self.nonlarge_gcptrs_max
    else:
        nonlarge_max = self.nonlarge_max
    if not raw_malloc_usage(itemsize):
        too_many_items = raw_malloc_usage(nonvarsize) > nonlarge_max
    else:
        maxlength = nonlarge_max - raw_malloc_usage(nonvarsize)
        maxlength = maxlength // raw_malloc_usage(itemsize)
        too_many_items = length > maxlength

    if not too_many_items:
        # With the above checks we know now that totalsize cannot be more
        # than 'nonlarge_max'; in particular, the + and * cannot overflow.
        # Let's try to fit the object in the nursery.
        totalsize = nonvarsize + itemsize * length
        result = self.nursery_free
        if raw_malloc_usage(totalsize) <= self.nursery_top - result:
            llarena.arena_reserve(result, totalsize)
            # GCFLAG_NO_YOUNG_PTRS is never set on young objs
            self.init_gc_object(result, typeid, flags=0)
            (result + size_gc_header + offset_to_length).signed[0] = length
            self.nursery_free = result + llarena.round_up_for_allocation(
                totalsize)
            return llmemory.cast_adr_to_ptr(result + size_gc_header,
                                            llmemory.GCREF)
    return self.malloc_varsize_slowpath(typeid, length)
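# Quick check of the 'maxlength' bound computed above: for any length up to
# maxlength, nonvarsize + itemsize * length stays within 'nonlarge_max', so
# the + and * cannot overflow.  The constants below are illustrative
# stand-ins, not values from a real GC configuration:
def _check_maxlength_bound(nonlarge_max=3584, nonvarsize=24, itemsize=8):
    maxlength = (nonlarge_max - nonvarsize) // itemsize
    assert nonvarsize + itemsize * maxlength <= nonlarge_max
    assert nonvarsize + itemsize * (maxlength + 1) > nonlarge_max

_check_maxlength_bound()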
# module-level definitions used by test_address_order() and
# test_look_inside_object() above
SX = lltype.Struct('S', ('foo', lltype.Signed), ('x', lltype.Signed))
SPTR = lltype.Ptr(SX)
precomputed_size = round_up_for_allocation(llmemory.sizeof(SX))