def ll_shrink_array(p, smallerlength):
    """Shrink the variable-sized part of 'p' to 'smallerlength' items.

    First asks the GC to shrink in place (llop.shrink_array); if the GC
    cannot, allocates a smaller copy, copies the one fixed field and the
    array data over, and returns the new object.
    """
    from rpython.rtyper.lltypesystem.lloperation import llop
    from rpython.rlib.objectmodel import keepalive_until_here
    if llop.shrink_array(lltype.Bool, p, smallerlength):
        return p    # done by the GC
    # XXX we assume for now that the type of p is GcStruct containing a
    # variable array, with no further pointers anywhere, and exactly one
    # field in the fixed part -- like STR and UNICODE.
    TP = lltype.typeOf(p).TO
    newp = lltype.malloc(TP, smallerlength)
    # copy the single fixed-part field (e.g. the 'hash' field of STR)
    assert len(TP._names) == 2
    field = getattr(p, TP._names[0])
    setattr(newp, TP._names[0], field)
    # raw-copy the first 'smallerlength' array items from p to newp
    ARRAY = getattr(TP, TP._arrayfld)
    offset = (llmemory.offsetof(TP, TP._arrayfld) +
              llmemory.itemoffsetof(ARRAY, 0))
    source_addr = llmemory.cast_ptr_to_adr(p) + offset
    dest_addr = llmemory.cast_ptr_to_adr(newp) + offset
    llmemory.raw_memcopy(source_addr, dest_addr,
                         llmemory.sizeof(ARRAY.OF) * smallerlength)
    # keep both objects alive across the raw address manipulation above
    keepalive_until_here(p)
    keepalive_until_here(newp)
    return newp
def consider_constant(self, TYPE, value, gc):
    """Register the prebuilt constant 'value' (of lltype TYPE) with 'gc'.

    Gives GC-managed constants an immortal GC header, and records the
    addresses of their mutable GC-pointer fields as static roots.
    Only top-level containers are processed, and each one only once.
    """
    if value is not lltype.top_container(value):
        return
    if value in self.iseen_roots:
        return
    self.iseen_roots[value] = True

    if isinstance(TYPE, lltype.GcOpaqueType):
        # unwrap the opaque type and process the real container instead
        self.consider_constant(lltype.typeOf(value.container),
                               value.container, gc)
        return

    if isinstance(TYPE, (lltype.GcStruct, lltype.GcArray)):
        # give the prebuilt object an immortal GC header
        typeid = self.get_type_id(TYPE)
        hdr = gc.gcheaderbuilder.new_header(value)
        adr = llmemory.cast_ptr_to_adr(hdr)
        gc.init_gc_object_immortal(adr, typeid)
        self.all_prebuilt_gc.append(value)

    # The following collects the addresses of all the fields that have
    # a GC Pointer type, inside the current prebuilt object. All such
    # fields are potential roots: unless the structure is immutable,
    # they could be changed later to point to GC heap objects.
    adr = llmemory.cast_ptr_to_adr(value._as_ptr())
    if TYPE._gckind == "gc":
        if gc.prebuilt_gc_objects_are_static_roots or gc.DEBUG:
            appendto = self.addresses_of_static_ptrs
        else:
            # this GC does not need prebuilt GC objects as static roots
            return
    else:
        appendto = self.addresses_of_static_ptrs_in_nongc
    for a in gc_pointers_inside(value, adr, mutable_only=True):
        appendto.append(a)
def pin_referenced_from_old(self, collect_func):
    """Check that a pinned object referenced by an old object stays put.

    scenario: an old object points to a pinned one.  Check if the pinned
    object is correctly kept in the nursery and not moved.
    """
    # create old object
    old_ptr = self.malloc(S)
    old_ptr.someInt = 900
    self.stackroots.append(old_ptr)
    assert self.stackroots[0] == old_ptr    # validate our assumption
    collect_func()    # make it old: move it out of the nursery
    old_ptr = self.stackroots[0]
    assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(old_ptr))
    #
    # create young pinned one and let the old one reference the young one
    pinned_ptr = self.malloc(T)
    pinned_ptr.someInt = 100
    self.write(old_ptr, 'next', pinned_ptr)
    pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
    assert self.gc.pin(pinned_adr)
    assert self.gc.is_in_nursery(pinned_adr)
    assert old_ptr.next.someInt == 100
    assert self.gc.pinned_objects_in_nursery == 1
    #
    # do a collection run and make sure the pinned one didn't move
    collect_func()
    assert old_ptr.next.someInt == pinned_ptr.someInt == 100
    assert llmemory.cast_ptr_to_adr(old_ptr.next) == pinned_adr
    assert self.gc.is_in_nursery(pinned_adr)
def test_gc_pointers_inside():
    """gc_pointers_inside(mutable_only=True) must skip immutable fields.

    S1: fully mutable -> both fields reported.
    S2: fully immutable -> nothing reported.
    S3: per-field immutability -> only non-IR_IMMUTABLE fields reported
        (quasi-immutable fields still count as mutable).
    """
    from rpython.rtyper import rclass
    PT = lltype.Ptr(lltype.GcStruct('T'))
    S1 = lltype.GcStruct('S', ('x', PT), ('y', PT))
    S2 = lltype.GcStruct('S', ('x', PT), ('y', PT),
                         hints={'immutable': True})
    accessor = rclass.FieldListAccessor()
    S3 = lltype.GcStruct('S', ('x', PT), ('y', PT),
                         hints={'immutable_fields': accessor})
    accessor.initialize(S3, {'x': IR_IMMUTABLE, 'y': IR_QUASIIMMUTABLE})
    #
    s1 = lltype.malloc(S1)
    adr = llmemory.cast_ptr_to_adr(s1)
    lst = list(gc_pointers_inside(s1._obj, adr, mutable_only=True))
    expected = [adr + llmemory.offsetof(S1, 'x'),
                adr + llmemory.offsetof(S1, 'y')]
    # iteration order of the fields is not guaranteed
    assert lst == expected or lst == expected[::-1]
    #
    s2 = lltype.malloc(S2)
    adr = llmemory.cast_ptr_to_adr(s2)
    lst = list(gc_pointers_inside(s2._obj, adr, mutable_only=True))
    assert lst == []
    #
    s3 = lltype.malloc(S3)
    adr = llmemory.cast_ptr_to_adr(s3)
    lst = list(gc_pointers_inside(s3._obj, adr, mutable_only=True))
    # 'x' is IR_IMMUTABLE and skipped; 'y' is only quasi-immutable
    assert lst == [adr + llmemory.offsetof(S3, 'y')]
def test_old_objects_pointing_to_pinned_not_exploading(self):
    """The 'old_objects_pointing_to_pinned' list must not grow unboundedly.

    scenario: two old objects, each pointing twice to a pinned object.
    The internal 'old_objects_pointing_to_pinned' should always contain
    two objects.  In a previous implementation the list exploded (grew
    with every minor collection), hence this test.
    """
    old1_ptr = self.malloc(S)
    old1_ptr.someInt = 900
    self.stackroots.append(old1_ptr)

    old2_ptr = self.malloc(S)
    old2_ptr.someInt = 800
    self.stackroots.append(old2_ptr)

    pinned_ptr = self.malloc(T)
    pinned_ptr.someInt = 100
    assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr))

    # each old object references the pinned one twice
    self.write(old1_ptr, 'next', pinned_ptr)
    self.write(old1_ptr, 'data', pinned_ptr)
    self.write(old2_ptr, 'next', pinned_ptr)
    self.write(old2_ptr, 'data', pinned_ptr)

    self.gc.collect()
    old1_ptr = self.stackroots[0]
    old2_ptr = self.stackroots[1]
    assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(old1_ptr))
    assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(old2_ptr))

    # do multiple rounds to make sure
    for _ in range(10):
        assert self.gc.old_objects_pointing_to_pinned.length() == 2
        self.gc.debug_gc_step()
def pin_referenced_from_prebuilt(self, collect_func):
    """Check that a pinned object referenced from a prebuilt one survives.

    scenario: a prebuilt object points to a pinned object.  Check if the
    pinned object doesn't move and is still accessible.
    """
    prebuilt_ptr = lltype.malloc(S, immortal=True)
    prebuilt_ptr.someInt = 900
    self.consider_constant(prebuilt_ptr)
    prebuilt_adr = llmemory.cast_ptr_to_adr(prebuilt_ptr)
    collect_func()
    #
    pinned_ptr = self.malloc(T)
    pinned_ptr.someInt = 100
    self.write(prebuilt_ptr, 'next', pinned_ptr)
    pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
    assert self.gc.pin(pinned_adr)
    #
    # check if everything is as expected
    assert not self.gc.is_in_nursery(prebuilt_adr)
    assert self.gc.is_in_nursery(pinned_adr)
    assert pinned_ptr == prebuilt_ptr.next
    assert pinned_ptr.someInt == 100
    #
    # do a collection and check again
    collect_func()
    assert self.gc.is_in_nursery(pinned_adr)
    assert pinned_ptr == prebuilt_ptr.next
    assert pinned_ptr.someInt == 100
def test_random(self):
    """Randomized stress test of pin/unpin/stackroot bookkeeping.

    scenario: create bunch of objects.  Randomly pin, unpin, add to
    stackroots and remove from stackroots, while stepping the GC,
    checking that reachable objects keep their contents.
    """
    import random

    for i in xrange(10**3):
        obj = self.malloc(T)
        obj.someInt = 100
        #
        if random.random() < 0.5:
            self.stackroots.append(obj)
            print("+stack")
        if random.random() < 0.5:
            self.gc.pin(llmemory.cast_ptr_to_adr(obj))
            print("+pin")
        # advance the incremental GC by a random number of steps
        self.gc.debug_gc_step(random.randint(1, 4))
        # iterate over a copy: the loop may remove from stackroots
        for o in self.stackroots[:]:
            assert o.someInt == 100
            o_adr = llmemory.cast_ptr_to_adr(o)
            if random.random() < 0.1 and self.gc._is_pinned(o_adr):
                print("-pin")
                self.gc.unpin(o_adr)
            if random.random() < 0.1:
                print("-stack")
                self.stackroots.remove(o)
def test_allocate_new_page():
    """allocate_new_page() must hand out fresh, fully-uninitialized pages."""
    pagesize = hdrsize + 16
    arenasize = pagesize * 4 - 1    # room for 3 pages only
    #
    def checknewpage(page, size_class):
        # a fresh page: all blocks uninitialized, none free, no next page
        size = WORD * size_class
        assert (ac._nuninitialized(page, size_class) ==
                (pagesize - hdrsize) // size)
        assert page.nfree == 0
        page1 = page.freeblock - hdrsize
        assert llmemory.cast_ptr_to_adr(page) == page1
        assert page.nextpage == PAGE_NULL
    #
    ac = ArenaCollection(arenasize, pagesize, 99)
    assert ac.num_uninitialized_pages == 0
    assert ac.total_memory_used == 0
    #
    page = ac.allocate_new_page(5)
    checknewpage(page, 5)
    assert ac.num_uninitialized_pages == 2
    assert ac.current_arena.freepages - pagesize == cast_ptr_to_adr(page)
    assert ac.page_for_size[5] == page
    #
    page = ac.allocate_new_page(3)
    checknewpage(page, 3)
    assert ac.num_uninitialized_pages == 1
    assert ac.current_arena.freepages - pagesize == cast_ptr_to_adr(page)
    assert ac.page_for_size[3] == page
    #
    page = ac.allocate_new_page(4)
    checknewpage(page, 4)
    assert ac.num_uninitialized_pages == 0
    assert ac.page_for_size[4] == page
def test_pin_nursery_top_scenario1(self):
    """Three pinned objects at the nursery start leave free < top."""
    ptr1 = self.malloc(T)
    adr1 = llmemory.cast_ptr_to_adr(ptr1)
    ptr1.someInt = 101
    self.stackroots.append(ptr1)
    assert self.gc.pin(adr1)

    ptr2 = self.malloc(T)
    adr2 = llmemory.cast_ptr_to_adr(ptr2)
    ptr2.someInt = 102
    self.stackroots.append(ptr2)
    assert self.gc.pin(adr2)

    ptr3 = self.malloc(T)
    adr3 = llmemory.cast_ptr_to_adr(ptr3)
    ptr3.someInt = 103
    self.stackroots.append(ptr3)
    assert self.gc.pin(adr3)

    # scenario: no minor collection happened, only three mallocs
    # and pins
    #
    # +- nursery
    # |
    # v
    # +--------+--------+--------+---------------------...---+
    # | pinned | pinned | pinned |           empty           |
    # +--------+--------+--------+---------------------...---+
    #                            ^                           ^
    #                            |                           |
    #             nursery_free --+                           |
    #                                          nursery_top --+
    #
    assert adr3 < self.gc.nursery_free
    assert self.gc.nursery_free < self.gc.nursery_top
def test_write_barrier_marking_simple(self):
    """A write into an already-visited old object must re-register it.

    After the object has been visited in STATE_MARKING, writing a young
    pointer into it must put it back into
    'old_objects_pointing_to_young'.
    """
    for i in range(2):
        curobj = self.malloc(S)
        curobj.x = i
        self.stackroots.append(curobj)

    oldobj = self.stackroots[-1]
    oldhdr = self.gc.header(llmemory.cast_ptr_to_adr(oldobj))

    assert oldhdr.tid & incminimark.GCFLAG_VISITED == 0
    self.gc.debug_gc_step_until(incminimark.STATE_MARKING)
    oldobj = self.stackroots[-1]    # object shifted by minor collect
    oldhdr = self.gc.header(llmemory.cast_ptr_to_adr(oldobj))
    assert oldhdr.tid & incminimark.GCFLAG_VISITED == 0

    self.gc._minor_collection()
    self.gc.visit_all_objects_step(1)

    assert oldhdr.tid & incminimark.GCFLAG_VISITED

    # at this point the first object should have been processed
    newobj = self.malloc(S)
    self.write(oldobj, 'next', newobj)

    # the write barrier must have recorded 'oldobj' again
    assert self.gc.header(self.gc.old_objects_pointing_to_young.tolist()[0]) == oldhdr

    self.gc._minor_collection()
    self.gc.debug_check_consistency()
def walk_roots(self, collect_stack_root,
               collect_static_in_prebuilt_nongc,
               collect_static_in_prebuilt_gc,
               is_minor=False):
    """Walk all GC roots, invoking the given callbacks on each.

    Static roots come from the layout builder's recorded address lists;
    stack roots are simulated by copying 'self.tester.stackroots' into a
    raw address array so the GC may update them (e.g. when it moves
    objects), then writing the possibly-updated addresses back.
    """
    gc = self.tester.gc
    layoutbuilder = self.tester.layoutbuilder
    if collect_static_in_prebuilt_gc:
        for addrofaddr in layoutbuilder.addresses_of_static_ptrs:
            if addrofaddr.address[0]:    # skip NULL pointers
                collect_static_in_prebuilt_gc(gc, addrofaddr)
    if collect_static_in_prebuilt_nongc:
        for addrofaddr in layoutbuilder.addresses_of_static_ptrs_in_nongc:
            if addrofaddr.address[0]:    # skip NULL pointers
                collect_static_in_prebuilt_nongc(gc, addrofaddr)
    if collect_stack_root:
        stackroots = self.tester.stackroots
        a = lltype.malloc(ADDR_ARRAY, len(stackroots), flavor='raw')
        for i in range(len(a)):
            a[i] = llmemory.cast_ptr_to_adr(stackroots[i])
        a_base = lltype.direct_arrayitems(a)
        for i in range(len(a)):
            ai = lltype.direct_ptradd(a_base, i)
            collect_stack_root(gc, llmemory.cast_ptr_to_adr(ai))
        # write the (possibly updated) addresses back into stackroots
        for i in range(len(a)):
            PTRTYPE = lltype.typeOf(stackroots[i])
            stackroots[i] = llmemory.cast_adr_to_ptr(a[i], PTRTYPE)
        lltype.free(a, flavor='raw')
def test_prebuilt_list_of_addresses(self):
    """Prebuilt dicts keyed by addresses cannot be translated.

    The interpreter must raise TypeError for a prebuilt dict whose keys
    are Address objects.
    """
    from rpython.rtyper.lltypesystem import llmemory
    TP = lltype.Struct('x', ('y', lltype.Signed))
    a = lltype.malloc(TP, flavor='raw', immortal=True)
    b = lltype.malloc(TP, flavor='raw', immortal=True)
    c = lltype.malloc(TP, flavor='raw', immortal=True)

    a_a = llmemory.cast_ptr_to_adr(a)
    a0 = llmemory.cast_ptr_to_adr(a)
    # two casts of the same pointer: distinct objects, equal addresses
    assert a_a is not a0
    assert a_a == a0
    a_b = llmemory.cast_ptr_to_adr(b)
    a_c = llmemory.cast_ptr_to_adr(c)

    d = {a_a: 3, a_b: 4, a_c: 5}
    d[a0] = 8    # overwrites the a_a entry, since a0 == a_a

    def func(i):
        if i == 0:
            ptr = a
        else:
            ptr = b
        return d[llmemory.cast_ptr_to_adr(ptr)]

    py.test.raises(TypeError, self.interpret, func, [0])
def pin_unpin_moved_stackroot(self, collect_func):
    """Check that an object is moved out of the nursery after unpinning.

    scenario: test if the pinned object is moved after being unpinned.
    The second part of the scenario is the tested one.  The first part
    is already tested by other tests.
    """
    ptr = self.malloc(T)
    ptr.someInt = 100
    self.stackroots.append(ptr)
    assert self.stackroots[0] == ptr    # validate our assumption

    adr = llmemory.cast_ptr_to_adr(ptr)
    assert self.gc.pin(adr)

    collect_func()
    #
    # from here on the test really starts. previouse logic is already tested
    #
    self.gc.unpin(adr)
    assert not self.gc._is_pinned(adr)
    assert self.gc.is_in_nursery(adr)
    #
    # now we do another collection and the object should be moved out of
    # the nursery.
    collect_func()
    new_adr = llmemory.cast_ptr_to_adr(self.stackroots[0])
    assert not self.gc.is_in_nursery(new_adr)
    assert self.stackroots[0].someInt == 100
    # accessing the old (nursery) copy must now fail
    with py.test.raises(RuntimeError) as exinfo:
        ptr.someInt = 200
    assert "freed" in str(exinfo.value)
def _raw_memcopy_opaque(source, dest, size):
    """Copy 'size' chars from 'source' to 'dest' through llmemory.

    Push push push at the llmemory interface (with hacks that are all
    removed after translation).
    """
    base = llmemory.itemoffsetof(rffi.CCHARP.TO, 0)
    src_adr = llmemory.cast_ptr_to_adr(source) + base
    dst_adr = llmemory.cast_ptr_to_adr(dest) + base
    nbytes = size * llmemory.sizeof(lltype.Char)
    llmemory.raw_memcopy(src_adr, dst_adr, nbytes)
def setup():
    """Build an S whose address fields point to two fresh T objects."""
    s1 = lltype.malloc(S)
    tx = lltype.malloc(T)
    tx.z = 42
    ty = lltype.malloc(T)
    # store raw addresses (not pointers) into s1
    s1.x = llmemory.cast_ptr_to_adr(tx)
    s1.y = llmemory.cast_ptr_to_adr(ty)
    return s1
def writearray(self, p, index, newvalue):
    """Store 'newvalue' into p[index], running the write barrier first."""
    if self.gc.needs_write_barrier:
        value_adr = llmemory.cast_ptr_to_adr(newvalue)
        container_adr = llmemory.cast_ptr_to_adr(p)
        # prefer the array-specific barrier when the GC provides one
        if hasattr(self.gc, 'write_barrier_from_array'):
            self.gc.write_barrier_from_array(value_adr, container_adr,
                                             index)
        else:
            self.gc.write_barrier(value_adr, container_adr)
    p[index] = newvalue
def _setup_frame_realloc(self, translate_support_code):
    """Install the frame-reallocation helpers on the CPU.

    Builds two helpers -- realloc_frame (grows a JITFRAME, copying its
    contents) and realloc_frame_crash (debugging stub) -- and stores
    their addresses as integers in self.realloc_frame and
    self.realloc_frame_crash.  When translating, the helpers go through
    the mixlevel annotator instead of llhelper.
    """
    FUNC_TP = lltype.Ptr(lltype.FuncType([llmemory.GCREF, lltype.Signed],
                                         llmemory.GCREF))
    base_ofs = self.get_baseofs_of_frame_field()

    def realloc_frame(frame, size):
        try:
            if not we_are_translated():
                assert not self._exception_emulator[0]
            frame = lltype.cast_opaque_ptr(jitframe.JITFRAMEPTR, frame)
            if size > frame.jf_frame_info.jfi_frame_depth:
                # update the frame_info size, which is for whatever reason
                # not up to date
                frame.jf_frame_info.update_frame_depth(base_ofs, size)
            new_frame = jitframe.JITFRAME.allocate(frame.jf_frame_info)
            frame.jf_forward = new_frame
            # copy the frame slots over, clearing the old ones
            i = 0
            while i < len(frame.jf_frame):
                new_frame.jf_frame[i] = frame.jf_frame[i]
                frame.jf_frame[i] = 0
                i += 1
            new_frame.jf_savedata = frame.jf_savedata
            new_frame.jf_guard_exc = frame.jf_guard_exc
            # all other fields are empty
            llop.gc_writebarrier(lltype.Void, new_frame)
            return lltype.cast_opaque_ptr(llmemory.GCREF, new_frame)
        except Exception as e:
            print "Unhandled exception", e, "in realloc_frame"
            return lltype.nullptr(llmemory.GCREF.TO)

    def realloc_frame_crash(frame, size):
        # debugging stub: report and return NULL
        print "frame", frame, "size", size
        return lltype.nullptr(llmemory.GCREF.TO)

    if not translate_support_code:
        fptr = llhelper(FUNC_TP, realloc_frame)
    else:
        FUNC = FUNC_TP.TO
        args_s = [lltype_to_annotation(ARG) for ARG in FUNC.ARGS]
        s_result = lltype_to_annotation(FUNC.RESULT)
        mixlevelann = MixLevelHelperAnnotator(self.rtyper)
        graph = mixlevelann.getgraph(realloc_frame, args_s, s_result)
        fptr = mixlevelann.graph2delayed(graph, FUNC)
        mixlevelann.finish()
    self.realloc_frame = heaptracker.adr2int(llmemory.cast_ptr_to_adr(fptr))

    if not translate_support_code:
        fptr = llhelper(FUNC_TP, realloc_frame_crash)
    else:
        FUNC = FUNC_TP.TO
        args_s = [lltype_to_annotation(ARG) for ARG in FUNC.ARGS]
        s_result = lltype_to_annotation(FUNC.RESULT)
        mixlevelann = MixLevelHelperAnnotator(self.rtyper)
        graph = mixlevelann.getgraph(realloc_frame_crash, args_s, s_result)
        fptr = mixlevelann.graph2delayed(graph, FUNC)
        mixlevelann.finish()
    self.realloc_frame_crash = heaptracker.adr2int(llmemory.cast_ptr_to_adr(fptr))
def setup():
    """Build an S with a custom trace hook, pointing at two T objects."""
    rgc.register_custom_trace_hook(S, lambda_customtrace)
    s1 = lltype.malloc(S)
    tx = lltype.malloc(T)
    tx.z = 42
    ty = lltype.malloc(T)
    # store raw addresses (not pointers) into s1
    s1.x = llmemory.cast_ptr_to_adr(tx)
    s1.y = llmemory.cast_ptr_to_adr(ty)
    return s1
def func(i):
    """Look up one of two address keys in a locally-built dict."""
    table = {}
    table[llmemory.cast_ptr_to_adr(a)] = 123
    table[llmemory.cast_ptr_to_adr(b)] = 456
    if i > 5:
        lookup = llmemory.cast_ptr_to_adr(a)
    else:
        lookup = llmemory.cast_ptr_to_adr(b)
    return table[lookup]
def test_pin_nursery_top_scenario5(self):
    """After unpinning everything, a collection resets the nursery."""
    ptr1 = self.malloc(T)
    adr1 = llmemory.cast_ptr_to_adr(ptr1)
    ptr1.someInt = 101
    self.stackroots.append(ptr1)
    assert self.gc.pin(adr1)

    ptr2 = self.malloc(T)
    adr2 = llmemory.cast_ptr_to_adr(ptr2)
    ptr2.someInt = 102
    self.stackroots.append(ptr2)
    assert self.gc.pin(adr2)

    ptr3 = self.malloc(T)
    adr3 = llmemory.cast_ptr_to_adr(ptr3)
    ptr3.someInt = 103
    self.stackroots.append(ptr3)
    assert self.gc.pin(adr3)

    # scenario: no minor collection happened, only three mallocs
    # and pins
    #
    # +- nursery
    # |
    # v
    # +--------+--------+--------+---------------------...---+
    # | pinned | pinned | pinned |           empty           |
    # +--------+--------+--------+---------------------...---+
    #                            ^                           ^
    #                            |                           |
    #             nursery_free --+                           |
    #                                          nursery_top --+
    #
    assert adr3 < self.gc.nursery_free
    assert self.gc.nursery_free < self.gc.nursery_top

    # scenario: unpin everything and minor collection
    #
    # +- nursery
    # |
    # v
    # +----------------------------------+-------------...---+
    # | reset arena                      | empty (not reset) |
    # +----------------------------------+-------------...---+
    # ^                                  ^
    # |                                  |
    # +- nursery_free                    |
    #                      nursery_top --+
    #
    self.gc.unpin(adr1)
    self.gc.unpin(adr2)
    self.gc.unpin(adr3)
    self.gc.collect()

    assert self.gc.nursery_free == self.gc.nursery
    assert self.gc.nursery_top > self.gc.nursery_free
def f():
    """Read raw-array fields back through address+offset arithmetic.

    Encodes the four reads into one integer (digit positions 1000, 100,
    10, 1) so the caller can check all of them at once.
    """
    a = lltype.malloc(ARR, 5, flavor='raw')
    a2 = lltype.malloc(ARR2, 6, flavor='raw')
    a2[0] = 1
    a[0] = 3
    adr = llmemory.cast_ptr_to_adr(a)
    adr2 = llmemory.cast_ptr_to_adr(a2)
    return ((adr + offsetx).signed[0] * 1000 +
            (adr + offsety).signed[0] * 100 +
            (adr2 + offsetx2).signed[0] * 10 +
            (adr2 + offsety2).signed[0])
def writebarrier_before_copy(self, source, dest,
                             source_start, dest_start, length):
    """Run the GC's pre-copy write barrier; True means the copy may go on."""
    if not self.gc.needs_write_barrier:
        return True
    src_adr = llmemory.cast_ptr_to_adr(source)
    dst_adr = llmemory.cast_ptr_to_adr(dest)
    return self.gc.writebarrier_before_copy(src_adr, dst_adr,
                                            source_start, dest_start,
                                            length)
def do_write_barrier(self, gcref_struct, gcref_newptr):
    """Emulate the JIT's inline write-barrier check for 'gcref_struct'."""
    # locate the GC header, which sits just before the object
    hdr_addr = llmemory.cast_ptr_to_adr(gcref_struct)
    hdr_addr -= self.gcheaderbuilder.size_gc_header
    hdr = llmemory.cast_adr_to_ptr(hdr_addr, self.HDRPTR)
    if hdr.tid & self.GCClass.JIT_WB_IF_FLAG:
        # get a pointer to the 'remember_young_pointer' function from
        # the GC, and call it immediately
        llop1 = self.llop1
        funcptr = llop1.get_write_barrier_failing_case(self.WB_FUNCPTR)
        funcptr(llmemory.cast_ptr_to_adr(gcref_struct))
def longername(a, b, size):
    """Copy 'size' items of array 'b' into 'a' via raw_memcopy.

    The dead 'else' branch is intentional: it keeps extra operations in
    the compiled graph for the surrounding test.
    """
    if 1:
        baseofs = itemoffsetof(TP, 0)
        onesize = sizeof(TP.OF)
        # total byte size of 'size' items starting at the array base
        size = baseofs + onesize * (size - 1)
        raw_memcopy(cast_ptr_to_adr(b) + baseofs,
                    cast_ptr_to_adr(a) + baseofs, size)
    else:
        # never executed
        a = []
        for i in range(x):
            a.append(i)
    return 0
def test_it_saves_a_pointer_to_whatever_address_was_given(self, space):
    """FFI::Pointer.new(addr) must store exactly the given raw address.

    Allocates one raw byte, passes its integer address to Ruby, and
    checks the wrapped pointer resolves back to the same address.
    """
    int8_ptr = lltype.malloc(rffi.CArray(rffi.CHAR), 1, flavor='raw')
    adr = llmemory.cast_ptr_to_adr(int8_ptr)
    aint = llmemory.cast_adr_to_int(adr, mode='forced')
    w_ptr_obj = space.execute("""
    ptr = FFI::Pointer.new(%s)
    """ % aint)
    adr = llmemory.cast_ptr_to_adr(w_ptr_obj.ptr)
    assert llmemory.cast_adr_to_int(adr, mode='forced') == aint
    lltype.free(int8_ptr, flavor='raw')
    # idiomatic membership test (was: 'not aint in ALLOCATED')
    assert aint not in ALLOCATED
def test_malloc_different_types(self):
    """Pinning objects of different types across a collection must work.

    scenario: malloc two objects of different type and pin them.  Do a
    minor and major collection in between.  This test showed a bug that
    was present in a previous implementation of pinning.
    """
    obj1 = self.malloc(T)
    self.stackroots.append(obj1)
    assert self.gc.pin(llmemory.cast_ptr_to_adr(obj1))
    #
    self.gc.collect()
    #
    obj2 = self.malloc(T)
    self.stackroots.append(obj2)
    assert self.gc.pin(llmemory.cast_ptr_to_adr(obj2))
def test_pin_bug2(self):
    """Unpinning must not leave a black object pointing to a white one.

    # * we have an old object A that points to a pinned object B
    #
    # * we unpin B
    #
    # * the next minor_collection() is done in STATE_MARKING==1
    #   when the object A is already black
    #
    # * _minor_collection() => _visit_old_objects_pointing_to_pinned()
    #   which will move the now-unpinned B out of the nursery, to B'
    #
    # At that point we need to take care of colors, otherwise we
    # get a black object (A) pointing to a white object (B'),
    # which must never occur.
    """
    ptrA = self.malloc(T)
    ptrA.someInt = 42
    adrA = llmemory.cast_ptr_to_adr(ptrA)
    res = self.gc.pin(adrA)
    assert res

    ptrC = self.malloc(S)
    self.stackroots.append(ptrC)

    ptrB = self.malloc(S)
    ptrB.data = ptrA
    self.stackroots.append(ptrB)

    self.gc.collect()
    ptrB = self.stackroots[-1]    # now old and outside the nursery
    ptrC = self.stackroots[-2]    # another random old object, traced later
    adrB = llmemory.cast_ptr_to_adr(ptrB)

    self.gc.minor_collection()
    assert self.gc.gc_state == self.STATE_SCANNING
    self.gc.major_collection_step()
    assert self.gc.gc_state == self.STATE_MARKING
    assert not (self.gc.header(adrB).tid & GCFLAG_VISITED)  # not black yet

    self.gc.TEST_VISIT_SINGLE_STEP = True
    self.gc.major_collection_step()
    assert self.gc.gc_state == self.STATE_MARKING
    assert self.gc.header(adrB).tid & GCFLAG_VISITED    # now black
    # but ptrC is not traced yet, which is why we're still in STATE_MARKING
    assert self.gc.old_objects_pointing_to_pinned.tolist() == [adrB]

    self.gc.unpin(adrA)
    self.gc.DEBUG = 2    # consistency checks will catch a color violation
    self.gc.minor_collection()
def produce_into(self, builder, r):
    """Emit a call to a randomly-built raising function, guarded by
    GUARD_EXCEPTION on the exception it raises."""
    fail_subset = builder.subset_of_intvars(r)
    subset, f, exc = self.raising_func_code(builder, r)
    TP = lltype.FuncType([lltype.Signed] * len(subset), lltype.Void)
    ptr = llhelper(lltype.Ptr(TP), f)
    c_addr = ConstAddr(llmemory.cast_ptr_to_adr(ptr), builder.cpu)
    args = [c_addr] + subset
    descr = self.getcalldescr(builder, TP)
    self.put(builder, args, descr)
    # guard that exactly the expected exception was raised
    exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu)
    op = ResOperation(rop.GUARD_EXCEPTION, [exc_box], BoxPtr(),
                      descr=builder.getfaildescr())
    op.setfailargs(fail_subset)
    builder.loop.operations.append(op)
def pin_shadow_2(self, collect_func):
    """A pinned object with a shadow moves into it once unpinned.

    identityhash() allocates a 'shadow' (the future out-of-nursery copy);
    while pinned the object must stay in the nursery, and after
    unpinning the next collection moves it to the shadow.
    """
    ptr = self.malloc(T)
    adr = llmemory.cast_ptr_to_adr(ptr)
    self.stackroots.append(ptr)
    ptr.someInt = 100
    assert self.gc.pin(adr)
    self.gc.identityhash(ptr)    # allocate shadow
    collect_func()
    assert self.gc.is_in_nursery(adr)
    assert ptr.someInt == 100
    self.gc.unpin(adr)
    collect_func()    # move to shadow
    adr = llmemory.cast_ptr_to_adr(self.stackroots[0])
    assert not self.gc.is_in_nursery(adr)
def test_pinning_limit(self):
    """pin() must fail once max_number_of_pinned_objects is reached."""
    assert self.gc.max_number_of_pinned_objects == 5
    for instance_nr in xrange(self.gc.max_number_of_pinned_objects):
        ptr = self.malloc(T)
        adr = llmemory.cast_ptr_to_adr(ptr)
        ptr.someInt = 100 + instance_nr
        self.stackroots.append(ptr)
        assert self.gc.pin(adr)
    #
    # now we reached the maximum amount of pinned objects
    ptr = self.malloc(T)
    adr = llmemory.cast_ptr_to_adr(ptr)
    self.stackroots.append(ptr)
    assert not self.gc.pin(adr)
def cast_fnptr_to_root(self, fnptr):
    """Return the address of the function pointer 'fnptr', as a root."""
    root_adr = llmemory.cast_ptr_to_adr(fnptr)
    return root_adr
def g(p):
    """True unless 'p' is a NULL pointer."""
    adr = llmemory.cast_ptr_to_adr(p)
    return bool(adr)
def cast_ptr_to_int(x):
    """Convert pointer 'x' to the CPU's integer representation."""
    return CPU_ARM.cast_adr_to_int(llmemory.cast_ptr_to_adr(x))
def test_try_pin_gcref_containing_type(self):
    """Objects that may contain GC pointers must not be pinnable.

    scenario: incminimark's object pinning can't pin objects that may
    contain GC pointers.
    """
    obj = self.malloc(S)
    assert not self.gc.pin(llmemory.cast_ptr_to_adr(obj))
def setfield(self, obj, fieldname, fieldvalue):
    """Write a struct field by computing its interior address."""
    STRUCT = lltype.typeOf(obj).TO
    field_adr = (llmemory.cast_ptr_to_adr(obj) +
                 llmemory.offsetof(STRUCT, fieldname))
    self.setinterior(obj, field_adr, getattr(STRUCT, fieldname),
                     fieldvalue)
def writebarrier_before_move(self, array):
    """Run the GC's pre-move write barrier when one is required."""
    if not self.gc.needs_write_barrier:
        return None
    array_adr = llmemory.cast_ptr_to_adr(array)
    return self.gc.writebarrier_before_move(array_adr)
def get_malloc_fn_addr(self, funcname):
    """Return the integer address of the named malloc helper."""
    fnptr = self.get_malloc_fn(funcname)
    fnaddr = llmemory.cast_ptr_to_adr(fnptr)
    return heaptracker.adr2int(fnaddr)
def get_write_barrier_fn(self, cpu):
    """Return the write-barrier slow-path function as a CPU integer."""
    failing_case = self.llop1.get_write_barrier_failing_case(
        self.WB_FUNCPTR)
    fn_adr = llmemory.cast_ptr_to_adr(failing_case)
    return cpu.cast_adr_to_int(fn_adr)
def bh_classof(self, struct):
    """Return the vtable of the instance 'struct', as an integer."""
    obj = lltype.cast_opaque_ptr(rclass.OBJECTPTR, struct)
    vtable_adr = llmemory.cast_ptr_to_adr(obj.typeptr)
    return heaptracker.adr2int(vtable_adr)
def get_rpy_memory_usage(gc, gcref):
    """Return the size (including the hash field) of the object 'gcref'."""
    obj_adr = llmemory.cast_ptr_to_adr(gcref)
    return gc.get_size_incl_hash(obj_adr)
def _do_append_rpy_referents(gc, gcref, lst):
    """Append the objects referenced by 'gcref' to 'lst'.

    Temporarily stashes the output list and a counter on the GC object
    so the trace callback (_append_rpy_referent) can reach them; returns
    the number of referents appended.
    """
    gc._count_rpy = 0
    gc._list_rpy = lst
    gc.trace(llmemory.cast_ptr_to_adr(gcref), _append_rpy_referent, gc)
    gc._list_rpy = None    # clear the stash so nothing is kept alive
    return gc._count_rpy
def get_rpy_type_index(gc, gcref):
    """Return the member index of the type of the object 'gcref'."""
    obj_adr = llmemory.cast_ptr_to_adr(gcref)
    return gc.get_member_index(gc.get_type_id(obj_adr))
def is_rpy_instance(gc, gcref):
    """True if 'gcref' points to an RPython class instance."""
    obj_adr = llmemory.cast_ptr_to_adr(gcref)
    return gc.is_rpython_class(gc.get_type_id(obj_adr))
def setarrayitem(self, array, index, newitem):
    """Write an array item by computing its interior address."""
    ARRAY = lltype.typeOf(array).TO
    item_adr = (llmemory.cast_ptr_to_adr(array) +
                llmemory.itemoffsetof(ARRAY, index))
    self.setinterior(array, item_adr, ARRAY.OF, newitem, (index,))
def cast_vtable_to_hashable(self, cpu, ptr):
    """Return an integer usable as a dict key for the vtable 'ptr'."""
    vtable_adr = llmemory.cast_ptr_to_adr(ptr)
    return heaptracker.adr2int(vtable_adr)
def _wrapkey(self, obj):
    """Turn a low-level container into its address, used as a key."""
    ptr = obj._as_ptr()
    return llmemory.cast_ptr_to_adr(ptr)
def func(i):
    """Select 'a' or 'b' by 'i' and look up its address in 'd'."""
    if i == 0:
        chosen = a
    else:
        chosen = b
    return d[llmemory.cast_ptr_to_adr(chosen)]
def shrink_array(self, p, smallersize):
    """Try the GC's shrink_array(); False when the GC has none."""
    if not hasattr(self.gc, 'shrink_array'):
        return False
    obj_adr = llmemory.cast_ptr_to_adr(p)
    return self.gc.shrink_array(obj_adr, smallersize)
def run_guards_translated(gcremovetypeptr):
    """Translate a tiny program plus four guard loops and run them.

    Compiles loops testing guard_class, guard_gc_type, guard_is_object
    and guard_subclass against instances of A < B < C (and non-instance
    objects), then checks the printed match/fail pattern and the
    protect_speculative_* results of the translated binary.
    """
    class A(object):
        pass
    class B(A):
        pass
    class C(B):
        pass
    def main(argv):
        # make sure all three classes (and B's 'foo' attr) are seen by
        # the annotator
        A()
        B().foo = len(argv)
        C()
        return 0

    t = TranslationContext()
    t.config.translation.gc = "minimark"
    t.config.translation.gcremovetypeptr = gcremovetypeptr
    ann = t.buildannotator()
    ann.build_types(main, [s_list_of_strings], main_entry_point=True)
    rtyper = t.buildrtyper()
    rtyper.specialize()

    # fish the vtable of class B out of the rtyper
    classdef = ann.bookkeeper.getuniqueclassdef(B)
    rclass = getclassrepr(rtyper, classdef)
    rinstance = getinstancerepr(rtyper, classdef)
    LLB = rinstance.lowleveltype.TO
    ptr_vtable_B = rclass.getvtable()
    adr_vtable_B = llmemory.cast_ptr_to_adr(ptr_vtable_B)
    vtable_B = llmemory.cast_adr_to_int(adr_vtable_B, mode="symbolic")

    CPU = getcpuclass()
    cpu = CPU(rtyper, NoStats(),
              translate_support_code=True,
              gcdescr=get_description(t.config))
    execute_token = cpu.make_execute_token(llmemory.GCREF)
    finaldescr = BasicFinalDescr()
    faildescr = BasicFailDescr()

    descr_B = cpu.sizeof(LLB, ptr_vtable_B)
    typeid_B = descr_B.get_type_id()
    fielddescr_B = cpu.fielddescrof(LLB, 'inst_foo')

    LLD = lltype.GcStruct('D', ('dd', lltype.Signed))
    descr_D = cpu.sizeof(LLD)
    fielddescr_D = cpu.fielddescrof(LLD, 'dd')

    ARRAY = lltype.GcArray(lltype.Signed)
    arraydescr = cpu.arraydescrof(ARRAY)

    loop1 = parse("""
    [p0]
    guard_class(p0, ConstInt(vtable_B), descr=faildescr) []
    finish(descr=finaldescr)
    """, namespace={'finaldescr': finaldescr,
                    'faildescr': faildescr,
                    'vtable_B': vtable_B})

    loop2 = parse("""
    [p0]
    guard_gc_type(p0, ConstInt(typeid_B), descr=faildescr) []
    finish(descr=finaldescr)
    """, namespace={'finaldescr': finaldescr,
                    'faildescr': faildescr,
                    'typeid_B': typeid_B})

    loop3 = parse("""
    [p0]
    guard_is_object(p0, descr=faildescr) []
    finish(descr=finaldescr)
    """, namespace={'finaldescr': finaldescr,
                    'faildescr': faildescr})

    loop4 = parse("""
    [p0]
    guard_subclass(p0, ConstInt(vtable_B), descr=faildescr) []
    finish(descr=finaldescr)
    """, namespace={'finaldescr': finaldescr,
                    'faildescr': faildescr,
                    'vtable_B': vtable_B})

    def g():
        # runs inside the translated program, before main()
        cpu.setup_once()
        token1 = JitCellToken()
        token2 = JitCellToken()
        token3 = JitCellToken()
        token4 = JitCellToken()
        cpu.compile_loop(loop1.inputargs, loop1.operations, token1)
        cpu.compile_loop(loop2.inputargs, loop2.operations, token2)
        cpu.compile_loop(loop3.inputargs, loop3.operations, token3)
        cpu.compile_loop(loop4.inputargs, loop4.operations, token4)

        for token, p0 in [
                (token1, rffi.cast(llmemory.GCREF, A())),
                (token1, rffi.cast(llmemory.GCREF, B())),
                (token1, rffi.cast(llmemory.GCREF, C())),

                (token2, rffi.cast(llmemory.GCREF, A())),
                (token2, rffi.cast(llmemory.GCREF, B())),
                (token2, rffi.cast(llmemory.GCREF, C())),
                (token2, rffi.cast(llmemory.GCREF, [42, 43])),

                (token3, rffi.cast(llmemory.GCREF, A())),
                (token3, rffi.cast(llmemory.GCREF, B())),
                (token3, rffi.cast(llmemory.GCREF, [44, 45])),

                (token4, rffi.cast(llmemory.GCREF, A())),
                (token4, rffi.cast(llmemory.GCREF, B())),
                (token4, rffi.cast(llmemory.GCREF, C())),
                ]:
            frame = execute_token(token, p0)
            descr = cpu.get_latest_descr(frame)
            if descr is finaldescr:
                print 'match'
            elif descr is faildescr:
                print 'fail'
            else:
                print '???'
            #
            if token is token2:    # guard_gc_type
                print int(cpu.get_actual_typeid(p0) == typeid_B)
            if token is token3:    # guard_is_object
                print int(cpu.check_is_object(p0))

        for p0 in [lltype.nullptr(llmemory.GCREF.TO),
                   rffi.cast(llmemory.GCREF, A()),
                   rffi.cast(llmemory.GCREF, B()),
                   rffi.cast(llmemory.GCREF, C()),
                   rffi.cast(llmemory.GCREF, lltype.malloc(LLD)),
                   rffi.cast(llmemory.GCREF, lltype.malloc(ARRAY, 5)),
                   rffi.cast(llmemory.GCREF, "foobar"),
                   rffi.cast(llmemory.GCREF, u"foobaz")]:
            # each protect_speculative_* either passes or raises
            # SpeculativeError; record the outcome as a letter or '-'
            results = ['B', 'D', 'A', 'S', 'U']
            try:
                cpu.protect_speculative_field(p0, fielddescr_B)
            except SpeculativeError:
                results[0] = '-'
            try:
                cpu.protect_speculative_field(p0, fielddescr_D)
            except SpeculativeError:
                results[1] = '-'
            try:
                cpu.protect_speculative_array(p0, arraydescr)
            except SpeculativeError:
                results[2] = '-'
            try:
                cpu.protect_speculative_string(p0)
            except SpeculativeError:
                results[3] = '-'
            try:
                cpu.protect_speculative_unicode(p0)
            except SpeculativeError:
                results[4] = '-'
            print ''.join(results)

    call_initial_function(t, g)

    cbuilder = genc.CStandaloneBuilder(t, main, t.config)
    cbuilder.generate_source(defines=cbuilder.DEBUG_DEFINES)
    cbuilder.compile()

    data = cbuilder.cmdexec('')
    assert data == ('fail\n'
                    'match\n'
                    'fail\n'

                    'fail\n'
                    '0\n'
                    'match\n'
                    '1\n'
                    'fail\n'
                    '0\n'
                    'fail\n'
                    '0\n'

                    'match\n'
                    '1\n'
                    'match\n'
                    '1\n'
                    'fail\n'
                    '0\n'

                    'fail\n'
                    'match\n'
                    'match\n'

                    '-----\n'    # null
                    '-----\n'    # instance of A
                    'B----\n'    # instance of B
                    'B----\n'    # instance of C
                    '-D---\n'
                    '--A--\n'
                    '---S-\n'
                    '----U\n')
def test_prebuilt_not_pinnable(self):
    """Prebuilt (immortal) objects must never be pinnable."""
    obj = lltype.malloc(T, immortal=True)
    self.consider_constant(obj)
    assert not self.gc.pin(llmemory.cast_ptr_to_adr(obj))
    self.gc.collect()
    # still not pinnable after a collection
    assert not self.gc.pin(llmemory.cast_ptr_to_adr(obj))
class LLtypeMixin(object):
    """Shared namespace of lltype structures, prebuilt objects and CPU
    descrs used by the optimizer tests.  Everything defined at class
    level here ends up in 'namespace' (see the last line) so tests can
    refer to these names in parsed trace source."""

    def get_class_of_box(self, box):
        # Return the vtable pointer of the instance held in 'box'.
        base = box.getref_base()
        return lltype.cast_opaque_ptr(rclass.OBJECTPTR, base).typeptr

    # ---- three distinct vtables, so tests can check class guards ----
    node_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True)
    node_vtable.name = rclass.alloc_array_name('node')
    node_vtable_adr = llmemory.cast_ptr_to_adr(node_vtable)
    node_vtable2 = lltype.malloc(OBJECT_VTABLE, immortal=True)
    node_vtable2.name = rclass.alloc_array_name('node2')
    node_vtable_adr2 = llmemory.cast_ptr_to_adr(node_vtable2)
    node_vtable3 = lltype.malloc(OBJECT_VTABLE, immortal=True)
    node_vtable3.name = rclass.alloc_array_name('node3')
    node_vtable_adr3 = llmemory.cast_ptr_to_adr(node_vtable3)

    cpu = runner.LLGraphCPU(None)

    # ---- struct types (NODE and S are mutually recursive) ----
    NODE = lltype.GcForwardReference()
    S = lltype.GcForwardReference()
    NODE.become(lltype.GcStruct('NODE', ('parent', OBJECT),
                                ('value', lltype.Signed),
                                ('floatval', lltype.Float),
                                ('charval', lltype.Char),
                                ('nexttuple', lltype.Ptr(S)),
                                ('next', lltype.Ptr(NODE))))
    S.become(lltype.GcStruct('TUPLE', ('a', lltype.Signed),
                             ('abis', lltype.Signed),
                             ('b', lltype.Ptr(NODE))))
    NODE2 = lltype.GcStruct('NODE2', ('parent', NODE),
                            ('other', lltype.Ptr(NODE)))

    # NODE3 is fully immutable (all fields are quasi-constant).
    NODE3 = lltype.GcForwardReference()
    NODE3.become(lltype.GcStruct('NODE3', ('parent', OBJECT),
                                 ('value', lltype.Signed),
                                 ('next', lltype.Ptr(NODE3)),
                                 hints={'immutable': True}))

    # An immutable struct with 26 fields 'biga'..'bigz'; one fielddescr
    # per field is injected into the class namespace via locals(),
    # named e.g. 'bigadescr' (works in a Python 2 class body).
    big_fields = [('big' + i, lltype.Signed) for i in string.ascii_lowercase]
    BIG = lltype.GcForwardReference()
    BIG.become(lltype.GcStruct('BIG', *big_fields,
                               hints={'immutable': True}))
    for field, _ in big_fields:
        locals()[field + 'descr'] = cpu.fielddescrof(BIG, field)

    # ---- prebuilt instances ----
    node = lltype.malloc(NODE)
    node.value = 5
    node.next = node          # self-referencing, for cycle tests
    node.parent.typeptr = node_vtable
    nodeaddr = lltype.cast_opaque_ptr(llmemory.GCREF, node)
    #nodebox = InputArgRef(lltype.cast_opaque_ptr(llmemory.GCREF, node))
    node2 = lltype.malloc(NODE2)
    node2.parent.parent.typeptr = node_vtable2
    node2addr = lltype.cast_opaque_ptr(llmemory.GCREF, node2)
    myptr = lltype.cast_opaque_ptr(llmemory.GCREF, node)
    mynodeb = lltype.malloc(NODE)
    myarray = lltype.cast_opaque_ptr(llmemory.GCREF,
        lltype.malloc(lltype.GcArray(lltype.Signed), 13, zero=True))
    mynodeb.parent.typeptr = node_vtable
    myptrb = lltype.cast_opaque_ptr(llmemory.GCREF, mynodeb)
    myptr2 = lltype.malloc(NODE2)
    myptr2.parent.parent.typeptr = node_vtable2
    myptr2 = lltype.cast_opaque_ptr(llmemory.GCREF, myptr2)
    nullptr = lltype.nullptr(llmemory.GCREF.TO)
    mynode3 = lltype.malloc(NODE3)
    mynode3.parent.typeptr = node_vtable3
    mynode3.value = 7
    mynode3.next = mynode3
    myptr3 = lltype.cast_opaque_ptr(llmemory.GCREF, mynode3)   # a NODE3
    mynode4 = lltype.malloc(NODE3)
    mynode4.parent.typeptr = node_vtable3
    myptr4 = lltype.cast_opaque_ptr(llmemory.GCREF, mynode4)   # a NODE3
    # NOTE(review): 'nullptr' is rebound to the same value a second
    # time here; the duplicate is harmless but redundant.
    nullptr = lltype.nullptr(llmemory.GCREF.TO)
    #nodebox2 = InputArgRef(lltype.cast_opaque_ptr(llmemory.GCREF, node2))

    # ---- size and field descrs for the NODE family ----
    nodesize = cpu.sizeof(NODE, node_vtable)
    node_tid = nodesize.get_type_id()
    nodesize2 = cpu.sizeof(NODE2, node_vtable2)
    nodesize3 = cpu.sizeof(NODE3, node_vtable3)
    valuedescr = cpu.fielddescrof(NODE, 'value')
    floatdescr = cpu.fielddescrof(NODE, 'floatval')
    chardescr = cpu.fielddescrof(NODE, 'charval')
    nextdescr = cpu.fielddescrof(NODE, 'next')
    nexttupledescr = cpu.fielddescrof(NODE, 'nexttuple')
    otherdescr = cpu.fielddescrof(NODE2, 'other')
    valuedescr3 = cpu.fielddescrof(NODE3, 'value')
    nextdescr3 = cpu.fielddescrof(NODE3, 'next')
    # NODE3 is immutable, so its field descrs must be always-pure.
    assert valuedescr3.is_always_pure()
    assert nextdescr3.is_always_pure()

    # ---- quasi-immutable field support ----
    accessor = FieldListAccessor()
    accessor.initialize(None, {'inst_field': IR_QUASIIMMUTABLE})
    QUASI = lltype.GcStruct('QUASIIMMUT', ('inst_field', lltype.Signed),
                            ('mutate_field', rclass.OBJECTPTR),
                            hints={'immutable_fields': accessor})
    quasisize = cpu.sizeof(QUASI, None)
    quasi = lltype.malloc(QUASI, immortal=True)
    quasi.inst_field = -4247
    quasifielddescr = cpu.fielddescrof(QUASI, 'inst_field')
    quasiptr = lltype.cast_opaque_ptr(llmemory.GCREF, quasi)
    quasiimmutdescr = QuasiImmutDescr(cpu, quasiptr, quasifielddescr,
                                      cpu.fielddescrof(QUASI,
                                                       'mutate_field'))

    # ---- objects holding a GC reference ----
    NODEOBJ = lltype.GcStruct('NODEOBJ', ('parent', OBJECT),
                              ('ref', lltype.Ptr(OBJECT)))
    nodeobj = lltype.malloc(NODEOBJ)
    nodeobjvalue = lltype.cast_opaque_ptr(llmemory.GCREF, nodeobj)
    refdescr = cpu.fielddescrof(NODEOBJ, 'ref')

    # ---- mutable vs. immutable int/ptr boxes ----
    INTOBJ_NOIMMUT = lltype.GcStruct('INTOBJ_NOIMMUT', ('parent', OBJECT),
                                     ('intval', lltype.Signed))
    INTOBJ_IMMUT = lltype.GcStruct('INTOBJ_IMMUT', ('parent', OBJECT),
                                   ('intval', lltype.Signed),
                                   hints={'immutable': True})
    intobj_noimmut_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True)
    intobj_immut_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True)
    noimmut_intval = cpu.fielddescrof(INTOBJ_NOIMMUT, 'intval')
    immut_intval = cpu.fielddescrof(INTOBJ_IMMUT, 'intval')
    immut = lltype.malloc(INTOBJ_IMMUT, zero=True)
    immutaddr = lltype.cast_opaque_ptr(llmemory.GCREF, immut)
    noimmut_descr = cpu.sizeof(INTOBJ_NOIMMUT, intobj_noimmut_vtable)
    immut_descr = cpu.sizeof(INTOBJ_IMMUT, intobj_immut_vtable)
    PTROBJ_IMMUT = lltype.GcStruct('PTROBJ_IMMUT', ('parent', OBJECT),
                                   ('ptrval', lltype.Ptr(OBJECT)),
                                   hints={'immutable': True})
    ptrobj_immut_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True)
    ptrobj_immut_descr = cpu.sizeof(PTROBJ_IMMUT, ptrobj_immut_vtable)
    immut_ptrval = cpu.fielddescrof(PTROBJ_IMMUT, 'ptrval')

    # ---- array descrs of various item types ----
    arraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Signed))
    int32arraydescr = cpu.arraydescrof(lltype.GcArray(rffi.INT))
    int16arraydescr = cpu.arraydescrof(lltype.GcArray(rffi.SHORT))
    float32arraydescr = cpu.arraydescrof(
        lltype.GcArray(lltype.SingleFloat))
    arraydescr_tid = arraydescr.get_type_id()
    array = lltype.malloc(lltype.GcArray(lltype.Signed), 15, zero=True)
    arrayref = lltype.cast_opaque_ptr(llmemory.GCREF, array)
    array2 = lltype.malloc(lltype.GcArray(lltype.Ptr(S)), 15, zero=True)
    array2ref = lltype.cast_opaque_ptr(llmemory.GCREF, array2)
    gcarraydescr = cpu.arraydescrof(lltype.GcArray(llmemory.GCREF))
    gcarraydescr_tid = gcarraydescr.get_type_id()
    floatarraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Float))
    arrayimmutdescr = cpu.arraydescrof(
        lltype.GcArray(lltype.Signed, hints={"immutable": True}))
    immutarray = lltype.cast_opaque_ptr(llmemory.GCREF,
        lltype.malloc(arrayimmutdescr.A, 13, zero=True))
    gcarrayimmutdescr = cpu.arraydescrof(
        lltype.GcArray(llmemory.GCREF, hints={"immutable": True}))
    floatarrayimmutdescr = cpu.arraydescrof(
        lltype.GcArray(lltype.Float, hints={"immutable": True}))

    # a GcStruct not inheriting from OBJECT
    tpl = lltype.malloc(S, zero=True)
    tupleaddr = lltype.cast_opaque_ptr(llmemory.GCREF, tpl)
    nodefull2 = lltype.malloc(NODE, zero=True)
    nodefull2addr = lltype.cast_opaque_ptr(llmemory.GCREF, nodefull2)
    ssize = cpu.sizeof(S, None)
    adescr = cpu.fielddescrof(S, 'a')
    abisdescr = cpu.fielddescrof(S, 'abis')
    bdescr = cpu.fielddescrof(S, 'b')
    #sbox = BoxPtr(lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(S)))
    arraydescr2 = cpu.arraydescrof(lltype.GcArray(lltype.Ptr(S)))

    T = lltype.GcStruct('TUPLE',
                        ('c', lltype.Signed),
                        ('d', lltype.Ptr(lltype.GcArray(lltype.Ptr(NODE)))))

    # Mimics an interpreter-level W_Root subclass with mixed fields.
    W_ROOT = lltype.GcStruct('W_ROOT', ('parent', OBJECT),
        ('inst_w_seq', llmemory.GCREF), ('inst_index', lltype.Signed),
        ('inst_w_list', llmemory.GCREF), ('inst_length', lltype.Signed),
        ('inst_start', lltype.Signed), ('inst_step', lltype.Signed))
    inst_w_seq = cpu.fielddescrof(W_ROOT, 'inst_w_seq')
    inst_index = cpu.fielddescrof(W_ROOT, 'inst_index')
    inst_length = cpu.fielddescrof(W_ROOT, 'inst_length')
    inst_start = cpu.fielddescrof(W_ROOT, 'inst_start')
    inst_step = cpu.fielddescrof(W_ROOT, 'inst_step')
    inst_w_list = cpu.fielddescrof(W_ROOT, 'inst_w_list')
    w_root_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True)

    tsize = cpu.sizeof(T, None)
    cdescr = cpu.fielddescrof(T, 'c')
    ddescr = cpu.fielddescrof(T, 'd')
    arraydescr3 = cpu.arraydescrof(lltype.GcArray(lltype.Ptr(NODE3)))

    U = lltype.GcStruct('U',
                        ('parent', OBJECT),
                        ('one', lltype.Ptr(lltype.GcArray(
                            lltype.Ptr(NODE)))))
    u_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True)
    u_vtable_adr = llmemory.cast_ptr_to_adr(u_vtable)

    SIMPLE = lltype.GcStruct('simple', ('parent', OBJECT),
                             ('value', lltype.Signed))
    simplevalue = cpu.fielddescrof(SIMPLE, 'value')
    simple_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True)
    simpledescr = cpu.sizeof(SIMPLE, simple_vtable)
    simple = lltype.malloc(SIMPLE, zero=True)
    simpleaddr = lltype.cast_opaque_ptr(llmemory.GCREF, simple)
    #usize = cpu.sizeof(U, ...)
    onedescr = cpu.fielddescrof(U, 'one')

    # ---- call descrs with various effect infos ----
    FUNC = lltype.FuncType([lltype.Signed], lltype.Signed)
    plaincalldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
                                     EffectInfo.MOST_GENERAL)
    elidablecalldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
        EffectInfo([valuedescr], [], [], [valuedescr], [], [],
                   EffectInfo.EF_ELIDABLE_CANNOT_RAISE))
    elidable2calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
        EffectInfo([valuedescr], [], [], [valuedescr], [], [],
                   EffectInfo.EF_ELIDABLE_OR_MEMORYERROR))
    elidable3calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
        EffectInfo([valuedescr], [], [], [valuedescr], [], [],
                   EffectInfo.EF_ELIDABLE_CAN_RAISE))
    nonwritedescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
        EffectInfo([], [], [], [], [], []))
    writeadescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
        EffectInfo([], [], [], [adescr], [], []))
    writearraydescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
        EffectInfo([], [], [], [adescr], [arraydescr], []))
    writevalue3descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
        EffectInfo([], [], [], [valuedescr3], [], []))
    readadescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
        EffectInfo([adescr], [], [], [], [], []))
    mayforcevirtdescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
        EffectInfo([nextdescr], [], [], [], [], [],
                   EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE,
                   can_invalidate=True))
    arraycopydescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
        EffectInfo([], [arraydescr], [], [], [arraydescr], [],
                   EffectInfo.EF_CANNOT_RAISE,
                   oopspecindex=EffectInfo.OS_ARRAYCOPY))
    raw_malloc_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
        EffectInfo([], [], [], [], [], [],
                   EffectInfo.EF_CAN_RAISE,
                   oopspecindex=EffectInfo.OS_RAW_MALLOC_VARSIZE_CHAR))
    raw_free_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
        EffectInfo([], [], [], [], [], [],
                   EffectInfo.EF_CANNOT_RAISE,
                   oopspecindex=EffectInfo.OS_RAW_FREE))

    chararray = lltype.GcArray(lltype.Char)
    chararraydescr = cpu.arraydescrof(chararray)
    u2array = lltype.GcArray(rffi.USHORT)
    u2arraydescr = cpu.arraydescrof(u2array)

    nodefull = lltype.malloc(NODE2, zero=True)
    nodefull.parent.next = lltype.cast_pointer(lltype.Ptr(NODE), nodefull)
    nodefull.parent.nexttuple = tpl
    nodefulladdr = lltype.cast_opaque_ptr(llmemory.GCREF, nodefull)

    # array of structs (complex data)
    complexarray = lltype.GcArray(
        lltype.Struct("complex",
            ("real", lltype.Float),
            ("imag", lltype.Float),
        )
    )
    complexarraydescr = cpu.arraydescrof(complexarray)
    complexrealdescr = cpu.interiorfielddescrof(complexarray, "real")
    compleximagdescr = cpu.interiorfielddescrof(complexarray, "imag")
    complexarraycopydescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
        EffectInfo([], [complexarraydescr], [], [], [complexarraydescr],
                   [], EffectInfo.EF_CANNOT_RAISE,
                   oopspecindex=EffectInfo.OS_ARRAYCOPY))

    # raw (non-GC) arrays without a length field
    rawarraydescr = cpu.arraydescrof(
        lltype.Array(lltype.Signed, hints={'nolength': True}))
    rawarraydescr_char = cpu.arraydescrof(
        lltype.Array(lltype.Char, hints={'nolength': True}))
    rawarraydescr_float = cpu.arraydescrof(
        lltype.Array(lltype.Float, hints={'nolength': True}))

    fc_array = lltype.GcArray(
        lltype.Struct(
            "floatchar", ("float", lltype.Float),
            ("char", lltype.Char)))
    fc_array_descr = cpu.arraydescrof(fc_array)
    fc_array_floatdescr = cpu.interiorfielddescrof(fc_array, "float")
    fc_array_chardescr = cpu.interiorfielddescrof(fc_array, "char")

    # Build one call descr per string-oopspec, and a matching one for
    # the unicode variant, injected into the namespace via locals().
    for _name, _os in [
            ('strconcatdescr', 'OS_STR_CONCAT'),
            ('strslicedescr', 'OS_STR_SLICE'),
            ('strequaldescr', 'OS_STR_EQUAL'),
            ('streq_slice_checknull_descr', 'OS_STREQ_SLICE_CHECKNULL'),
            ('streq_slice_nonnull_descr', 'OS_STREQ_SLICE_NONNULL'),
            ('streq_slice_char_descr', 'OS_STREQ_SLICE_CHAR'),
            ('streq_nonnull_descr', 'OS_STREQ_NONNULL'),
            ('streq_nonnull_char_descr', 'OS_STREQ_NONNULL_CHAR'),
            ('streq_checknull_char_descr', 'OS_STREQ_CHECKNULL_CHAR'),
            ('streq_lengthok_descr', 'OS_STREQ_LENGTHOK'),
            ]:
        # concat/slice allocate, so they may raise MemoryError
        if _name in ('strconcatdescr', 'strslicedescr'):
            _extra = EffectInfo.EF_ELIDABLE_OR_MEMORYERROR
        else:
            _extra = EffectInfo.EF_ELIDABLE_CANNOT_RAISE
        _oopspecindex = getattr(EffectInfo, _os)
        locals()[_name] = \
            cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
                            EffectInfo([], [], [], [], [], [], _extra,
                                       oopspecindex=_oopspecindex))
        #
        _oopspecindex = getattr(EffectInfo, _os.replace('STR', 'UNI'))
        locals()[_name.replace('str', 'unicode')] = \
            cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
                            EffectInfo([], [], [], [], [], [], _extra,
                                       oopspecindex=_oopspecindex))

    s2u_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
        EffectInfo([], [], [], [], [], [],
                   EffectInfo.EF_ELIDABLE_CAN_RAISE,
                   oopspecindex=EffectInfo.OS_STR2UNICODE))
    #
    class LoopToken(AbstractDescr):
        pass
    asmdescr = LoopToken()  # it can be whatever, it's not a descr though

    from rpython.jit.metainterp.virtualref import VirtualRefInfo

    class FakeWarmRunnerDesc:
        pass
    FakeWarmRunnerDesc.cpu = cpu
    vrefinfo = VirtualRefInfo(FakeWarmRunnerDesc)
    virtualtokendescr = vrefinfo.descr_virtual_token
    virtualforceddescr = vrefinfo.descr_forced

    # NOTE: FUNC is rebound here; all descrs above already captured
    # the old signature.
    FUNC = lltype.FuncType([], lltype.Void)
    ei = EffectInfo([], [], [], [], [], [], EffectInfo.EF_CANNOT_RAISE,
                    can_invalidate=False,
                    oopspecindex=EffectInfo.OS_JIT_FORCE_VIRTUALIZABLE)
    clear_vable = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, ei)

    jit_virtual_ref_vtable = vrefinfo.jit_virtual_ref_vtable
    jvr_vtable_adr = llmemory.cast_ptr_to_adr(jit_virtual_ref_vtable)
    vref_descr = cpu.sizeof(vrefinfo.JIT_VIRTUAL_REF,
                            jit_virtual_ref_vtable)

    # Expose every class-level name above to the trace parser.
    namespace = locals()
def test_pin_twice(self):
    """Pinning the same object a second time must fail."""
    obj_adr = llmemory.cast_ptr_to_adr(self.malloc(T))
    assert self.gc.pin(obj_adr)
    assert not self.gc.pin(obj_adr)
def _walk_prebuilt_gc(self, collect):
    """Call *collect* with the address of each prebuilt GC object."""
    # debugging only! not RPython
    for prebuilt in self.gcheap._all_prebuilt_gc:
        addr = llmemory.cast_ptr_to_adr(prebuilt._as_ptr())
        collect(addr)
def _do_count_rpy_referents(gc, gcref):
    # Count the GC references found by tracing 'gcref' once.  The
    # callback _count_rpy_referent increments 'gc._count_rpy' for each
    # reference; we zero the accumulator before the trace and read it
    # back afterwards.
    gc._count_rpy = 0
    gc.trace(llmemory.cast_ptr_to_adr(gcref), _count_rpy_referent, gc)
    return gc._count_rpy
def test_unpin_not_pinned(self):
    # this test checks a requirement of the unpin() interface:
    # unpinning an object that was never pinned must raise.
    obj = self.malloc(S)
    obj_adr = llmemory.cast_ptr_to_adr(obj)
    py.test.raises(Exception, self.gc.unpin, obj_adr)
def write(self, p, fieldname, newvalue):
    """Store *newvalue* into p.<fieldname>, running the GC write
    barrier first when the collector requires one."""
    if self.gc.needs_write_barrier:
        self.gc.write_barrier(llmemory.cast_ptr_to_adr(p))
    setattr(p, fieldname, newvalue)
def f():
    # Invoke the write barrier on the prebuilt 's' (closed over from
    # the enclosing scope), then report success.
    s_adr = llmemory.cast_ptr_to_adr(s)
    llop.gc_writebarrier(lltype.Void, s_adr)
    return True
def cls_of_box(self, box):
    """Return the class of the instance in *box*, as a ConstInt
    wrapping the vtable address."""
    instance = box.getref(lltype.Ptr(rclass.OBJECT))
    vtable_adr = llmemory.cast_ptr_to_adr(instance.typeptr)
    return history.ConstInt(heaptracker.adr2int(vtable_adr))
class FakeJitDriverSD:
    # Minimal stand-in for a JitDriverStaticData: only the
    # assembler-helper plumbing that the test exercises.
    index_of_virtualizable = -1     # -1: no virtualizable argument
    _assembler_helper_ptr = llhelper(FUNCPTR, assembler_helper)
    assembler_helper_adr = llmemory.cast_ptr_to_adr(
        _assembler_helper_ptr)
def walk_page(self, page, block_size, ok_to_free_func): """Walk over all objects in a page, and ask ok_to_free_func().""" # # 'freeblock' is the next free block freeblock = page.freeblock # # 'prevfreeblockat' is the address of where 'freeblock' was read from. prevfreeblockat = lltype.direct_fieldptr(page, 'freeblock') prevfreeblockat = llmemory.cast_ptr_to_adr(prevfreeblockat) # obj = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page)) obj += self.hdrsize surviving = 0 # initially skip_free_blocks = page.nfree # while True: # if obj == freeblock: # if skip_free_blocks == 0: # # 'obj' points to the first uninitialized block, # or to the end of the page if there are none. break # # 'obj' points to a free block. It means that # 'prevfreeblockat.address[0]' does not need to be updated. # Just read the next free block from 'obj.address[0]'. skip_free_blocks -= 1 prevfreeblockat = obj freeblock = obj.address[0] # else: # 'obj' points to a valid object. ll_assert(freeblock > obj, "freeblocks are linked out of order") # if ok_to_free_func(obj): # # The object should die. llarena.arena_reset(obj, _dummy_size(block_size), 0) llarena.arena_reserve(obj, llmemory.sizeof(llmemory.Address)) # Insert 'obj' in the linked list of free blocks. prevfreeblockat.address[0] = obj prevfreeblockat = obj obj.address[0] = freeblock # # Update the number of free objects in the page. page.nfree += 1 # else: # The object survives. surviving += 1 # obj += block_size # # Update the global total size of objects. self.total_memory_used += r_uint(surviving * block_size) # # Return the number of surviving objects. return surviving