def main(argv):
    rawrefcount.create_link_pypy(w1, ob1)
    w = None
    ob = lltype.nullptr(PyObjectS)
    oblist = []
    for op in argv[1:]:
        revdb.stop_point()
        w = W_Root(42)
        ob = lltype.malloc(PyObjectS, flavor='raw', zero=True)
        ob.c_ob_refcnt = rawrefcount.REFCNT_FROM_PYPY
        rawrefcount.create_link_pypy(w, ob)
        oblist.append(ob)
    del oblist[-1]
    #
    rgc.collect()
    assert rawrefcount.from_obj(PyObject, w) == ob
    assert rawrefcount.to_obj(W_Root, ob) == w
    while True:
        ob = rawrefcount.next_dead(PyObject)
        if not ob:
            break
        assert ob in oblist
        oblist.remove(ob)
    objectmodel.keepalive_until_here(w)
    revdb.stop_point()
    return 9

def func():
    # Allocate arrays of structs with one, two and three GC pointer
    # fields, fill every field, collect, and check that all stored
    # values survive the collection.
    s1 = lltype.malloc(S1, 2)
    s1[0].p1 = lltype.malloc(R)
    s1[1].p1 = lltype.malloc(R)
    s2 = lltype.malloc(S2, 2)
    s2[0].p1 = lltype.malloc(R)
    s2[0].p2 = lltype.malloc(R)
    s2[1].p1 = lltype.malloc(R)
    s2[1].p2 = lltype.malloc(R)
    s3 = lltype.malloc(S3, 2)
    s3[0].p1 = lltype.malloc(R)
    s3[0].p2 = lltype.malloc(R)
    s3[0].p3 = lltype.malloc(R)
    s3[1].p1 = lltype.malloc(R)
    s3[1].p2 = lltype.malloc(R)
    s3[1].p3 = lltype.malloc(R)
    s1[0].p1.i = 100
    s1[1].p1.i = 101
    s2[0].p1.i = 102
    s2[0].p2.i = 103
    s2[1].p1.i = 104
    s2[1].p2.i = 105
    s3[0].p1.i = 106
    s3[0].p2.i = 107
    s3[0].p3.i = 108
    s3[1].p1.i = 109
    s3[1].p2.i = 110
    s3[1].p3.i = 111
    rgc.collect()
    return ((s1[0].p1.i == 100) + (s1[1].p1.i == 101) +
            (s2[0].p1.i == 102) + (s2[0].p2.i == 103) +
            (s2[1].p1.i == 104) + (s2[1].p2.i == 105) +
            (s3[0].p1.i == 106) + (s3[0].p2.i == 107) +
            (s3[0].p3.i == 108) + (s3[1].p1.i == 109) +
            (s3[1].p2.i == 110) + (s3[1].p3.i == 111))

def collect(space, generation=0):
    "Run a full collection. The optional argument is ignored."
    # First clear the method and the map cache.
    # See test_gc for an example of why.
    from pypy.objspace.std.typeobject import MethodCache
    from pypy.objspace.std.mapdict import MapAttrCache
    cache = space.fromcache(MethodCache)
    cache.clear()
    cache = space.fromcache(MapAttrCache)
    cache.clear()
    rgc.collect()

    # if we are running in gc.disable() mode but gc.collect() is called,
    # we should still call the finalizers now.  We do this as an attempt
    # to get closer to CPython's behavior: in Py3.5 some tests
    # specifically rely on that.  This is similar to how, in CPython, an
    # explicit gc.collect() will invoke finalizers from cycles and fully
    # ignore the gc.disable() mode.
    temp_reenable = not space.user_del_action.enabled_at_app_level
    if temp_reenable:
        enable_finalizers(space)
    try:
        # fetch the pending finalizers from the queue, where they are
        # likely to have been added by rgc.collect() above, and actually
        # run them now.  This forces them to run before this function
        # returns, and also always in the enable_finalizers() mode.
        space.user_del_action._run_finalizers()
    finally:
        if temp_reenable:
            disable_finalizers(space)

    return space.wrap(0)

def f(n):
    lst = ['a', 'b', 'c']
    lst = rgc.resizable_list_supporting_raw_ptr(lst)
    lst.append(chr(n))
    assert lst[3] == chr(n)
    assert lst[-1] == chr(n)
    #
    ptr = rgc.nonmoving_raw_ptr_for_resizable_list(lst)
    assert lst[:] == ['a', 'b', 'c', chr(n)]
    assert lltype.typeOf(ptr) == rffi.CCHARP
    assert [ptr[i] for i in range(4)] == ['a', 'b', 'c', chr(n)]
    #
    lst[-3] = 'X'
    assert ptr[1] == 'X'
    ptr[2] = 'Y'
    assert lst[-2] == 'Y'
    #
    addr = rffi.cast(lltype.Signed, ptr)
    ptr = rffi.cast(rffi.CCHARP, addr)
    rgc.collect()    # should not move lst.items
    lst[-4] = 'g'
    assert ptr[0] == 'g'
    ptr[3] = 'H'
    assert lst[-1] == 'H'
    return lst

def entry_point(argv):
    ll_dealloc_trigger_callback = llhelper(FTYPE, dealloc_trigger)
    rawrefcount.init(ll_dealloc_trigger_callback)
    ob, p = make_p()
    if state.seen != []:
        print "OB COLLECTED REALLY TOO SOON"
        return 1
    rgc.collect()
    if state.seen != []:
        print "OB COLLECTED TOO SOON"
        return 1
    objectmodel.keepalive_until_here(p)
    p = None
    rgc.collect()
    if state.seen != [1]:
        print "OB NOT COLLECTED"
        return 1
    if rawrefcount.next_dead(PyObject) != ob:
        print "NEXT_DEAD != OB"
        return 1
    if rawrefcount.next_dead(PyObject) != lltype.nullptr(PyObjectS):
        print "NEXT_DEAD second time != NULL"
        return 1
    if rawrefcount.to_obj(W_Root, ob) is not None:
        print "to_obj(dead) is not None?"
        return 1
    rawrefcount.mark_deallocating(w_marker, ob)
    if rawrefcount.to_obj(W_Root, ob) is not w_marker:
        print "to_obj(marked-dead) is not w_marker"
        return 1
    print "OK!"
    lltype.free(ob, flavor='raw')
    return 0

def f():
    s = lltype.malloc(S)
    s.x = 42
    llop.bare_setfield(lltype.Void, s0, void('next'), s)
    llop.gc_writebarrier(lltype.Void, llmemory.cast_ptr_to_adr(s0))
    rgc.collect(0)
    return s0.next.x

def test_prebuilt_weakref(self):
    class A:
        pass
    a = A()
    a.hello = 42
    r1 = weakref.ref(a)
    r2 = weakref.ref(A())
    rgc.collect()
    assert r2() is None

    def fn(n):
        if n:
            r = r1
        else:
            r = r2
        a = r()
        rgc.collect()
        if a is None:
            return -5
        else:
            return a.hello

    c_fn = self.getcompiled(fn, [int])
    res = c_fn(1)
    assert res == 42
    res = c_fn(0)
    assert res == -5

def main(argv):
    glob.ping = False
    lst1 = [X() for i in range(256)]
    lst = [X() for i in range(3000)]
    for i, x in enumerate(lst):
        x.baz = i
        fq.register_finalizer(x)
    for i in range(3000):
        lst[i] = None
        if i % 300 == 150:
            rgc.collect()
        revdb.stop_point()
        j = i + glob.ping * 1000000
        assert foobar(j) == j
        if glob.ping:
            glob.ping = False
            total = 0
            while True:
                x = fq.next_dead()
                if x is None:
                    break
                total = intmask(total * 3 + x.baz)
            assert foobar(total) == total
    keepalive_until_here(lst1)
    return 9

def collect(space, generation=0):
    "Run a full collection. The optional argument is ignored."
    # First clear the method and the map cache.
    # See test_gc for an example of why.
    from pypy.objspace.std.typeobject import MethodCache
    from pypy.objspace.std.mapdict import MapAttrCache
    cache = space.fromcache(MethodCache)
    cache.clear()
    cache = space.fromcache(MapAttrCache)
    cache.clear()
    rgc.collect()

    # if we are running in gc.disable() mode but gc.collect() is called,
    # we should still call the finalizers now.  We do this as an attempt
    # to get closer to CPython's behavior: in Py3.5 some tests
    # specifically rely on that.  This is similar to how, in CPython, an
    # explicit gc.collect() will invoke finalizers from cycles and fully
    # ignore the gc.disable() mode.
    temp_reenable = not space.user_del_action.enabled_at_app_level
    if temp_reenable:
        enable_finalizers(space)
    try:
        # fetch the pending finalizers from the queue, where they are
        # likely to have been added by rgc.collect() above, and actually
        # run them now.  This forces them to run before this function
        # returns, and also always in the enable_finalizers() mode.
        space.user_del_action._run_finalizers()
    finally:
        if temp_reenable:
            disable_finalizers(space)

    return space.newint(0)

def main(argv):
    lst, keepalive = make(argv[0])
    expected = ['prebuilt'] + [c for c in argv[0]]
    dead = [False] * len(lst)
    for j in range(17000):
        outp = []
        for i in range(len(lst)):
            v = lst[i]()
            debug_print(v)
            if dead[i]:
                assert v is None
            elif v is None:
                outp.append('<DEAD>')
                dead[i] = True
            else:
                outp.append(v.s)
                assert v.s == expected[i]
        print ''.join(outp)
        if (j % 1000) == 999:
            debug_print('============= COLLECT ===========')
            rgc.collect()
            debug_print('------ done', j, '.')
    assert not dead[0]
    assert not dead[-1]
    keepalive_until_here(keepalive)
    revdb.stop_point()
    return 9

def entry_point(argv):
    rawrefcount.create_link_pypy(prebuilt_p, prebuilt_ob)
    prebuilt_ob.c_ob_refcnt += REFCNT_FROM_PYPY
    oblist = [make_ob() for i in range(50)]
    rgc.collect()
    deadlist = []
    while True:
        ob = rawrefcount.next_dead(PyObject)
        if not ob:
            break
        if ob.c_ob_refcnt != 1:
            print "next_dead().ob_refcnt != 1"
            return 1
        deadlist.append(ob)
    if len(deadlist) == 0:
        print "no dead object"
        return 1
    if len(deadlist) < 30:
        print "not enough dead objects"
        return 1
    for ob in deadlist:
        if ob not in oblist:
            print "unexpected value for dead pointer"
            return 1
        oblist.remove(ob)
    print "OK!"
    lltype.free(ob, flavor='raw')
    return 0

def entry_point(argv):
    ll_dealloc_trigger_callback = llhelper(FTYPE, dealloc_trigger)
    rawrefcount.init(ll_dealloc_trigger_callback)
    ob, p = make_p()
    if state.seen != []:
        print "OB COLLECTED REALLY TOO SOON"
        return 1
    rgc.collect()
    if state.seen != []:
        print "OB COLLECTED TOO SOON"
        return 1
    objectmodel.keepalive_until_here(p)
    p = None
    rgc.collect()
    if state.seen != [1]:
        print "OB NOT COLLECTED"
        return 1
    if rawrefcount.next_dead(PyObject) != ob:
        print "NEXT_DEAD != OB"
        return 1
    if rawrefcount.next_dead(PyObject) != lltype.nullptr(PyObjectS):
        print "NEXT_DEAD second time != NULL"
        return 1
    print "OK!"
    lltype.free(ob, flavor='raw')
    return 0

def g():
    n = state.counter
    if n > 0:
        for i in range(5):
            state.a = A(n)
        state.a = None
    rgc.collect()
    return n

def f():
    lst = lltype.malloc(A, 16)    # 16 > 10
    rgc.collect()
    sub(lst)
    null = lltype.nullptr(S)
    lst[15] = null    # clear, so that A() is only visible via lst[0]
    rgc.collect()     # -> crash
    return lst[0].x

def main(argv):
    lst = [X() for i in range(3000)]
    for i in range(3000):
        lst[i] = None
        if i % 300 == 150:
            rgc.collect()
        revdb.stop_point()
    return 9

def fn(n):
    if n > 0:
        x = BoxedObject(n)
    else:
        x = UnboxedObject(n)
    f.l.append(x)
    rgc.collect()
    return f.l[-1].meth(100)

def func():
    #try:
    a = rgc.malloc_nonmovable(TP, 3, zero=True)
    rgc.collect()
    if a:
        assert not rgc.can_move(a)
        return 1
    return 0

def fn():
    a1 = A()
    a = objectmodel.instantiate(A, nonmovable=True)
    a.next = a1    # 'a' is known young here, so no write barrier emitted
    res = rgc.can_move(annlowlevel.cast_instance_to_base_ptr(a))
    rgc.collect()
    objectmodel.keepalive_until_here(a)
    return res

def f():
    # numbers optimized for an 8MB space
    for n in [100000, 225000, 250000, 300000, 380000,
              460000, 570000, 800000]:
        os.write(2, 'case %d\n' % n)
        rgc.collect()
        h(n)
    return -42

def f():
    del state.freed[:]
    d = add_me()
    rgc.collect()
    # we want the dictionary to be really empty here.  It's hard to
    # ensure in the current implementation after just one collect(),
    # but at least two collects should be enough.
    rgc.collect()
    return len(state.freed)

def f():
    # Create and immediately drop instances of A, B and C, collect,
    # and report how many destructors ran (encoded as a_dels * 10 + b_dels).
    A()
    B()
    C()
    A()
    B()
    C()
    rgc.collect()
    return s.a_dels * 10 + s.b_dels

def test_destroy(self):
    # this used to give MemoryError in shadowstack tests
    for i in range(100000):
        self.status = 0
        h = self.sthread.new(switchbackonce_callback,
                             rffi.cast(llmemory.Address, 321))
        # 'h' ignored
        if (i % 2000) == 1000:
            rgc.collect()
            # This should run in < 1.5GB virtual memory

def fn(n):
    rgc.collect()    # check that a prebuilt tagged pointer doesn't explode
    if n > 0:
        x = BoxedObject(n)
    else:
        x = UnboxedObject(n)
    u.x = x    # invoke write barrier
    rgc.collect()
    return x.meth(100)

def fn():
    result = 0
    for i in range(2):
        a = refs[i]()
        rgc.collect()
        if a is None:
            result += (i + 1)
        else:
            result += a.hello * (i + 1)
    return result

def main(n):
    states = f(n)
    rgc.collect()
    rgc.collect()
    err = 1001
    for state in states:
        if state.num != 1001:
            err = state.num
            print 'ERROR:', err
    return err

def fn(n):
    rgc.collect()    # check that a prebuilt tagged pointer doesn't explode
    if n > 0:
        x = BoxedObject(n)
    else:
        x = UnboxedObject(n)
    u.x = x    # invoke write barrier
    rgc.collect()
    return x.meth(100)

def func():
    try:
        a = rgc.malloc_nonmovable(TP)
        rgc.collect()
        if a:
            assert not rgc.can_move(a)
            return 1
        return 0
    except Exception:
        return 2

def fn():
    # Fill an array of GC pointers, copy a slice of it with
    # rgc.ll_arraycopy(), then collect and check that the copied
    # references are still valid.
    l = lltype.malloc(TP, 100)
    for i in range(100):
        l[i] = lltype.malloc(TP.OF.TO, i)
    l2 = lltype.malloc(TP, 50)
    rgc.ll_arraycopy(l, l2, 40, 0, 50)
    rgc.collect()
    for i in range(50):
        assert l2[i] == l[40 + i]
    return 0

def fn():
    # Build a long string with StringBuilder, collecting in the middle
    # and at the end, and return one character of the result.
    s = StringBuilder(4)
    s.append("abcd")
    s.append("defg")
    s.append("rty")
    s.append_multiple_char('y', 1000)
    rgc.collect()
    s.append_multiple_char('y', 1000)
    res = s.build()[1000]
    rgc.collect()
    return ord(res)

def cb_stacklet_callback(h, arg):
    runner.steps.append(1)
    while True:
        assert not runner.sthread.is_empty_handle(h)
        h = runner.sthread.switch(h)
        assert not runner.sthread.is_empty_handle(h)
        if runner.steps[-1] == 9:
            return h
        runner.steps.append(4)
        rgc.collect()
        runner.steps.append(5)

def f(n, m, gc_can_shrink_array):
    ptr = lltype.malloc(STR, n)
    ptr.hash = 0x62
    ptr.chars[0] = 'A'
    ptr.chars[1] = 'B'
    ptr.chars[2] = 'C'
    ptr2 = rgc.ll_shrink_array(ptr, 2)
    assert (ptr == ptr2) == gc_can_shrink_array
    rgc.collect()
    return (ord(ptr2.chars[0]) +
            (ord(ptr2.chars[1]) << 8) +
            (len(ptr2.chars) << 16) +
            (ptr2.hash << 24))

def fn(n):
    if n:
        r = r1
    else:
        r = r2
    a = r()
    rgc.collect()
    if a is None:
        return -5
    else:
        return a.hello

def main(argv):
    glob.count = 0
    lst = [X() for i in range(3000)]
    x = -1
    for i in range(3000):
        lst[i] = None
        if i % 300 == 150:
            rgc.collect()
        revdb.stop_point()
        x = glob.count
        assert foobar(x) == x
    print x
    return 9

def collect(space, generation=0):
    "Run a full collection. The optional argument is ignored."
    # First clear the method and the map cache.
    # See test_gc for an example of why.
    from pypy.objspace.std.typeobject import MethodCache
    from pypy.objspace.std.mapdict import MapAttrCache
    cache = space.fromcache(MethodCache)
    cache.clear()
    cache = space.fromcache(MapAttrCache)
    cache.clear()
    rgc.collect()
    _run_finalizers(space)

def fn():
    objects = []
    hashes = []
    for i in range(200):
        rgc.collect(0)    # nursery-only collection, if possible
        obj = A()
        objects.append(obj)
        hashes.append(compute_identity_hash(obj))
    unique = {}
    for i in range(len(objects)):
        assert compute_identity_hash(objects[i]) == hashes[i]
        unique[hashes[i]] = None
    return len(unique)

def f(n, m, gc_can_shrink_array):
    ptr = lltype.malloc(STR, n)
    ptr.hash = 0x62
    ptr.chars[0] = 'A'
    ptr.chars[1] = 'B'
    ptr.chars[2] = 'C'
    ptr2 = rgc.ll_shrink_array(ptr, 2)
    assert (ptr == ptr2) == gc_can_shrink_array
    rgc.collect()
    return (ord(ptr2.chars[0]) +
            (ord(ptr2.chars[1]) << 8) +
            (len(ptr2.chars) << 16) +
            (ptr2.hash << 24))

def collect(space, generation=0):
    "Run a full collection. The optional argument is ignored."
    # First clear the method cache.  See test_gc for an example of why.
    if space.config.objspace.std.withmethodcache:
        from pypy.objspace.std.typeobject import MethodCache
        cache = space.fromcache(MethodCache)
        cache.clear()
    if space.config.objspace.std.withmapdict:
        from pypy.objspace.std.mapdict import MapAttrCache
        cache = space.fromcache(MapAttrCache)
        cache.clear()
    rgc.collect()
    return space.wrap(0)

def f():
    keys.append(K())
    d = RWeakValueDictionary(K, X)
    x1, x3 = g(d)
    rgc.collect(); rgc.collect()
    assert d.get(keys[0]) is x1
    assert d.get(keys[1]) is None
    assert d.get(keys[2]) is x3
    assert d.get(keys[3]) is None
    d.set(keys[0], None)
    assert d.get(keys[0]) is None
    assert d.get(keys[1]) is None
    assert d.get(keys[2]) is x3
    assert d.get(keys[3]) is None

def fn(n):
    id_prebuilt1 = compute_unique_id(u.x)
    if n > 0:
        x = BoxedObject(n)
    else:
        x = UnboxedObject(n)
    id_x1 = compute_unique_id(x)
    rgc.collect()    # check that a prebuilt tagged pointer doesn't explode
    id_prebuilt2 = compute_unique_id(u.x)
    id_x2 = compute_unique_id(x)
    print u.x, id_prebuilt1, id_prebuilt2
    print x, id_x1, id_x2
    return ((id_x1 == id_x2) * 1 +
            (id_prebuilt1 == id_prebuilt2) * 10 +
            (id_x1 != id_prebuilt1) * 100)

def fn(n):
    id_prebuilt1 = compute_unique_id(u.x)
    if n > 0:
        x = BoxedObject(n)
    else:
        x = UnboxedObject(n)
    id_x1 = compute_unique_id(x)
    rgc.collect()    # check that a prebuilt tagged pointer doesn't explode
    id_prebuilt2 = compute_unique_id(u.x)
    id_x2 = compute_unique_id(x)
    print u.x, id_prebuilt1, id_prebuilt2
    print x, id_x1, id_x2
    return ((id_x1 == id_x2) * 1 +
            (id_prebuilt1 == id_prebuilt2) * 10 +
            (id_x1 != id_prebuilt1) * 100)

def define_prebuilt_weakref(cls):
    import weakref
    class A:
        pass
    a = A()
    a.hello = 42
    refs = [weakref.ref(a), weakref.ref(A())]
    rgc.collect()

    def fn():
        result = 0
        for i in range(2):
            a = refs[i]()
            rgc.collect()
            if a is None:
                result += (i + 1)
            else:
                result += a.hello * (i + 1)
        return result

    return fn

def h(n):
    x3 = g(3)
    x4 = g(3)
    x1 = g(n)
    build(x1, n)           # can collect!
    check(x1, n, 1)
    build(x3, 3)
    x2 = g(n // 2)         # allocate more and try again
    build(x2, n // 2)
    check(x1, n, 11)
    check(x2, n // 2, 12)
    build(x4, 3)
    check(x3, 3, 13)       # check these old objects too
    check(x4, 3, 14)       # check these old objects too
    rgc.collect()
    check(x1, n, 21)
    check(x2, n // 2, 22)
    check(x3, 3, 23)
    check(x4, 3, 24)

def f():
    if compute_hash(c) != compute_identity_hash(c):
        return 12
    if compute_hash(d) != h_d:
        return 13
    if compute_hash(("Hi", None, (7.5, 2, d))) != h_t:
        return 14
    c2 = C()
    h_c2 = compute_hash(c2)
    if compute_hash(c2) != h_c2:
        return 15
    if compute_identity_hash(s) == h_s:
        return 16    # unlikely
    i = 0
    while i < 6:
        rgc.collect()
        if compute_hash(c2) != h_c2:
            return i
        i += 1
    return 42