def test_bigfile_filezodb_vs_conflicts():
    # Verify ZBigFile conflict detection: when two connections modify the
    # same block concurrently, the second commit must raise ConflictError,
    # and after abort the loser re-reads winner's data and can retry.
    root = dbopen()
    conn = root._p_jar
    db   = conn.db()
    conn.close()
    del root, conn

    tm1 = TransactionManager()
    tm2 = TransactionManager()

    conn1 = db.open(transaction_manager=tm1)
    root1 = conn1.root()

    # setup zfile with fileh view to it
    root1['zfile3a'] = f1 = ZBigFile(blksize)
    tm1.commit()

    fh1 = f1.fileh_open()
    tm1.commit()

    # set zfile initial data
    vma1 = fh1.mmap(0, 1)
    Blk(vma1, 0)[0] = 1
    tm1.commit()

    # read zfile and setup fileh for it in conn2
    conn2 = db.open(transaction_manager=tm2)
    root2 = conn2.root()
    f2 = root2['zfile3a']

    fh2  = f2.fileh_open()
    vma2 = fh2.mmap(0, 1)

    assert Blk(vma2, 0)[0] == 1     # read data in conn2 + make sure read correctly

    # now zfile content is both in ZODB.Connection cache and in _ZBigFileH
    # cache for each conn1 and conn2. Modify data in both conn1 and conn2 and
    # see how it goes.
    Blk(vma1, 0)[0] = 11
    Blk(vma2, 0)[0] = 12

    # txn1 should commit ok
    tm1.commit()

    # txn2 should raise ConflictError and stay at 11 state
    # NOTE context-manager form; the string-condition form
    # raises(ConflictError, 'tm2.commit()') was deprecated and removed in
    # pytest 5.0.
    with raises(ConflictError):
        tm2.commit()
    tm2.abort()

    assert Blk(vma2, 0)[0] == 11    # re-read in conn2
    Blk(vma2, 0)[0] = 13
    tm2.commit()

    assert Blk(vma1, 0)[0] == 11    # not yet propagated to conn1
    tm1.commit()                    # transaction boundary

    assert Blk(vma1, 0)[0] == 13    # re-read in conn1

    conn2.close()
    dbclose(root1)
def test_bigfile_filezodb_fileh_gc():
    """Verify that a fileh is garbage-collected once all references to it are dropped."""
    root1 = dbopen()
    conn1 = root1._p_jar
    db    = conn1.db()
    root1['zfile4'] = f1 = ZBigFile(blksize)
    transaction.commit()

    fh1  = f1.fileh_open()
    vma1 = fh1.mmap(0, 1)
    wfh1 = weakref.ref(fh1)     # weakref to observe fh1 deallocation
    assert wfh1() is fh1

    conn1.close()
    # drop every strong reference to fh1 (vma1 keeps fh1 alive via the mapping)
    del vma1, fh1, f1, root1

    # reopen the file via a fresh connection/fileh - this must not resurrect
    # or retain the old fileh
    conn2 = db.open()
    root2 = conn2.root()
    f2 = root2['zfile4']

    fh2  = f2.fileh_open()
    vma2 = fh2.mmap(0, 1)

    gc.collect()
    assert wfh1() is None   # fh1 should be gone

    del vma2, fh2, f2
    dbclose(root2)
def test_bigfile_filezodb_fmt_change():
    """Verify that data written in one ZBlk on-disk format is rewritten in
    another format when file_zodb.ZBlk_fmt_write is switched."""
    root = dbopen()
    root['zfile5'] = f = ZBigFile(blksize)
    transaction.commit()

    fh  = f.fileh_open()    # TODO + ram
    vma = fh.mmap(0, blen)

    # save/restore original ZBlk_fmt_write
    fmt_write_save = file_zodb.ZBlk_fmt_write
    try:
        # check all combinations of format pairs via working with blk #0 and
        # checking internal f structure
        for src_fmt, src_type in ZBlk_fmt_registry.items():
            for dst_fmt, dst_type in ZBlk_fmt_registry.items():
                if src_fmt == dst_fmt:
                    continue    # skip checking e.g. ZBlk0 -> ZBlk0

                # write blk #0 with src_fmt - stored object must be of src_type
                file_zodb.ZBlk_fmt_write = src_fmt
                struct.pack_into('p', vma, 0, b(src_fmt))
                transaction.commit()
                assert type(f.blktab[0]) is src_type

                # rewrite blk #0 with dst_fmt - stored object type must change
                file_zodb.ZBlk_fmt_write = dst_fmt
                struct.pack_into('p', vma, 0, b(dst_fmt))
                transaction.commit()
                assert type(f.blktab[0]) is dst_type
    finally:
        # always restore the module-global write format
        file_zodb.ZBlk_fmt_write = fmt_write_save

    dbclose(root)
def test_bigfile_filezodb_vs_cache_invalidation():
    """Verify that changes committed in conn1 propagate to conn2 only at
    conn2's transaction boundary (ZODB cache invalidation)."""
    root = dbopen()
    conn = root._p_jar
    db   = conn.db()
    conn.close()
    del root, conn

    tm1 = TransactionManager()
    tm2 = TransactionManager()

    conn1 = db.open(transaction_manager=tm1)
    root1 = conn1.root()

    # setup zfile with fileh view to it
    root1['zfile3'] = f1 = ZBigFile(blksize)
    tm1.commit()

    fh1 = f1.fileh_open()
    tm1.commit()

    # set zfile initial data
    vma1 = fh1.mmap(0, 1)
    Blk(vma1, 0)[0] = 1
    tm1.commit()

    # read zfile and setup fileh for it in conn2
    conn2 = db.open(transaction_manager=tm2)
    root2 = conn2.root()
    f2 = root2['zfile3']

    fh2  = f2.fileh_open()
    vma2 = fh2.mmap(0, 1)

    assert Blk(vma2, 0)[0] == 1     # read data in conn2 + make sure read correctly

    # now zfile content is both in ZODB.Connection cache and in _ZBigFileH
    # cache for each conn1 and conn2. Modify data in conn1 and make sure it
    # fully propagate to conn2.
    Blk(vma1, 0)[0] = 2
    tm1.commit()

    # still should be read as old value in conn2
    assert Blk(vma2, 0)[0] == 1
    # and even after virtmem pages reclaim
    # ( verifies that _p_invalidate() in ZBlk.loadblkdata() does not lead to
    #   reloading data as updated )
    ram_reclaim_all()
    assert Blk(vma2, 0)[0] == 1

    tm2.commit()    # transaction boundary for t2

    # data from tm1 should propagate -> ZODB -> ram pages for _ZBigFileH in conn2
    assert Blk(vma2, 0)[0] == 2

    conn2.close()
    del conn2, root2
    dbclose(root1)
def setup_module():
    # Module-level fixture: create the test database and pre-populate it with
    # one ZBigFile under root['zfile'], used by the benchmarks below.
    global testdb
    testdb = getTestDB()
    testdb.setup()

    root = testdb.dbopen()
    root['zfile'] = ZBigFile(blksize)
    transaction.commit()

    dbclose(root)
def test_zbigarray_vs_conflicts_metadata():
    # Verify ZBigArray metadata (.shape) conflict detection: concurrent
    # resizes from two connections must conflict; the loser aborts, re-reads
    # the winner's size and can then retry.
    root = testdb.dbopen()
    conn = root._p_jar
    db   = conn.db()
    conn.close()
    del root, conn

    tm1 = TransactionManager()
    tm2 = TransactionManager()

    conn1 = db.open(transaction_manager=tm1)
    root1 = conn1.root()

    # setup zarray
    root1['zarray3b'] = a1 = ZBigArray((10,), uint8)
    tm1.commit()

    # set zarray initial data
    a1[0:1] = [1]   # XXX -> [0] = 1  after BigArray can
    tm1.commit()

    # read zarray in conn2
    conn2 = db.open(transaction_manager=tm2)
    root2 = conn2.root()
    a2 = root2['zarray3b']

    assert a2[0:1] == [1]   # read data in conn2 + make sure read correctly
                            # XXX -> [0] == 1  after BigArray can

    # now zarray content is both in ZODB.Connection cache and in _ZBigFileH
    # cache for each conn1 and conn2. Resize arrays in both conn1 and conn2 and
    # see how it goes.
    a1.resize((11,))
    a2.resize((12,))

    # txn1 should commit ok
    tm1.commit()

    # txn2 should raise ConflictError and stay at 11 state
    # NOTE context-manager form; the string-condition form
    # raises(ConflictError, 'tm2.commit()') was deprecated and removed in
    # pytest 5.0.
    with raises(ConflictError):
        tm2.commit()
    tm2.abort()

    assert len(a2) == 11    # re-read in conn2
    a2.resize((13,))
    tm2.commit()

    assert len(a1) == 11    # not yet propagated to conn1
    tm1.commit()            # transaction boundary

    assert len(a1) == 13    # re-read in conn1

    conn2.close()
    dbclose(root1)
def test_zbigarray_vs_cache_invalidation():
    """Verify that zarray data committed in conn1 propagates to conn2 only at
    conn2's transaction boundary (ZODB cache invalidation)."""
    root = testdb.dbopen()
    conn = root._p_jar
    db   = conn.db()
    conn.close()
    del root, conn

    tm1 = TransactionManager()
    tm2 = TransactionManager()

    conn1 = db.open(transaction_manager=tm1)
    root1 = conn1.root()

    # setup zarray
    root1['zarray3'] = a1 = ZBigArray((10,), uint8)
    tm1.commit()

    # set zarray initial data
    a1[0:1] = [1]   # XXX -> [0] = 1  after BigArray can
    tm1.commit()

    # read zarray in conn2
    conn2 = db.open(transaction_manager=tm2)
    root2 = conn2.root()
    a2 = root2['zarray3']

    assert a2[0:1] == [1]   # read data in conn2 + make sure read correctly
                            # XXX -> [0] == 1  after BigArray can

    # now zarray content is both in ZODB.Connection cache and in _ZBigFileH
    # cache for each conn1 and conn2. Modify data in conn1 and make sure it
    # fully propagate to conn2.
    a1[0:1] = [2]   # XXX -> [0] = 2  after BigArray can
    tm1.commit()

    # still should be read as old value in conn2
    assert a2[0:1] == [1]
    # and even after virtmem pages reclaim
    # ( verifies that _p_invalidate() in ZBlk.loadblkdata() does not lead to
    #   reloading data as updated )
    ram_reclaim_all()
    assert a2[0:1] == [1]

    tm2.commit()    # transaction boundary for t2

    # data from tm1 should propagate -> ZODB -> ram pages for _ZBigFileH in conn2
    assert a2[0] == 2

    conn2.close()
    del conn2, root2
    dbclose(root1)
def bench_bigz_writeff():
    # Benchmark: fill the whole zfile with 0xff via mmap and commit.
    root = testdb.dbopen()
    f   = root['zfile']
    fh  = f.fileh_open()    # TODO + ram
    vma = fh.mmap(0, blen)  # XXX assumes blksize == pagesize

    memset(vma, 0xff)
    transaction.commit()

    del vma # TODO vma.close()
    del fh  # TODO fh.close()
    del f   # XXX  f.close() ?
    dbclose(root)
def _bench_bigz_hash(hasher, expect):
    # Benchmark helper: hash the whole zfile content with `hasher` and check
    # the resulting digest against `expect`.
    root = testdb.dbopen()
    f   = root['zfile']
    fh  = f.fileh_open()    # TODO + ram
    vma = fh.mmap(0, blen)  # XXX assumes blksize == pagesize

    h = hasher()
    h.update(vma)

    del vma # vma.close()
    del fh  # fh.close()
    del f   # f.close()
    dbclose(root)
    assert h.digest() == expect
def test_zbigarray_invalidate_shape():
    # Verify that after conn1 appends data, conn2 observes both the updated
    # .shape metadata and the appended content at its transaction boundary.
    root = testdb.dbopen()
    conn = root._p_jar
    db   = conn.db()
    conn.close()
    del root, conn
    # NOTE stray debug `print` statement that was here has been removed

    tm1 = TransactionManager()
    tm2 = TransactionManager()

    conn1 = db.open(transaction_manager=tm1)
    root1 = conn1.root()

    # setup zarray
    root1['zarray4'] = a1 = ZBigArray((10,), uint8)
    tm1.commit()

    # set zarray initial data
    a1[0:1] = [1]   # XXX -> [0] = 1  after BigArray can
    tm1.commit()

    # read zarray in conn2
    conn2 = db.open(transaction_manager=tm2)
    root2 = conn2.root()
    a2 = root2['zarray4']

    assert a2[0:1] == [1]   # read data in conn2 + make sure read correctly
                            # XXX -> [0] == 1  after BigArray can

    # append to a1 which changes both RAM pages and a1.shape
    assert a1.shape == (10,)
    a1.append([123])
    assert a1.shape == (11,)
    assert a1[10:11] == [123]   # XXX -> [10] = 123  after BigArray can
    tm1.commit()
    tm2.commit()    # just transaction boundary for t2

    # data from tm1 should propagate to tm2
    assert a2.shape == (11,)
    assert a2[10:11] == [123]   # XXX -> [10] = 123  after BigArray can

    conn2.close()
    del conn2, root2, a2
    dbclose(root1)
def main():
    # Entry point: `gen` creates and fills a 2*RAM-sized float64 ZBigArray in
    # the database at dburi; `read` reads it back. Prints RAM/VIRT/RSS stats.
    try:
        act   = sys.argv[1]
        dburi = sys.argv[2]
    except IndexError:
        usage()     # NOTE(review): presumably usage() exits - confirm

    if act not in ('gen', 'read'):
        usage()

    ram_nbytes = psutil.virtual_memory().total
    print('I: RAM: %.2fGB' % (float(ram_nbytes) / GB))

    root = dbopen(dburi)

    if act == 'gen':
        sig_dtype = dtype(float64)
        # signal is sized to 2x physical RAM so it cannot fit in memory
        sig_len   = (2*ram_nbytes) // sig_dtype.itemsize
        sig = ZBigArray((sig_len,), sig_dtype)
        root['signalv'] = sig

        # ZBigArray requirement: before we can compute it (with subobject
        # .zfile) have to be made explicitly known to connection or current
        # transaction committed
        transaction.commit()

        gen(sig)

    elif act == 'read':
        read(root['signalv'])

    import os
    p = psutil.Process(os.getpid())
    m = p.memory_info()
    print('VIRT: %i MB\tRSS: %iMB' % (m.vms//MB, m.rss//MB))

    dbclose(root)
def test_zbigarray_order():
    # make sure order is properly saved/restored to/from DB
    root = testdb.dbopen()
    root['carray'] = ZBigArray((16*1024*1024,), uint8)
    root['farray'] = ZBigArray((16*1024*1024,), uint8, order='F')
    transaction.commit()

    dbclose(root)
    del root

    root = testdb.dbopen()
    C = root['carray']
    F = root['farray']

    assert isinstance(C, ZBigArray)
    assert C.shape  == (16*1024*1024,)
    assert C.dtype  == dtype(uint8)
    assert C._order == 'C'      # default order is 'C'

    assert isinstance(F, ZBigArray)
    assert F.shape  == (16*1024*1024,)
    assert F.dtype  == dtype(uint8)
    assert F._order == 'F'

    # make sure we can read previously saved data which had no order set
    root['coldarray'] = Cold = ZBigArray((16*1024*1024,), uint8)
    del Cold._order     # simulate that it is without
    assert '_order' not in Cold.__getstate__()
    transaction.commit()

    dbclose(root)
    del root, Cold

    root = testdb.dbopen()
    Cold = root['coldarray']

    # missing _order must default to 'C' on load
    assert Cold._order == 'C'

    dbclose(root)
    del root
def test_zbigarray():
    """Basic ZBigArray test: create/modify/abort/commit, reload, cache-gc
    survival, resize and append."""
    root = testdb.dbopen()
    root['zarray'] = ZBigArray((16*1024*1024,), uint8)
    transaction.commit()

    dbclose(root)
    del root

    root = testdb.dbopen()
    A = root['zarray']

    assert isinstance(A, ZBigArray)
    assert A.shape  == (16*1024*1024,)
    assert A.dtype  == dtype(uint8)

    assert all(A[:] == 0)

    a = A[:]
    a[1] = 1
    a[3] = 3
    a[5] = 5
    a[-1] = 99

    # views of the same array observe each other's writes
    b = A[:]
    assert (b[0],b[1]) == (0,1)
    assert (b[2],b[3]) == (0,3)
    assert (b[4],b[5]) == (0,5)
    assert all(b[6:-1] == 0)
    assert b[-1] == 99

    # abort - should forget all changes
    transaction.abort()
    assert all(a[:] == 0)
    assert all(b[:] == 0)
    assert all(A[:] == 0)

    # now modify again and commit
    a[33] = 33
    a[-2] = 98
    assert all(b[:33] == 0)
    assert b[33] == 33
    assert all(b[33+1:-2] == 0)
    assert b[-2] == 98
    assert b[-1] == 0

    transaction.commit()

    # reload DB & array
    dbclose(root)
    del root, a,b, A

    root = testdb.dbopen()
    A = root['zarray']

    assert isinstance(A, ZBigArray)
    assert A.shape  == (16*1024*1024,)
    assert A.dtype  == dtype(uint8)

    a = A[:]
    assert all(a[:33] == 0)
    assert a[33] == 33
    assert all(a[33+1:-2] == 0)
    assert a[-2] == 98
    assert a[-1] == 0

    # like ZBigFile ZBigArray should survive Persistent cache clearing and not
    # go to ghost state (else logic to propagate changes from pages to objects
    # would subtly break after Persistent cache gc)
    db = root._p_jar.db()
    ci = cacheInfo(db)
    assert ci[kkey(ZBigArray)] == 1
    assert A._p_state == UPTODATE
    db.cacheMinimize()
    ci = cacheInfo(db)
    assert ci[kkey(ZBigArray)] == 1
    assert A._p_state == UPTODATE   # it would be GHOST without LivePersistent protection

    a[-1] = 99  # would not propagate to file without ZBigFile preventing itself to go to ghost
    transaction.commit()

    # reload & verify changes
    dbclose(root)
    del root, a, A, db
    root = testdb.dbopen()
    A = root['zarray']

    assert isinstance(A, ZBigArray)
    assert A.shape  == (16*1024*1024,)
    assert A.dtype  == dtype(uint8)

    a = A[:]
    assert all(a[:33] == 0)
    assert a[33] == 33
    assert all(a[33+1:-2] == 0)
    assert a[-2] == 98
    assert a[-1] == 99

    # resize array & append data
    A.resize((24*1024*1024,))
    assert A.shape  == (24*1024*1024,)
    assert A.dtype  == dtype(uint8)

    # old data must be preserved on resize
    b = A[:]
    assert array_equal(a, b[:16*1024*1024])

    b[16*1024*1024] = 100
    b[-1] = 255

    A.append(arange(10, 14, dtype=uint8))

    # commit; reload & verify changes
    transaction.commit()
    dbclose(root)
    del root, a, b, A
    root = testdb.dbopen()
    A = root['zarray']

    assert isinstance(A, ZBigArray)
    assert A.shape  == (24*1024*1024 + 4,)
    assert A.dtype  == dtype(uint8)

    a = A[:]
    assert all(a[:33] == 0)
    assert a[33] == 33
    assert all(a[33+1:16*1024*1024-2] == 0)
    assert a[16*1024*1024-2] == 98
    assert a[16*1024*1024-1] == 99
    assert a[16*1024*1024]   == 100
    assert a[24*1024*1024-1] == 255
    assert a[24*1024*1024+0] == 10
    assert a[24*1024*1024+1] == 11
    assert a[24*1024*1024+2] == 12
    assert a[24*1024*1024+3] == 13

    dbclose(root)
def test_bigfile_filezodb():
    """Basic ZBigFile test: dirty/abort/commit cycles, reload, page reclaim,
    and survival of Persistent cache clearing."""
    root = dbopen()
    root['zfile'] = f = ZBigFile(blksize)
    transaction.commit()

    fh  = f.fileh_open()    # TODO + ram
    vma = fh.mmap(0, blen)  # XXX assumes blksize == pagesize

    # verify that empty file reads as all zeros
    data0 = zeros(blksize32, dtype=uint32)
    dataX = lambda i: arange(i*blksize32, (i+1)*blksize32, dtype=uint32)
    for i in xrange(blen):
        assert array_equal(data0, Blk(vma, i))

    # dirty data
    for i in xrange(blen):
        Blk(vma, i)[:] = dataX(i)

    # verify that the changes are lost after abort
    transaction.abort()
    for i in xrange(blen):
        assert array_equal(data0, Blk(vma, i))

    # dirty & abort once again
    # (verifies that ZBigFile data manager re-registers with transaction)
    for i in xrange(blen):
        Blk(vma, i)[:] = dataX(i)
    transaction.abort()
    for i in xrange(blen):
        assert array_equal(data0, Blk(vma, i))

    # dirty data & commit
    for i in xrange(blen):
        Blk(vma, i)[:] = dataX(i)
    transaction.commit()

    # close DB and reopen everything
    # vma.unmap()
    del vma
    #fh.close()
    del fh
    dbclose(root)
    del root

    root = dbopen()
    f = root['zfile']
    fh  = f.fileh_open()    # TODO + ram
    vma = fh.mmap(0, blen)  # XXX assumes blksize == pagesize

    # verify data as re-loaded
    for i in xrange(blen):
        assert array_equal(Blk(vma, i), dataX(i))

    # evict all loaded pages and test loading them again
    # (verifies ZBlk.loadblkdata() & loadblk logic when loading data the second time)
    reclaimed = ram_reclaim_all()
    assert reclaimed >= blen    # XXX assumes pagesize=blksize

    for i in xrange(blen):
        assert array_equal(Blk(vma, i), dataX(i))

    # dirty once again & commit
    # (verified ZBlk.__setstate__() & storeblk logic when storing data the second time)
    for i in xrange(blen):
        Blk(vma, i)[0] = i+1
    transaction.commit()

    # close DB and reopen everything
    del vma
    del fh
    dbclose(root)
    del root

    root = dbopen()
    f = root['zfile']
    fh  = f.fileh_open()    # TODO + ram
    vma = fh.mmap(0, blen)  # XXX assumes blksize == pagesize

    # verify data as re-loaded
    for i in xrange(blen):
        assert Blk(vma, i)[0] == i+1
        assert array_equal(Blk(vma, i)[1:], dataX(i)[1:])

    # ZBigFile should survive Persistent cache clearing and not go to ghost
    # state (else logic to propagate changes from pages to objects would subtly
    # break after Persistent cache gc)
    db = root._p_jar.db()
    ci = cacheInfo(db)
    assert ci[kkey(ZBigFile)] == 1
    assert f._p_state == UPTODATE
    db.cacheMinimize()
    ci = cacheInfo(db)
    assert ci[kkey(ZBigFile)] == 1
    assert f._p_state == UPTODATE   # it would be GHOST without LivePersistent protection

    # verify that data changes propagation continue to work
    assert Blk(vma, 0)[0] == 1
    assert array_equal(Blk(vma, 0)[1:], dataX(0)[1:])
    Blk(vma, 0)[0] = 99
    transaction.commit()

    del vma
    del fh
    dbclose(root)
    del db, root

    root = dbopen()
    f = root['zfile']
    fh  = f.fileh_open()    # TODO + ram
    vma = fh.mmap(0, blen)  # XXX assumes blksize == pagesize

    # verify data as re-loaded
    assert Blk(vma, 0)[0] == 99
    assert array_equal(Blk(vma, 0)[1:], dataX(0)[1:])
    for i in xrange(1, blen):
        assert Blk(vma, i)[0] == i+1
        assert array_equal(Blk(vma, i)[1:], dataX(i)[1:])

    dbclose(root)
def test_bigfile_filezodb_vs_conn_migration():
    """Verify ZBigFile behaviour when the same ZODB Connection migrates
    between threads (commit/abort from a different thread than the one which
    modified the data)."""
    root01 = dbopen()
    conn01 = root01._p_jar
    db     = conn01.db()
    conn01.close()
    del root01

    c12_1 = NotifyChannel()   # T11 -> T21
    c21_1 = NotifyChannel()   # T21 -> T11

    # open, modify, commit, close, open, commit
    def T11():
        tell, wait = c12_1.tell, c21_1.wait

        conn11_1 = db.open()
        assert conn11_1 is conn01

        # setup zfile with ZBigArray-like satellite,
        root11_1 = conn11_1.root()
        root11_1['zfile2'] = f11 = ZBigFile(blksize)
        transaction.commit()

        root11_1['zarray2'] = a11 = LivePersistent()
        a11._v_fileh = fh11 = f11.fileh_open()
        transaction.commit()

        # set zfile initial data
        vma11 = fh11.mmap(0, 1)
        Blk(vma11, 0)[0] = 11
        transaction.commit()

        # close conn, wait till T21 reopens it
        del vma11, fh11, a11, f11, root11_1
        conn11_1.close()
        tell('T1-conn11_1-closed')
        wait('T2-conn21-opened')

        # open another connection (e.g. for handling next request) which does
        # not touch zfile at all, and arrange timings so that T2 modifies
        # zfile, but do not yet commit, and then commit here.
        conn11_2 = db.open()
        assert conn11_2 is not conn11_1
        root11_2 = conn11_2.root()

        wait('T2-zfile2-modified')

        # XXX do we want to also modify some other object?
        # (but this has side effect of joining conn11_2 to txn)
        transaction.commit()    # should be nothing
        tell('T1-txn12-committed')

        wait('T2-conn21-closed')
        del root11_2
        conn11_2.close()

        # hold on this thread until main driver tells us
        wait('T11-exit-command')

    # open, modify, abort
    def T21():
        tell, wait = c21_1.tell, c12_1.wait

        # - wait until T1 finish setting up initial data for zfile and closes connection.
        # - open that connection before T1 is asleep - because ZODB organizes
        #   connection pool as stack (with correction for #active objects),
        #   we should get exactly the same connection T1 had.
        wait('T1-conn11_1-closed')
        conn21 = db.open()
        assert conn21 is conn01
        tell('T2-conn21-opened')

        # modify zfile and arrange timings so that T1 commits after zfile is
        # modified, but before we commit/abort.
        root21 = conn21.root()
        a21 = root21['zarray2']
        fh21  = a21._v_fileh
        vma21 = fh21.mmap(0, 1)
        Blk(vma21, 0)[0] = 21
        tell('T2-zfile2-modified')

        wait('T1-txn12-committed')

        # abort - zfile2 should stay unchanged
        transaction.abort()

        del vma21, fh21, a21, root21
        conn21.close()
        tell('T2-conn21-closed')

    t11, t21 = Thread(target=T11), Thread(target=T21)
    t11.start(); t21.start()
    t11_ident = t11.ident
    t21.join()    # NOTE not joining t11 yet

    # now verify that zfile2 stays at 11 state, i.e. T21 was really aborted
    conn02 = db.open()
    # NOTE top of connection stack is conn21(=conn01), because conn11_2 has 0
    # active objects
    assert conn02 is conn01
    root02 = conn02.root()
    f02 = root02['zfile2']

    # NOTE verification is done using a fresh fileh to avoid depending on
    # leftover state from T11/T21.
    fh02  = f02.fileh_open()
    vma02 = fh02.mmap(0, 1)
    assert Blk(vma02, 0)[0] == 11

    del vma02, fh02, f02, root02
    conn02.close()

    c12_2 = NotifyChannel()   # T12 -> T22
    c21_2 = NotifyChannel()   # T22 -> T12

    # open, abort
    def T12():
        tell, wait = c12_2.tell, c21_2.wait

        wait('T2-conn22-opened')
        conn12 = db.open()
        tell('T1-conn12-opened')

        wait('T2-zfile2-modified')
        transaction.abort()
        tell('T1-txn-aborted')

        wait('T2-txn-committed')
        conn12.close()

    # open, modify, commit
    def T22():
        tell, wait = c21_2.tell, c12_2.wait

        # make sure we are not the same thread which ran T11
        # (should be so because we cared not to stop T11 yet)
        assert _thread.get_ident() != t11_ident

        conn22 = db.open()
        assert conn22 is conn01
        tell('T2-conn22-opened')

        # modify zfile and arrange timings so that T1 does abort after we
        # modify, but before we commit
        wait('T1-conn12-opened')
        root22 = conn22.root()
        a22 = root22['zarray2']
        fh22  = a22._v_fileh
        vma22 = fh22.mmap(0, 1)
        Blk(vma22, 0)[0] = 22
        tell('T2-zfile2-modified')

        wait('T1-txn-aborted')

        # commit - changes should propagate to zfile
        transaction.commit()
        tell('T2-txn-committed')

        conn22.close()

    t12, t22 = Thread(target=T12), Thread(target=T22)
    t12.start(); t22.start()
    t12.join(); t22.join()

    # tell T11 to stop also
    c21_1.tell('T11-exit-command')
    t11.join()

    # now verify that zfile2 changed to 22 state, i.e. T22 was really committed
    conn03 = db.open()
    # NOTE top of connection stack is conn22(=conn01), because it has most of
    # # active objects
    assert conn03 is conn01
    root03 = conn03.root()
    f03 = root03['zfile2']

    fh03  = f03.fileh_open()
    vma03 = fh03.mmap(0, 1)
    assert Blk(vma03, 0)[0] == 22

    del vma03, fh03, f03
    dbclose(root03)
def test_livepersistent():
    """Verify LivePersistent: stays UPTODATE through cache gc both when
    initially created and when loaded from DB; can still go back to GHOST on
    cache invalidation at a transaction boundary."""
    root = dbopen()
    transaction.commit()    # set root._p_jar
    db = root._p_jar.db()

    # ~~~ test `obj initially created` case
    root['live'] = lp = LivePersistent()
    assert lp._p_jar   is None          # connection does not know about it yet
    assert lp._p_state == UPTODATE      # object initially created in uptodate

    # should not be in cache yet & thus should stay after gc
    db.cacheMinimize()
    assert lp._p_jar   is None
    assert lp._p_state == UPTODATE
    ci = cacheInfo(db)
    assert kkey(LivePersistent) not in ci

    # should be registered to connection & cache after commit
    transaction.commit()
    assert lp._p_jar   is not None
    assert lp._p_state == UPTODATE
    ci = cacheInfo(db)
    assert ci[kkey(LivePersistent)] == 1

    # should stay that way after cache gc
    db.cacheMinimize()
    assert lp._p_jar   is not None
    assert lp._p_state == UPTODATE
    ci = cacheInfo(db)
    assert ci[kkey(LivePersistent)] == 1

    # ~~~ reopen & test `obj loaded from db` case
    dbclose(root)
    del root, db, lp

    root = dbopen()
    db = root._p_jar.db()

    # known to connection & cache & GHOST
    # right after first loading from DB
    lp = root['live']
    assert lp._p_jar   is not None
    assert lp._p_state is GHOST
    ci = cacheInfo(db)
    assert ci[kkey(LivePersistent)] == 1

    # should be UPTODATE for sure after read access
    getattr(lp, 'attr', None)
    assert lp._p_jar   is not None
    assert lp._p_state is UPTODATE
    ci = cacheInfo(db)
    assert ci[kkey(LivePersistent)] == 1

    # does not go back to ghost on cache gc
    db.cacheMinimize()
    assert lp._p_jar   is not None
    assert lp._p_state == UPTODATE
    ci = cacheInfo(db)
    assert ci[kkey(LivePersistent)] == 1

    # ok
    dbclose(root)
    del root, db, lp

    # demo that upon cache invalidation LivePersistent can go back to ghost
    root = dbopen()
    conn = root._p_jar
    db   = conn.db()
    conn.close()
    del root, conn

    tm1 = TransactionManager()
    tm2 = TransactionManager()

    conn1 = db.open(transaction_manager=tm1)
    root1 = conn1.root()
    lp1 = root1['live']

    conn2 = db.open(transaction_manager=tm2)
    root2 = conn2.root()
    lp2 = root2['live']

    # 2 connections are setup running in parallel with initial obj state as ghost
    assert lp1._p_jar   is conn1
    assert lp2._p_jar   is conn2
    assert lp1._p_state is GHOST
    assert lp2._p_state is GHOST

    # conn1: modify  ghost -> changed
    lp1.attr = 1
    assert lp1._p_state is CHANGED
    assert lp2._p_state is GHOST

    # conn2: read  ghost -> uptodate
    assert getattr(lp1, 'attr', None) == 1
    assert getattr(lp2, 'attr', None) is None
    assert lp1._p_state is CHANGED
    assert lp2._p_state is UPTODATE

    # conn1: commit  changed -> uptodate; conn2 untouched
    tm1.commit()
    assert lp1._p_state is UPTODATE
    assert lp2._p_state is UPTODATE
    assert getattr(lp1, 'attr', None) == 1
    assert getattr(lp2, 'attr', None) is None

    # conn2: commit (nothing changed - just transaction boundary)
    #        uptodate -> ghost (invalidation)
    tm2.commit()
    assert lp1._p_state is UPTODATE
    assert lp2._p_state is GHOST
    assert getattr(lp1, 'attr', None) == 1

    # conn2: after reading, the state is again uptodate + changes from conn1 are here
    a = getattr(lp2, 'attr', None)
    assert lp2._p_state is UPTODATE
    assert a == 1

    conn2.close()
    del conn2, root2
    dbclose(root1)
def test_zbigarray_vs_conn_migration():
    """Verify ZBigArray behaviour when the same ZODB Connection migrates
    between threads (commit/abort from a different thread than the one which
    modified the data)."""
    root01 = testdb.dbopen()
    conn01 = root01._p_jar
    db     = conn01.db()
    conn01.close()
    del root01

    c12_1 = NotifyChannel()   # T11 -> T21
    c21_1 = NotifyChannel()   # T21 -> T11

    # open, modify, commit, close, open, commit
    def T11():
        tell, wait = c12_1.tell, c21_1.wait

        conn11_1 = db.open()
        assert conn11_1 is conn01

        # setup zarray
        root11_1 = conn11_1.root()
        root11_1['zarray2'] = a11 = ZBigArray((10,), uint8)
        transaction.commit()

        # set initial data
        a11[0:1] = [11]     # XXX -> [0] = 11 after BigArray can
        transaction.commit()

        # close conn, wait till T21 reopens it
        del a11, root11_1
        conn11_1.close()
        tell('T1-conn11_1-closed')
        wait('T2-conn21-opened')

        # open another connection. it must be different
        # (see appropriate place in zfile test about why)
        conn11_2 = db.open()
        assert conn11_2 is not conn11_1
        root11_2 = conn11_2.root()

        wait('T2-zarray2-modified')

        transaction.commit()    # should be nothing
        tell('T1-txn12-committed')

        wait('T2-conn21-closed')
        del root11_2
        conn11_2.close()

        # hold on this thread until main driver tells us
        wait('T11-exit-command')

    # open, modify, abort
    def T21():
        tell, wait = c21_1.tell, c12_1.wait

        # wait until T1 finish setting up initial data and get its connection
        # (see appropriate place in zfile tests for details)
        wait('T1-conn11_1-closed')
        conn21 = db.open()
        assert conn21 is conn01
        tell('T2-conn21-opened')

        # modify zarray and arrange timings so that T1 commits after zarray is
        # modified, but before we commit/abort.
        root21 = conn21.root()
        a21 = root21['zarray2']
        a21[0:1] = [21]     # XXX -> [0] = 21 after BigArray can
        tell('T2-zarray2-modified')

        wait('T1-txn12-committed')

        # abort - zarray2 should stay unchanged
        transaction.abort()

        del a21, root21
        conn21.close()
        tell('T2-conn21-closed')

    t11, t21 = Thread(target=T11), Thread(target=T21)
    t11.start(); t21.start()
    t11_ident = t11.ident
    t21.join()    # NOTE not joining t11 yet

    # now verify that zarray2 stays at 11 state, i.e. T21 was really aborted
    conn02 = db.open()
    # NOTE top of connection stack is conn21(=conn01), because conn11_2 has 0
    # active objects
    assert conn02 is conn01
    root02 = conn02.root()
    a02 = root02['zarray2']
    assert a02[0] == 11
    del a02, root02
    conn02.close()

    c12_2 = NotifyChannel()   # T12 -> T22
    c21_2 = NotifyChannel()   # T22 -> T12

    # open, abort
    def T12():
        tell, wait = c12_2.tell, c21_2.wait

        wait('T2-conn22-opened')
        conn12 = db.open()
        tell('T1-conn12-opened')

        wait('T2-zarray2-modified')
        transaction.abort()
        tell('T1-txn-aborted')

        wait('T2-txn-committed')
        conn12.close()

    # open, modify, commit
    def T22():
        tell, wait = c21_2.tell, c12_2.wait

        # make sure we are not the same thread which ran T11
        # (should be so because we cared not to stop T11 yet)
        assert _thread.get_ident() != t11_ident

        conn22 = db.open()
        assert conn22 is conn01
        tell('T2-conn22-opened')

        # modify zarray and arrange timings so that T1 does abort after we
        # modify, but before we commit
        wait('T1-conn12-opened')
        root22 = conn22.root()
        a22 = root22['zarray2']
        a22[0:1] = [22]     # XXX -> [0] = 22 after BigArray can
        tell('T2-zarray2-modified')

        wait('T1-txn-aborted')

        # commit - changes should propagate to zarray
        transaction.commit()
        tell('T2-txn-committed')

        conn22.close()

    t12, t22 = Thread(target=T12), Thread(target=T22)
    t12.start(); t22.start()
    t12.join(); t22.join()

    # tell T11 to stop also
    c21_1.tell('T11-exit-command')
    t11.join()

    # now verify that zarray2 changed to 22 state, i.e. T22 was really committed
    conn03 = db.open()
    # NOTE top of connection stack is conn22(=conn01), because it has most of
    # # active objects
    assert conn03 is conn01
    root03 = conn03.root()
    a03 = root03['zarray2']
    assert a03[0] == 22
    del a03
    dbclose(root03)