def checkPackAfterUndoManyTimes(self):
    # Repeatedly undo/redo a change to the root object, recording a pack
    # time after each undo, then pack at every recorded time and verify
    # pack never alters the final (post-undo) object state.
    db = DB(self._storage)
    cn = db.open()
    rt = cn.root()
    rt["test"] = MinPO(1)
    transaction.commit()
    rt["test2"] = MinPO(2)
    transaction.commit()
    rt["test"] = MinPO(3)
    txn = transaction.get()
    txn.note(u"root of undo")
    txn.commit()
    packtimes = []
    for i in range(10):
        # Each iteration undoes the most recent transaction, which
        # toggles "test" between its last two states (3 and 1).
        L = db.undoInfo()
        db.undo(L[0]["id"])
        txn = transaction.get()
        txn.note(u"undo %d" % i)
        txn.commit()
        rt._p_deactivate()
        cn.sync()
        self.assertEqual(rt["test"].value, i % 2 and 3 or 1)
        self.assertEqual(rt["test2"].value, 2)
        packtimes.append(time.time())
        snooze()
    for t in packtimes:
        self._storage.pack(t, referencesf)
        cn.sync()
        # TODO: Is _cache supposed to have a clear() method, or not?
        # cn._cache.clear()
        # The last undo set the value to 3 and pack should
        # never change that.
        self.assertEqual(rt["test"].value, 3)
        self.assertEqual(rt["test2"].value, 2)
        self._inter_pack_pause()
def _dostore(self, storage, oid=None, revid=None, data=None,
             already_pickled=0, user=None, description=None):
    # Borrowed from StorageTestBase, to allow passing storage.
    """Do a complete storage transaction.  The defaults are:

     - oid=None, ask the storage for a new oid
     - revid=None, use a revid of ZERO
     - data=None, pickle up some arbitrary data (the integer 7)

    Returns the object's new revision id.
    """
    import transaction
    from ZODB.tests.MinPO import MinPO
    if oid is None:
        oid = storage.new_oid()
    if revid is None:
        revid = StorageTestBase.ZERO
    if data is None:
        data = MinPO(7)
    if type(data) == int:
        # Bare ints are wrapped so they can be persisted as objects.
        data = MinPO(data)
    if not already_pickled:
        data = StorageTestBase.zodb_pickle(data)
    # Begin the transaction
    t = transaction.Transaction()
    if user is not None:
        t.user = user
    if description is not None:
        t.description = description
    try:
        storage.tpc_begin(t)
        # Store an object
        r1 = storage.store(oid, revid, data, '', t)
        # Finish the transaction.  The new serial comes from the
        # store/vote results via handle_serials (older storage API).
        r2 = storage.tpc_vote(t)
        revid = StorageTestBase.handle_serials(oid, r1, r2)
        storage.tpc_finish(t)
    except:
        # Re-raised below, but the transaction must be aborted first so
        # the storage is not left mid-commit.
        storage.tpc_abort(t)
        raise
    return revid
def checkCreationUndoneGetTid(self):
    """getTid on an object whose creation was undone must raise KeyError."""
    # Store a fresh object, then immediately undo the creating transaction.
    new_oid = self._storage.new_oid()
    self._dostore(new_oid, data=MinPO(23))
    undo_log = self._storage.undoInfo()
    self.undo(undo_log[0]['id'], 'undo1')
    # The object no longer exists, so asking for its tid must fail.
    # (The current version of FileStorage fails this test.)
    self.assertRaises(KeyError, self._storage.getTid, new_oid)
def run(self):
    # Thread body: open one client storage per server address, then run
    # `ntrans` transactions, each spanning ALL servers, storing `nobj`
    # new objects per server per transaction.
    # NOTE(review): the double-underscore attributes (c.__name, c.__oids,
    # c.__serials) are name-mangled to the enclosing class, so only this
    # class can read them back.
    tname = self.getName()
    testcase = self.testcase
    # Create client connections to each server
    clients = self.clients
    for i in range(len(testcase.addr)):
        c = testcase.openClientStorage(addr=testcase.addr[i])
        c.__name = "C%d" % i
        clients.append(c)
    for i in range(testcase.ntrans):
        # Because we want a transaction spanning all storages,
        # we can't use _dostore().  This is several _dostore() calls
        # expanded in-line (mostly).

        # Create oid->serial mappings
        for c in clients:
            c.__oids = []
            c.__serials = {}
        # Begin a transaction
        t = TransactionMetaData()
        for c in clients:
            #print("%s.%s.%s begin" % (tname, c.__name, i))
            c.tpc_begin(t)
        for j in range(testcase.nobj):
            for c in clients:
                # Create and store a new object on each server
                oid = c.new_oid()
                c.__oids.append(oid)
                data = MinPO("%s.%s.t%d.o%d" % (tname, c.__name, i, j))
                #print(data.value)
                data = zodb_pickle(data)
                c.store(oid, ZERO, data, '', t)
        # Vote on all servers and handle serials
        for c in clients:
            #print("%s.%s.%s vote" % (tname, c.__name, i))
            c.tpc_vote(t)
        # Finish on all servers
        for c in clients:
            #print("%s.%s.%s finish\n" % (tname, c.__name, i))
            c.tpc_finish(t)
        # NOTE(review): nothing visible in this method populates
        # c.__serials, yet the checks below require every stored oid to
        # appear in it -- verify serials are recorded by code not shown
        # here (e.g. a vote/finish callback), or these checks are dead.
        for c in clients:
            # Check that we got serials for all oids
            for oid in c.__oids:
                testcase.failUnless(oid in c.__serials)
            # Check that we got serials for no other oids
            for oid in c.__serials.keys():
                testcase.failUnless(oid in c.__oids)
def checkDisconnectedCacheWorks(self):
    """Objects stored before a disconnect must still be loadable from the
    persistent client cache when no server is reachable."""
    self._storage = self.openClientStorage('test')
    # Store two distinct objects and remember what the server returns.
    first_oid = self._storage.new_oid()
    first_obj = MinPO("1" * 500)
    self._dostore(first_oid, data=first_obj)
    second_oid = self._storage.new_oid()
    second_obj = MinPO("2" * 500)
    self._dostore(second_oid, data=second_obj)
    want_first = self._storage.load(first_oid, '')
    want_second = self._storage.load(second_oid, '')
    # Tear everything down, then reopen the client with no server
    # present; loads must be served from the persistent cache file.
    self._storage.close()
    self.shutdownServer()
    self._storage = self.openClientStorage('test', wait=False)
    self.assertEqual(want_first, self._storage.load(first_oid, ''))
    self.assertEqual(want_second, self._storage.load(second_oid, ''))
    self._storage.close()
def __store_two_for_read_current_error(self, release_extra_storage=False):
    """Commit two MinPO objects in a single transaction.

    Returns (oid1, oid2, shared_tid, db) for read-current-error tests;
    both objects carry the same serial because they were committed
    together.
    """
    db = self._closing(DB(self._storage, pool_size=1))
    conn = db.open()
    root = conn.root()
    root['object1'] = MinPO('object1')
    root['object2'] = MinPO('object2')
    transaction.commit()
    first_oid = root['object1']._p_oid
    second_oid = root['object2']._p_oid
    shared_tid = root['object1']._p_serial
    # Sanity: one commit => one serial for both objects.
    assert shared_tid == root['object2']._p_serial
    conn.close()
    # We can't close the DB, that would close the storage we still
    # need.  But this connection's own storage can be released, since
    # the connection is never used again.
    if release_extra_storage:
        conn._normal_storage.release()
    return first_oid, second_oid, shared_tid, db
def checkPackVersions(self):
    # Build a storage containing a mix of version data, undo-created
    # backpointers, and a committed version, then make sure pack can
    # chase all of it without corrupting current data.
    db = DB(self._storage)
    cn = db.open(version="testversion")
    root = cn.root()
    obj = root["obj"] = MinPO("obj")
    root["obj2"] = MinPO("obj2")
    txn = transaction.get()
    txn.note("create 2 objs in version")
    txn.commit()
    obj.value = "77"
    txn = transaction.get()
    txn.note("modify obj in version")
    txn.commit()
    # undo the modification to generate a mix of backpointers
    # and versions for pack to chase
    info = db.undoInfo()
    db.undo(info[0]["id"])
    txn = transaction.get()
    txn.note("undo modification")
    txn.commit()
    snooze()
    self._storage.pack(time.time(), referencesf)
    db.commitVersion("testversion")
    txn = transaction.get()
    txn.note("commit version")
    txn.commit()
    cn = db.open()
    root = cn.root()
    root["obj"] = "no version"
    txn = transaction.get()
    txn.note("modify obj")
    txn.commit()
    # Second pack: now there is committed-version history to traverse.
    self._storage.pack(time.time(), referencesf)
def checkTransactionalUndoAfterPack(self):
    # bwarsaw Date: Thu Mar 28 21:04:43 2002 UTC
    # This is a test which should provoke the underlying bug in
    # transactionalUndo() on a standby storage.  If our hypothesis
    # is correct, the bug is in FileStorage, and is caused by
    # encoding the file position in the `id' field of the undoLog
    # information.  Note that Full just encodes the tid, but this
    # is a problem for FileStorage (we have a strategy for fixing
    # this).

    # So, basically, this makes sure that undo info doesn't depend
    # on file positions.  We change the file positions in an undo
    # record by packing.

    # Add a few object revisions
    oid = b'\0' * 8
    revid0 = self._dostore(oid, data=MinPO(50))
    revid1 = self._dostore(oid, revid=revid0, data=MinPO(51))
    snooze()
    packtime = time.time()
    snooze()                # time.time() now distinct from packtime
    revid2 = self._dostore(oid, revid=revid1, data=MinPO(52))
    self._dostore(oid, revid=revid2, data=MinPO(53))
    # Now get the undo log
    info = self._storage.undoInfo()
    self.assertEqual(len(info), 4)
    tid = info[0]['id']
    # Now pack just the initial revision of the object.  We need the
    # second revision otherwise we won't be able to undo the third
    # revision!
    self._storage.pack(packtime, referencesf)
    # Make some basic assertions about the undo information now
    info2 = self._storage.undoInfo()
    self.assertEqual(len(info2), 2)
    # And now attempt to undo the last transaction using the tid
    # captured BEFORE the pack -- this is the point of the test.
    undone, = self.undo(tid)
    self.assertEqual(undone, oid)
    data, revid = load_current(self._storage, oid)
    # The object must now be at the second state
    self.assertEqual(zodb_unpickle(data), MinPO(52))
    self._iterate()
def checkZEOInvalidation(self):
    # A second client connected to the same ZEO server must observe a
    # commit made by the first client once invalidations are processed.
    addr = self._storage._addr
    storage2 = ClientStorage(addr, wait=1, min_disconnect_poll=0.1)
    try:
        oid = self._storage.new_oid()
        ob = MinPO('first')
        revid1 = self._dostore(oid, data=ob)
        data, serial = storage2.load(oid, '')
        self.assertEqual(zodb_unpickle(data), MinPO('first'))
        self.assertEqual(serial, revid1)
        revid2 = self._dostore(oid, data=MinPO('second'), revid=revid1)
        for n in range(3):
            # Let the server and client talk for a moment.
            # Is there a better way to do this?
            asyncore.poll(0.1)
        data, serial = storage2.load(oid, '')
        self.assertEqual(zodb_unpickle(data), MinPO('second'),
                         'Invalidation message was not sent!')
        self.assertEqual(serial, revid2)
    finally:
        storage2.close()
def iter_verify(self, txniter, revids, val0):
    """Check a transaction iterator against the expected tids and values.

    `txniter` must yield one transaction per entry of `revids`, each
    containing records for self._oid whose unpickled MinPO values count
    up from `val0`.  The trailing None pads the expected list so an
    iterator yielding EXTRA transactions fails the tid comparison.
    """
    eq = self.assertEqual
    oid = self._oid
    val = val0
    # Use the builtin zip: itertools.izip is Python 2 only and raises
    # AttributeError on Python 3.  (zip is equivalent here; the lists
    # involved are tiny, so Py2's eager zip is also fine.)
    for reciter, revid in zip(txniter, revids + [None]):
        eq(reciter.tid, revid)
        for rec in reciter:
            eq(rec.oid, oid)
            eq(rec.tid, revid)
            eq(zodb_unpickle(rec.data), MinPO(val))
            val = val + 1
    # Every expected value must have been consumed.
    eq(val, val0 + len(revids))
def checkUndoCreationBranch1(self):
    """After undoing an update and then the creation, the object is gone."""
    eq = self.assertEqual
    target = self._storage.new_oid()
    serial = self._dostore(target, data=MinPO(11))
    serial = self._dostore(target, revid=serial, data=MinPO(12))
    # Undo the most recent transaction; the first revision comes back.
    log = self._storage.undoInfo()
    self._undo(log[0]['id'], [target])
    data, serial = load_current(self._storage, target)
    eq(zodb_unpickle(data), MinPO(11))
    # From here we could either redo that undo or undo the object's
    # creation.  Take the creation branch.
    log = self._storage.undoInfo()
    self._undo(log[2]['id'], [target])
    self.assertRaises(KeyError, load_current, self._storage, target)
    # Loading current data via loadBefore must raise a POSKeyError too.
    self.assertRaises(KeyError, self._storage.loadBefore, target,
                      b'\x7f\xff\xff\xff\xff\xff\xff\xff')
    self._iterate()
def checkSimpleTransactionalUndo(self):
    """Walk a three-revision object backwards through successive undos,
    then redo the creation by undoing the final undo."""
    eq = self.assertEqual
    target = self._storage.new_oid()
    serial = self._dostore(target, data=MinPO(23))
    serial = self._dostore(target, revid=serial, data=MinPO(24))
    serial = self._dostore(target, revid=serial, data=MinPO(25))
    # Undo the newest transaction: value drops back to 24.
    log = self._storage.undoInfo()
    self._undo(log[0]["id"], [target], note="undo1")
    data, serial = load_current(self._storage, target)
    eq(zodb_unpickle(data), MinPO(24))
    # Undo the middle transaction: back to 23.
    log = self._storage.undoInfo()
    self._undo(log[2]["id"], [target], note="undo2")
    data, serial = load_current(self._storage, target)
    eq(zodb_unpickle(data), MinPO(23))
    # Undo the creating transaction: the object disappears entirely.
    log = self._storage.undoInfo()
    self._undo(log[4]["id"], [target], note="undo3")
    self.assertRaises(KeyError, load_current, self._storage, target)
    # Undo the last undo, which redoes the creation.
    log = self._storage.undoInfo()
    self._undo(log[0]["id"], [target])
    data, serial = load_current(self._storage, target)
    eq(zodb_unpickle(data), MinPO(23))
    self._iterate()
def checkVersions(self):
    """versions() must report active version names; the optional `max`
    argument caps how many names come back."""
    # unittest.TestCase.failUnless was deprecated in Python 2.7 and
    # removed in 3.12; assertTrue is the supported spelling.
    unless = self.assertTrue
    # Store some objects in the non-version
    oid1 = self._storage.new_oid()
    oid2 = self._storage.new_oid()
    oid3 = self._storage.new_oid()
    revid1 = self._dostore(oid1, data=MinPO(11))
    revid2 = self._dostore(oid2, data=MinPO(12))
    revid3 = self._dostore(oid3, data=MinPO(13))
    # Now create some new versions
    revid1 = self._dostore(oid1, revid=revid1, data=MinPO(14),
                           version='one')
    revid2 = self._dostore(oid2, revid=revid2, data=MinPO(15),
                           version='two')
    revid3 = self._dostore(oid3, revid=revid3, data=MinPO(16),
                           version='three')
    # Ask for the versions
    versions = self._storage.versions()
    unless('one' in versions)
    unless('two' in versions)
    unless('three' in versions)
    # Now flex the `max' argument: exactly one of the three names must
    # be returned (which one is unspecified).
    versions = self._storage.versions(1)
    self.assertEqual(len(versions), 1)
    unless('one' in versions or 'two' in versions or 'three' in versions)
def checkCommitVersionErrors(self):
    """Committing a version into itself must raise VersionCommitError."""
    eq = self.assertEqual
    oid1, version1 = self._setup_version('one')
    data, revid1 = self._storage.load(oid1, version1)
    eq(zodb_unpickle(data), MinPO(54))
    txn = Transaction()
    self._storage.tpc_begin(txn)
    try:
        # Source and destination version are identical -> must fail.
        self.assertRaises(POSException.VersionCommitError,
                          self._storage.commitVersion,
                          'one', 'one', txn)
    finally:
        # Always unwind the two-phase commit we started.
        self._storage.tpc_abort(txn)
def checkAbortVersionNonCurrent(self):
    """The aborted-version revision must be correctly reachable as
    non-current data (via loadBefore) once a newer revision exists."""
    oid, version = self._setup_version()
    self._abortVersion(version)
    data, tid, ver = self._storage.loadEx(oid, "")
    # Write one more revision so the abort-version txn is non-current.
    self._dostore(oid, revid=tid, data=MinPO(17))
    last_tid = self._storage.lastTransaction()
    nc_data, nc_start, nc_end = self._storage.loadBefore(oid, last_tid)
    # loadBefore must hand back exactly the aborted-version state.
    self.assertEqual(data, nc_data)
    self.assertEqual(tid, nc_start)
def checkTwoObjectUndoAtOnce(self):
    """Undo two transactions, each touching two objects, in a single
    two-phase commit, then undo that undo."""
    # Convenience
    eq = self.assertEqual
    # failUnless was deprecated in Python 2.7 and removed in 3.12;
    # assertTrue is the supported spelling.
    unless = self.assertTrue
    p30, p31, p32, p50, p51, p52 = map(
        zodb_pickle, map(MinPO, (30, 31, 32, 50, 51, 52)))
    oid1 = self._storage.new_oid()
    oid2 = self._storage.new_oid()
    revid1 = revid2 = ZERO
    # Store two objects in the same transaction
    d = self._multi_obj_transaction([(oid1, revid1, p30),
                                     (oid2, revid2, p50),
                                     ])
    eq(d[oid1], d[oid2])
    # Update those same two objects
    d = self._multi_obj_transaction([(oid1, d[oid1], p31),
                                     (oid2, d[oid2], p51),
                                     ])
    eq(d[oid1], d[oid2])
    # Update those same two objects
    d = self._multi_obj_transaction([(oid1, d[oid1], p32),
                                     (oid2, d[oid2], p52),
                                     ])
    eq(d[oid1], d[oid2])
    revid1 = self._transaction_newserial(oid1)
    revid2 = self._transaction_newserial(oid2)
    eq(revid1, revid2)
    # Make sure the objects have the current value
    data, revid1 = self._storage.load(oid1, '')
    eq(zodb_unpickle(data), MinPO(32))
    data, revid2 = self._storage.load(oid2, '')
    eq(zodb_unpickle(data), MinPO(52))
    # Now attempt to undo the last TWO transactions at once.
    info = self._storage.undoInfo()
    tid = info[0]['id']
    tid1 = info[1]['id']
    t = Transaction()
    oids = self._begin_undos_vote(t, tid, tid1)
    self._storage.tpc_finish(t)
    # We get the finalization stuff called an extra time:
    eq(len(oids), 4)
    unless(oid1 in oids)
    unless(oid2 in oids)
    data, revid1 = self._storage.load(oid1, '')
    eq(zodb_unpickle(data), MinPO(30))
    data, revid2 = self._storage.load(oid2, '')
    eq(zodb_unpickle(data), MinPO(50))
    # Now try to undo the one we just did to undo, whew
    info = self._storage.undoInfo()
    self._undo(info[0]['id'], [oid1, oid2])
    data, revid1 = self._storage.load(oid1, '')
    eq(zodb_unpickle(data), MinPO(32))
    data, revid2 = self._storage.load(oid2, '')
    eq(zodb_unpickle(data), MinPO(52))
    self._iterate()
def checkLoadBeforeEdges(self):
    """Exercise loadBefore at the boundary tids of an object's history."""
    oid = self._storage.new_oid()
    # No data at all yet: loadBefore must raise KeyError.
    self.assertRaises(KeyError, self._storage.loadBefore, oid, p64(0))
    first = self._dostore(oid, data=MinPO(1))
    # At or before the first revision there is still nothing "before".
    self.assertEqual(self._storage.loadBefore(oid, p64(0)), None)
    self.assertEqual(self._storage.loadBefore(oid, first), None)
    just_after = p64(u64(first) + 1)
    data, start, end = self._storage.loadBefore(oid, just_after)
    self.assertEqual(zodb_unpickle(data), MinPO(1))
    self.assertEqual(start, first)
    self.assertEqual(end, None)    # still the current revision
    second = self._dostore(oid, revid=first, data=MinPO(2))
    # The same query now reports the second revision as the end bound.
    data, start, end = self._storage.loadBefore(oid, just_after)
    self.assertEqual(zodb_unpickle(data), MinPO(1))
    self.assertEqual(start, first)
    self.assertEqual(end, second)
def checkTwoObjectUndoAgain(self):
    """Undo transactions whose two objects were created in SEPARATE
    transactions, and undo a transaction where only one of the two
    objects is actually affected."""
    eq = self.assertEqual
    p31, p32, p33, p51, p52, p53 = map(
        zodb_pickle, map(MinPO, (31, 32, 33, 51, 52, 53)))
    # Like the above, but the first revision of the objects are stored in
    # different transactions.
    oid1 = self._storage.new_oid()
    oid2 = self._storage.new_oid()
    revid1 = self._dostore(oid1, data=p31, already_pickled=1)
    revid2 = self._dostore(oid2, data=p51, already_pickled=1)
    # Update those same two objects
    t = Transaction()
    self._storage.tpc_begin(t)
    self._transaction_begin()
    self._transaction_store(oid1, revid1, p32, '', t)
    self._transaction_store(oid2, revid2, p52, '', t)
    # Finish the transaction
    self._transaction_vote(t)
    self._storage.tpc_finish(t)
    revid1 = self._transaction_newserial(oid1)
    revid2 = self._transaction_newserial(oid2)
    eq(revid1, revid2)
    # Now attempt to undo the transaction containing two objects
    info = self._storage.undoInfo()
    self._undo(info[0]["id"], [oid1, oid2])
    data, revid1 = self._storage.load(oid1, '')
    eq(zodb_unpickle(data), MinPO(31))
    data, revid2 = self._storage.load(oid2, '')
    eq(zodb_unpickle(data), MinPO(51))
    # Like the above, but this time, the second transaction contains only
    # one object.
    t = Transaction()
    self._storage.tpc_begin(t)
    self._transaction_begin()
    self._transaction_store(oid1, revid1, p33, '', t)
    self._transaction_store(oid2, revid2, p53, '', t)
    # Finish the transaction
    self._transaction_vote(t)
    self._storage.tpc_finish(t)
    revid1 = self._transaction_newserial(oid1)
    revid2 = self._transaction_newserial(oid2)
    eq(revid1, revid2)
    # Update in different transactions
    revid1 = self._dostore(oid1, revid=revid1, data=MinPO(34))
    revid2 = self._dostore(oid2, revid=revid2, data=MinPO(54))
    # Now attempt to undo the transaction containing two objects
    info = self._storage.undoInfo()
    tid = info[1]['id']
    t = Transaction()
    oids = self._begin_undos_vote(t, tid)
    self._storage.tpc_finish(t)
    # Only oid1 is undone: oid2 has a later revision, so the storage
    # reports just the object actually affected by the undo.
    eq(len(oids), 1)
    # failUnless was removed in Python 3.12; assertTrue is the supported
    # spelling (and `not in` the idiomatic negation).
    self.assertTrue(oid1 in oids)
    self.assertTrue(oid2 not in oids)
    data, revid1 = self._storage.load(oid1, '')
    eq(zodb_unpickle(data), MinPO(33))
    data, revid2 = self._storage.load(oid2, '')
    eq(zodb_unpickle(data), MinPO(54))
    self._iterate()
def helper(tid, revid, x):
    # Commit one revision of `oid` (closed over from the enclosing
    # scope) at an explicitly chosen transaction id, and return the new
    # revision id.
    data = zodb_pickle(MinPO(x))
    t = TransactionMetaData()
    try:
        # Passing p64(tid) asks the storage to commit at exactly that
        # transaction id rather than allocating one.
        self._storage.tpc_begin(t, p64(tid))
        self._storage.store(oid, revid, data, '', t)
        # Finish the transaction
        self._storage.tpc_vote(t)
        # Newer storage API: tpc_finish returns the committed tid.
        newrevid = self._storage.tpc_finish(t)
    except:
        # Abort so the storage is not left mid-commit, then re-raise.
        self._storage.tpc_abort(t)
        raise
    return newrevid
def checkLoad_was_checkLoadEx(self):
    """load() must return data and a tid consistent with the iterator."""
    oid = self._storage.new_oid()
    self._dostore(oid, data=42)
    data, tid = self._storage.load(oid, "")
    self.assertEqual(zodb_unpickle(data), MinPO(42))
    # Scan every record of every transaction for our (oid, tid) pair,
    # checking the enclosing transaction carries that same tid.
    found = False
    for txn in self._storage.iterator():
        for record in txn:
            if record.oid == oid and record.tid == tid:
                self.assertEqual(txn.tid, tid)
                found = True
    if not found:
        self.fail("Could not find transaction with matching id")
def checkConflictCacheIsCleared(self):
    """Conflict-cache entries older than CONFLICT_CACHE_MAXAGE must be
    evicted by the periodic GC pass."""
    old_gcevery = TemporaryStorage.CONFLICT_CACHE_GCEVERY
    old_maxage = TemporaryStorage.CONFLICT_CACHE_MAXAGE
    # Shrink the GC interval and max age so the test runs quickly.
    TemporaryStorage.CONFLICT_CACHE_GCEVERY = 5
    TemporaryStorage.CONFLICT_CACHE_MAXAGE = 5
    try:
        oid = self._storage.new_oid()
        self._dostore(oid, data=MinPO(5))
        time.sleep(TemporaryStorage.CONFLICT_CACHE_GCEVERY + 1)
        oid2 = self._storage.new_oid()
        self._dostore(oid2, data=MinPO(10))
        oid3 = self._storage.new_oid()
        self._dostore(oid3, data=MinPO(9))
        # The first entry aged out; only the two recent ones remain.
        # (Plain `assert` is stripped under `python -O`, so use the
        # unittest assertion to guarantee the check always runs.)
        self.assertEqual(len(self._storage._conflict_cache), 2)
        time.sleep(TemporaryStorage.CONFLICT_CACHE_GCEVERY + 1)
        oid4 = self._storage.new_oid()
        self._dostore(oid4, data=MinPO(11))
        self.assertEqual(len(self._storage._conflict_cache), 1)
    finally:
        # Restore the class-level tuning knobs for other tests.
        TemporaryStorage.CONFLICT_CACHE_GCEVERY = old_gcevery
        TemporaryStorage.CONFLICT_CACHE_MAXAGE = old_maxage
def checkExtendedIteration(self):
    """Exercise iterator() with every combination of start/stop bounds."""
    # Store a bunch of revisions of a single object
    self._oid = oid = self._storage.new_oid()
    revid1 = self._dostore(oid, data=MinPO(11))
    revid2 = self._dostore(oid, revid=revid1, data=MinPO(12))
    revid3 = self._dostore(oid, revid=revid2, data=MinPO(13))
    revid4 = self._dostore(oid, revid=revid3, data=MinPO(14))
    # Note that the end points are included
    # Iterate over all of the transactions with explicit start/stop
    txniter = self._storage.iterator(revid1, revid4)
    self.iter_verify(txniter, [revid1, revid2, revid3, revid4], 11)
    # Iterate over some of the transactions with explicit start
    txniter = self._storage.iterator(revid3)
    self.iter_verify(txniter, [revid3, revid4], 13)
    # Iterate over some of the transactions with explicit stop
    txniter = self._storage.iterator(None, revid2)
    self.iter_verify(txniter, [revid1, revid2], 11)
    # Iterate over some of the transactions with explicit start+stop
    txniter = self._storage.iterator(revid2, revid3)
    self.iter_verify(txniter, [revid2, revid3], 12)
    # Specify an upper bound somewhere in between values.
    # Use floor division: on Python 3 `/` yields a float, which p64()
    # cannot pack into an 8-byte tid.
    revid3a = p64((U64(revid3) + U64(revid4)) // 2)
    txniter = self._storage.iterator(revid2, revid3a)
    self.iter_verify(txniter, [revid2, revid3], 12)
    # Specify a lower bound somewhere in between values.
    # revid2 == revid1+1 is very likely on Windows.  Adding 1 before
    # dividing ensures that "the midpoint" we compute is strictly larger
    # than revid1.
    revid1a = p64((U64(revid1) + 1 + U64(revid2)) // 2)
    # assertTrue rather than a bare assert, which vanishes under -O.
    self.assertTrue(revid1 < revid1a)
    txniter = self._storage.iterator(revid1a, revid3a)
    self.iter_verify(txniter, [revid2, revid3], 12)
    # Specify an empty range
    txniter = self._storage.iterator(revid3, revid2)
    self.iter_verify(txniter, [], 13)
    # Specify a singleton range
    txniter = self._storage.iterator(revid3, revid3)
    self.iter_verify(txniter, [revid3], 13)
def checkSimpleHistory(self):
    """history() must return newest-first entries, capped by `size` and
    truncated to the revisions that actually exist."""
    eq = self.assertEqual
    oid = self._storage.new_oid()
    # No revisions yet: history must raise KeyError.
    self.assertRaises(KeyError, self._storage.history, oid)
    r1 = self._dostore(oid, data=MinPO(11))
    r2 = self._dostore(oid, revid=r1, data=MinPO(12))
    r3 = self._dostore(oid, revid=r2, data=MinPO(13))
    newest_first = [r3, r2, r1]
    # Ask for 1, 2, 3, and then more-than-available entries; each call
    # must return the newest tids first, never more than exist.
    for size in (1, 2, 3, 4):
        snapshot = self._storage.history(oid, size=size)
        expected = newest_first[:size]
        eq(len(snapshot), len(expected))
        for entry, tid in zip(snapshot, expected):
            eq(entry['tid'], tid)
def helper(tid, revid, x):
    # Commit one revision of `oid` (closed over from the enclosing
    # scope) at the explicit transaction id p64(tid); returns the
    # object's new serial.
    data = zodb_pickle(MinPO(x))
    t = transaction.Transaction()
    try:
        self._storage.tpc_begin(t, p64(tid))
        r1 = self._storage.store(oid, revid, data, '', t)
        # Finish the transaction
        r2 = self._storage.tpc_vote(t)
        # Older storage API: the new serial is carried in the store and
        # vote results and decoded by handle_serials, not returned by
        # tpc_finish.
        newrevid = handle_serials(oid, r1, r2)
        self._storage.tpc_finish(t)
    except:
        # Abort so the storage is not left mid-commit, then re-raise.
        self._storage.tpc_abort(t)
        raise
    return newrevid
def checkAbortVersion(self):
    """Aborting a version reverts the object to its non-version state."""
    eq = self.assertEqual
    oid, version = self._setup_version()
    # Not sure I can write a test for getSerial() in the presence of
    # aborted versions, because FileStorage and Berkeley storage give a
    # different answer.  I think Berkeley is right and FS is wrong.
    aborted = self._abortVersion(version)
    # Exactly our one object was affected by the abort.
    eq(len(aborted), 1)
    eq(aborted[0], oid)
    data, revid = self._storage.load(oid, '')
    eq(zodb_unpickle(data), MinPO(51))
def checkCreationUndoneGetTid(self):
    """getTid on an object whose creation was undone must raise KeyError."""
    # Store a brand-new object...
    oid = self._storage.new_oid()
    self._dostore(oid, data=MinPO(23))
    # ...then undo the transaction that created it, via an explicit
    # two-phase commit.
    undo_log = self._storage.undoInfo()
    txn = Transaction()
    txn.note('undo1')
    self._begin_undos_vote(txn, undo_log[0]['id'])
    self._storage.tpc_finish(txn)
    # As far as getTid is concerned, the object never existed.
    # (The current version of FileStorage fails this test.)
    self.assertRaises(KeyError, self._storage.getTid, oid)
def checkPackLotsWhileWriting(self):
    """Pack repeatedly while a client thread keeps writing.

    This is like the other pack-while-writing tests, except it packs
    repeatedly until the client thread is done.  At the time it was
    introduced, it reliably provoked
        CorruptedError:  ... transaction with checkpoint flag set
    in the ZEO flavor of the FileStorage tests.
    """
    db = DB(self._storage)
    conn = db.open()
    root = conn.root()
    choices = list(range(10))
    for i in choices:
        root[i] = MinPO(i)
    transaction.commit()
    snooze()
    packt = time.time()
    # Build up revision history for pack to chew on.
    for dummy in choices:
        for i in choices:
            root[i].value = MinPO(i)
            transaction.commit()
    NUM_LOOP_TRIP = 100
    timer = ElapsedTimer(time.time())
    thread = ClientThread(db, choices, NUM_LOOP_TRIP, timer, 0)
    thread.start()
    # Thread.isAlive() was removed in Python 3.9; is_alive() has been
    # the supported spelling since 2.6 and is backward compatible.
    while thread.is_alive():
        db.pack(packt)
        snooze()
        packt = time.time()
    thread.join()
    self._sanity_check()
    db.close()
def checkUndoInvalidation(self):
    """A load issued mid-undo must not poison the cache: after the undo
    commits, the object must read back at its previous state."""
    oid = self._storage.new_oid()
    revid = self._dostore(oid, data=MinPO(23))
    revid = self._dostore(oid, revid=revid, data=MinPO(24))
    revid = self._dostore(oid, revid=revid, data=MinPO(25))
    info = self._storage.undoInfo()
    if not info:
        # Preserved this comment, but don't understand it:
        # "Perhaps we have an old storage implementation that
        # does do the negative nonsense."
        info = self._storage.undoInfo(0, 20)
    tid = info[0]['id']
    # We may need to bail at this point if the storage doesn't
    # support transactional undo
    if not self._storage.supportsTransactionalUndo():
        return
    # Now start an undo transaction
    t = Transaction()
    t.note('undo1')
    self._storage.tpc_begin(t)
    tid, oids = self._storage.undo(tid, t)
    # Make sure this doesn't load invalid data into the cache
    self._storage.load(oid, '')
    self._storage.tpc_vote(t)
    self._storage.tpc_finish(t)
    # Plain `assert` statements are stripped under `python -O`; use the
    # unittest assertions so these checks always run.
    self.assertEqual(len(oids), 1)
    self.assertEqual(oids[0], oid)
    data, revid = self._storage.load(oid, '')
    obj = zodb_unpickle(data)
    self.assertEqual(obj, MinPO(24))
def _dostore(self, oid=None, revid=None, data=None, already_pickled=0,
             user=None, description=None):
    """Do a complete storage transaction.  The defaults are:

     - oid=None, ask the storage for a new oid
     - revid=None, use a revid of ZERO
     - data=None, pickle up some arbitrary data (the integer 7)

    Returns the object's new revision id.
    """
    if oid is None:
        oid = self._storage.new_oid()
    if revid is None:
        revid = ZERO
    if data is None:
        data = MinPO(7)
    if type(data) == int:
        # Bare ints are wrapped so they can be persisted as objects.
        data = MinPO(data)
    if not already_pickled:
        data = zodb_pickle(data)
    # Begin the transaction
    t = transaction.Transaction()
    if user is not None:
        t.user = user
    if description is not None:
        t.description = description
    try:
        self._storage.tpc_begin(t)
        # Store an object
        r1 = self._storage.store(oid, revid, data, '', t)
        # Finish the transaction
        r2 = self._storage.tpc_vote(t)
        # NOTE(review): here the new revid is taken from tpc_finish's
        # return value, unlike the handle_serials-based _dostore variant
        # elsewhere in this file -- confirm both styles agree for the
        # storage API level under test.
        revid = self._storage.tpc_finish(t)
    except:
        self._storage.tpc_abort(t)
        raise
    return revid
def checkTwoObjectUndo(self): eq = self.assertEqual # Convenience p31, p32, p51, p52 = map(zodb_pickle, map(MinPO, (31, 32, 51, 52))) oid1 = self._storage.new_oid() oid2 = self._storage.new_oid() revid1 = revid2 = ZERO # Store two objects in the same transaction t = TransactionMetaData() self._storage.tpc_begin(t) self._storage.store(oid1, revid1, p31, '', t) self._storage.store(oid2, revid2, p51, '', t) # Finish the transaction self._storage.tpc_vote(t) tid = self._storage.tpc_finish(t) # Update those same two objects t = TransactionMetaData() self._storage.tpc_begin(t) self._storage.store(oid1, tid, p32, '', t) self._storage.store(oid2, tid, p52, '', t) # Finish the transaction self._storage.tpc_vote(t) self._storage.tpc_finish(t) # Make sure the objects have the current value data, revid1 = load_current(self._storage, oid1) eq(zodb_unpickle(data), MinPO(32)) data, revid2 = load_current(self._storage, oid2) eq(zodb_unpickle(data), MinPO(52)) # Now attempt to undo the transaction containing two objects info = self._storage.undoInfo() self._undo(info[0]['id'], [oid1, oid2]) data, revid1 = load_current(self._storage, oid1) eq(zodb_unpickle(data), MinPO(31)) data, revid2 = load_current(self._storage, oid2) eq(zodb_unpickle(data), MinPO(51)) self._iterate()