def checkRestoreAcrossPack(self):
    db = DB(self._storage)
    c = db.open()
    r = c.root()
    obj = r["obj1"] = MinPO(1)
    transaction.commit()
    obj = r["obj2"] = MinPO(1)
    transaction.commit()
    self._dst.copyTransactionsFrom(self._storage)
    self._dst.pack(time.time(), referencesf)

    self._undo(self._storage.undoInfo()[0]['id'])

    # Copy the final transaction manually.  Even though there
    # was a pack, the restore() ought to succeed.
    it = self._storage.iterator()
    # Get the last transaction and its record iterator.  Record iterators
    # can't be accessed out-of-order, so we need to do this in a bit
    # complicated way:
    for final in it:
        records = list(final)

    self._dst.tpc_begin(final, final.tid, final.status)
    for r in records:
        self._dst.restore(r.oid, r.tid, r.data, '', r.data_txn, final)
    self._dst.tpc_vote(final)
    self._dst.tpc_finish(final)
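
# A minimal sketch of the same restore-based replay applied to every
# transaction in a source storage (essentially the pattern behind
# copyTransactionsFrom()).  `src` and `dst` are hypothetical storage
# objects supporting the iterator/restore protocol used in the test above;
# this is illustrative, not part of the test suite.
def copy_all_transactions(src, dst):
    for txn in src.iterator():
        # Begin the destination transaction with the source tid and status
        # so the copy keeps the original transaction metadata.
        dst.tpc_begin(txn, txn.tid, txn.status)
        for record in txn:
            # An empty string fills the old "version" slot; data_txn carries
            # the backpointer, if any.
            dst.restore(record.oid, record.tid, record.data, '',
                        record.data_txn, txn)
        dst.tpc_vote(txn)
        dst.tpc_finish(txn)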
def check_record_iternext(self):
    db = DB(self._storage)
    conn = db.open()
    conn.root()['abc'] = MinPO('abc')
    conn.root()['xyz'] = MinPO('xyz')
    transaction.commit()

    # Ensure it's all on disk.
    db.close()
    self._storage.close()

    self.open()

    key = None
    for x in (b'\000', b'\001', b'\002'):
        oid, tid, data, next_oid = self._storage.record_iternext(key)
        self.assertEqual(oid, (b'\000' * 7) + x)
        key = next_oid
        expected_data, expected_tid = self._storage.load(oid, '')
        self.assertEqual(expected_data, data)
        self.assertEqual(expected_tid, tid)
        if x == b'\002':
            self.assertEqual(next_oid, None)
        else:
            self.assertNotEqual(next_oid, None)
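
# A minimal sketch, under the same record_iternext() contract exercised
# above, of walking every current record in a storage: start with a None
# key and stop when the returned next key is None.  `storage` is a
# hypothetical object providing record_iternext(); this helper is
# illustrative and not part of the test suite.
def iter_current_records(storage):
    key = None
    while True:
        oid, tid, data, next_key = storage.record_iternext(key)
        yield oid, tid, data
        if next_key is None:
            break
        key = next_key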
def checkRestoreAcrossPack(self):
    db = DB(self._storage)
    c = db.open()
    r = c.root()
    obj = r["obj1"] = MinPO(1)
    transaction.commit()
    obj = r["obj2"] = MinPO(1)
    transaction.commit()
    self._dst.copyTransactionsFrom(self._storage)
    self._dst.pack(time.time(), referencesf)

    self._undo(self._storage.undoInfo()[0]['id'])

    # Copy the final transaction manually.  Even though there
    # was a pack, the restore() ought to succeed.
    it = self._storage.iterator()
    final = list(it)[-1]
    self._dst.tpc_begin(final, final.tid, final.status)
    for r in final:
        self._dst.restore(r.oid, r.tid, r.data, r.version,
                          r.data_txn, final)
    it.close()
    self._dst.tpc_vote(final)
    self._dst.tpc_finish(final)
def checkPackWithGCOnDestinationAfterRestore(self):
    raises = self.assertRaises
    db = DB(self._storage)
    conn = db.open()
    root = conn.root()
    root.obj = obj1 = MinPO(1)
    txn = transaction.get()
    txn.note('root -> obj')
    txn.commit()
    root.obj.obj = obj2 = MinPO(2)
    txn = transaction.get()
    txn.note('root -> obj -> obj')
    txn.commit()
    del root.obj
    txn = transaction.get()
    txn.note('root -X->')
    txn.commit()

    # Now copy the transactions to the destination
    self._dst.copyTransactionsFrom(self._storage)

    # Now pack the destination.
    snooze()
    self._dst.pack(time.time(), referencesf)

    # And check to see that the root object exists, but not the other
    # objects.
    data, serial = self._dst.load(root._p_oid, '')
    raises(KeyError, self._dst.load, obj1._p_oid, '')
    raises(KeyError, self._dst.load, obj2._p_oid, '')
def mtstorehelper(self):
    name = threading.currentThread().getName()
    objs = []
    for i in range(10):
        objs.append(MinPO("X" * 200000))
        objs.append(MinPO("X"))
    for obj in objs:
        self._dostore(data=obj)
def checkPackWithGCOnDestinationAfterRestore(self):
    raises = self.assertRaises
    closing = self._closing
    __traceback_info__ = self._storage, self._dst
    db = closing(DB(self._storage))
    conn = closing(db.open())
    root = conn.root()
    root.obj = obj1 = MinPO(1)
    txn = transaction.get()
    txn.note(u'root -> obj')
    txn.commit()
    root.obj.obj = obj2 = MinPO(2)
    txn = transaction.get()
    txn.note(u'root -> obj -> obj')
    txn.commit()
    del root.obj
    txn = transaction.get()
    txn.note(u'root -X->')
    txn.commit()

    storage_last_tid = conn._storage.lastTransaction()
    self.assertEqual(storage_last_tid, root._p_serial)

    # Now copy the transactions to the destination
    self._dst.copyTransactionsFrom(self._storage)
    self.assertEqual(self._dst.lastTransaction(), storage_last_tid)

    # If the source storage is a history-free storage, all
    # of the transactions are now marked as packed in the
    # destination storage.  To trigger a pack, we have to
    # add another transaction to the destination that is
    # not packed.
    db2 = closing(DB(self._dst))
    tx_manager = transaction.TransactionManager(explicit=True)
    conn2 = closing(db2.open(tx_manager))
    txn = tx_manager.begin()
    root2 = conn2.root()
    root2.extra = 0
    txn.note(u'root.extra = 0')
    txn.commit()

    dest_last_tid = conn2._storage.lastTransaction()
    self.assertGreater(dest_last_tid, storage_last_tid)
    self.assertEqual(dest_last_tid, root2._p_serial)

    # Now pack the destination.
    from ZODB.utils import u64 as bytes8_to_int64
    if IRelStorage.providedBy(self._dst):
        packtime = bytes8_to_int64(storage_last_tid)
    else:
        from persistent.timestamp import TimeStamp
        packtime = TimeStamp(dest_last_tid).timeTime() + 2
    self._dst.pack(packtime, referencesf)

    # And check to see that the root object exists, but not the other
    # objects.
    __traceback_info__ += (packtime,)
    _data, _serial = self._dst.load(root._p_oid, '')
    raises(KeyError, self._dst.load, obj1._p_oid, '')
    raises(KeyError, self._dst.load, obj2._p_oid, '')
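
# A small illustrative helper (not part of the test above) showing the
# tid-to-pack-time conversion it relies on: persistent.timestamp.TimeStamp
# wraps an 8-byte tid and timeTime() yields seconds since the epoch.  The
# slack mirrors the "+ 2" above; treating it as a parameter is this
# sketch's assumption, not an API requirement.
from persistent.timestamp import TimeStamp

def pack_time_for_tid(tid, slack=2):
    # Convert a transaction id to a wall-clock pack time, with some slack
    # so the transaction itself is covered by the pack.
    return TimeStamp(tid).timeTime() + slack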
def testrun(self):
    try:
        self.storage.tpc_begin(self.trans)
        oid = self.storage.new_oid()
        p = zodb_pickle(MinPO("c"))
        self.storage.store(oid, ZERO, p, '', self.trans)
        oid = self.storage.new_oid()
        p = zodb_pickle(MinPO("c"))
        self.storage.store(oid, ZERO, p, '', self.trans)
        self.myvote()
        self.storage.tpc_finish(self.trans)
    except ClientDisconnected:
        pass
def checkRecoverUndoInVersion(self):
    oid = self._storage.new_oid()
    version = "aVersion"
    revid_a = self._dostore(oid, data=MinPO(91))
    revid_b = self._dostore(oid, revid=revid_a, version=version,
                            data=MinPO(92))
    revid_c = self._dostore(oid, revid=revid_b, version=version,
                            data=MinPO(93))
    self._undo(self._storage.undoInfo()[0]['id'], [oid])
    self._commitVersion(version, '')
    self._undo(self._storage.undoInfo()[0]['id'], [oid])

    # now copy the records to a new storage
    self._dst.copyTransactionsFrom(self._storage)
    self.compare(self._storage, self._dst)

    # The last two transactions were applied directly rather than
    # copied.  So we can't use compare() to verify that the new
    # transactions are applied correctly.  (The new transactions
    # will have different timestamps for each storage.)
    self._abortVersion(version)
    self.assert_(self._storage.versionEmpty(version))
    self._undo(self._storage.undoInfo()[0]['id'], [oid])
    self.assert_(not self._storage.versionEmpty(version))

    # check the data is what we expect it to be
    data, revid = self._storage.load(oid, version)
    self.assertEqual(zodb_unpickle(data), MinPO(92))
    data, revid = self._storage.load(oid, '')
    self.assertEqual(zodb_unpickle(data), MinPO(91))

    # and swap the storages
    tmp = self._storage
    self._storage = self._dst
    self._abortVersion(version)
    self.assert_(self._storage.versionEmpty(version))
    self._undo(self._storage.undoInfo()[0]['id'], [oid])
    self.assert_(not self._storage.versionEmpty(version))

    # check the data is what we expect it to be
    data, revid = self._storage.load(oid, version)
    self.assertEqual(zodb_unpickle(data), MinPO(92))
    data, revid = self._storage.load(oid, '')
    self.assertEqual(zodb_unpickle(data), MinPO(91))

    # swap them back
    self._storage = tmp

    # Now remove _dst and copy all the transactions a second time.
    # This time we will be able to confirm via compare().
    self._dst.close()
    self._dst.cleanup()
    self._dst = self.new_dest()
    self._dst.copyTransactionsFrom(self._storage)
    self.compare(self._storage, self._dst)
def checkanalyze(self):
    import types
    from BTrees.OOBTree import OOBTree
    from ZODB.scripts import analyze

    # Set up a module to act as a broken import
    module_name = 'brokenmodule'
    module = types.ModuleType(module_name)
    sys.modules[module_name] = module

    class Broken(MinPO):
        __module__ = module_name
    module.Broken = Broken

    oids = [[self._storage.new_oid(), None] for i in range(3)]

    def store(i, data):
        oid, revid = oids[i]
        self._storage.store(oid, revid, data, "", t)

    for i in range(2):
        t = TransactionMetaData()
        self._storage.tpc_begin(t)

        # sometimes data is in this format
        store(0, dumps(OOBTree, _protocol))
        # and it could be from a broken module
        store(1, dumps(Broken, _protocol))
        # but mostly it looks like this
        store(2, zodb_pickle(MinPO(2)))

        self._storage.tpc_vote(t)
        tid = self._storage.tpc_finish(t)
        for oid_revid in oids:
            oid_revid[1] = tid

    # now break the import of the Broken class
    del sys.modules[module_name]

    # from ZODB.scripts.analyze.analyze
    fsi = self._storage.iterator()
    rep = analyze.Report()
    for txn in fsi:
        analyze.analyze_trans(rep, txn)

    # from ZODB.scripts.analyze.report
    typemap = sorted(rep.TYPEMAP.keys())
    cumpct = 0.0
    for t in typemap:
        pct = rep.TYPESIZE[t] * 100.0 / rep.DBYTES
        cumpct += pct

    self.assertAlmostEqual(cumpct, 100.0, 0,
                           "Failed to analyze some records")
def checkRestoreAfterDoubleCommit(self):
    oid = self._storage.new_oid()
    revid = b'\0' * 8
    data1 = zodb_pickle(MinPO(11))
    data2 = zodb_pickle(MinPO(12))

    # Begin the transaction
    t = transaction.Transaction()
    try:
        self._storage.tpc_begin(t)
        # Store an object
        self._storage.store(oid, revid, data1, '', t)
        # Store it again
        self._storage.store(oid, revid, data2, '', t)
        # Finish the transaction
        self._storage.tpc_vote(t)
        self._storage.tpc_finish(t)
    except:
        self._storage.tpc_abort(t)
        raise

    self._dst.copyTransactionsFrom(self._storage)
    self.compare(self._storage, self._dst)
def run(self):
    self.storage.tpc_begin(self.trans)
    oid = self.storage.new_oid()
    self.storage.store(oid, ZERO, zodb_pickle(MinPO("c")), '', self.trans)
    self.storage.tpc_vote(self.trans)
    self.threadStartedEvent.set()
    self.doNextEvent.wait(10)
    try:
        self.storage.tpc_finish(self.trans)
    except ZEO.Exceptions.ClientStorageError:
        self.gotValueError = 1
        self.storage.tpc_abort(self.trans)
def checkPackWithGCOnDestinationAfterRestore(self):
    raises = self.assertRaises
    closing = self._closing
    db = closing(DB(self._storage))
    conn = closing(db.open())
    root = conn.root()
    root.obj = obj1 = MinPO(1)
    txn = transaction.get()
    txn.note(u'root -> obj')
    txn.commit()
    root.obj.obj = obj2 = MinPO(2)
    txn = transaction.get()
    txn.note(u'root -> obj -> obj')
    txn.commit()
    del root.obj
    txn = transaction.get()
    txn.note(u'root -X->')
    txn.commit()

    # Now copy the transactions to the destination
    self._dst.copyTransactionsFrom(self._storage)

    # If the source storage is a history-free storage, all
    # of the transactions are now marked as packed in the
    # destination storage.  To trigger a pack, we have to
    # add another transaction to the destination that is
    # not packed.
    db2 = closing(DB(self._dst))
    conn2 = closing(db2.open())
    conn2.root().extra = 0
    txn = transaction.get()
    txn.note(u'root.extra = 0')
    txn.commit()

    # Now pack the destination.
    snooze()
    self._dst.pack(time.time(), referencesf)

    # And check to see that the root object exists, but not the other
    # objects.
    _data, _serial = self._dst.load(root._p_oid, '')
    raises(KeyError, self._dst.load, obj1._p_oid, '')
    raises(KeyError, self._dst.load, obj2._p_oid, '')
def check10Kstores(self):
    # The _get_cached_serial() method has a special case
    # every 8000 calls.  Make sure it gets minimal coverage.
    oids = [[self._storage.new_oid(), None] for i in range(100)]
    for i in range(100):
        t = transaction.Transaction()
        self._storage.tpc_begin(t)
        for j in range(100):
            o = MinPO(j)
            oid, revid = oids[j]
            serial = self._storage.store(oid, revid, zodb_pickle(o), "", t)
            oids[j][1] = serial
        self._storage.tpc_vote(t)
        self._storage.tpc_finish(t)
def checkRestoreWithMultipleObjectsInUndoRedo(self): from ZODB.FileStorage import FileStorage # Undo creates backpointers in (at least) FileStorage. ZODB 3.2.1 # FileStorage._data_find() had an off-by-8 error, neglecting to # account for the size of the backpointer when searching a # transaction with multiple data records. The results were # unpredictable. For example, it could raise a Python exception # due to passing a negative offset to file.seek(), or could # claim that a transaction didn't have data for an oid despite # that it actually did. # # The former failure mode was seen in real life, in a ZRS secondary # doing recovery. On my box today, the second failure mode is # what happens in this test (with an unpatched _data_find, of # course). Note that the error can only "bite" if more than one # data record is in a transaction, and the oid we're looking for # follows at least one data record with a backpointer. # # Unfortunately, _data_find() is a low-level implementation detail, # and this test does some horrid white-box abuse to test it. is_filestorage = isinstance(self._storage, FileStorage) db = DB(self._storage) c = db.open() r = c.root() # Create some objects. r["obj1"] = MinPO(1) r["obj2"] = MinPO(1) transaction.commit() # Add x attributes to them. r["obj1"].x = "x1" r["obj2"].x = "x2" transaction.commit() r = db.open().root() self.assertEqual(r["obj1"].x, "x1") self.assertEqual(r["obj2"].x, "x2") # Dirty tricks. if is_filestorage: obj1_oid = r["obj1"]._p_oid obj2_oid = r["obj2"]._p_oid # This will be the offset of the next transaction, which # will contain two backpointers. pos = self._storage.getSize() # Undo the attribute creation. info = self._storage.undoInfo() tid = info[0]["id"] t = Transaction() self._storage.tpc_begin(t) oids = self._storage.undo(tid, t) self._storage.tpc_vote(t) self._storage.tpc_finish(t) r = db.open().root() self.assertRaises(AttributeError, getattr, r["obj1"], "x") self.assertRaises(AttributeError, getattr, r["obj2"], "x") if is_filestorage: # _data_find should find data records for both objects in that # transaction. Without the patch, the second assert failed # (it claimed it couldn't find a data record for obj2) on my # box, but other failure modes were possible. self.assertTrue(self._storage._data_find(pos, obj1_oid, "") > 0) self.assertTrue(self._storage._data_find(pos, obj2_oid, "") > 0) # The offset of the next ("redo") transaction. pos = self._storage.getSize() # Undo the undo (restore the attributes). info = self._storage.undoInfo() tid = info[0]["id"] t = Transaction() self._storage.tpc_begin(t) oids = self._storage.undo(tid, t) self._storage.tpc_vote(t) self._storage.tpc_finish(t) r = db.open().root() self.assertEqual(r["obj1"].x, "x1") self.assertEqual(r["obj2"].x, "x2") if is_filestorage: # Again _data_find should find both objects in this txn, and # again the second assert failed on my box. self.assertTrue(self._storage._data_find(pos, obj1_oid, "") > 0) self.assertTrue(self._storage._data_find(pos, obj2_oid, "") > 0) # Indirectly provoke .restore(). .restore in turn indirectly # provokes _data_find too, but not usefully for the purposes of # the specific bug this test aims at: copyTransactionsFrom() uses # storage iterators that chase backpointers themselves, and # return the data they point at instead. The result is that # _data_find didn't actually see anything dangerous in this # part of the test. self._dst.copyTransactionsFrom(self._storage) self.compare(self._storage, self._dst)
def checkRestoreWithMultipleObjectsInUndoRedo(self): from ZODB.FileStorage import FileStorage # Undo creates backpointers in (at least) FileStorage. ZODB 3.2.1 # FileStorage._data_find() had an off-by-8 error, neglecting to # account for the size of the backpointer when searching a # transaction with multiple data records. The results were # unpredictable. For example, it could raise a Python exception # due to passing a negative offset to file.seek(), or could # claim that a transaction didn't have data for an oid despite # that it actually did. # # The former failure mode was seen in real life, in a ZRS secondary # doing recovery. On my box today, the second failure mode is # what happens in this test (with an unpatched _data_find, of # course). Note that the error can only "bite" if more than one # data record is in a transaction, and the oid we're looking for # follows at least one data record with a backpointer. # # Unfortunately, _data_find() is a low-level implementation detail, # and this test does some horrid white-box abuse to test it. is_filestorage = isinstance(self._storage, FileStorage) db = DB(self._storage) c = db.open() r = c.root() # Create some objects. r["obj1"] = MinPO(1) r["obj2"] = MinPO(1) transaction.commit() # Add x attributes to them. r["obj1"].x = 'x1' r["obj2"].x = 'x2' transaction.commit() r = db.open().root() self.assertEquals(r["obj1"].x, 'x1') self.assertEquals(r["obj2"].x, 'x2') # Dirty tricks. if is_filestorage: obj1_oid = r["obj1"]._p_oid obj2_oid = r["obj2"]._p_oid # This will be the offset of the next transaction, which # will contain two backpointers. pos = self._storage.getSize() # Undo the attribute creation. info = self._storage.undoInfo() tid = info[0]['id'] t = Transaction() self._storage.tpc_begin(t) oids = self._storage.undo(tid, t) self._storage.tpc_vote(t) self._storage.tpc_finish(t) r = db.open().root() self.assertRaises(AttributeError, getattr, r["obj1"], 'x') self.assertRaises(AttributeError, getattr, r["obj2"], 'x') if is_filestorage: # _data_find should find data records for both objects in that # transaction. Without the patch, the second assert failed # (it claimed it couldn't find a data record for obj2) on my # box, but other failure modes were possible. self.assert_(self._storage._data_find(pos, obj1_oid, '') > 0) self.assert_(self._storage._data_find(pos, obj2_oid, '') > 0) # The offset of the next ("redo") transaction. pos = self._storage.getSize() # Undo the undo (restore the attributes). info = self._storage.undoInfo() tid = info[0]['id'] t = Transaction() self._storage.tpc_begin(t) oids = self._storage.undo(tid, t) self._storage.tpc_vote(t) self._storage.tpc_finish(t) r = db.open().root() self.assertEquals(r["obj1"].x, 'x1') self.assertEquals(r["obj2"].x, 'x2') if is_filestorage: # Again _data_find should find both objects in this txn, and # again the second assert failed on my box. self.assert_(self._storage._data_find(pos, obj1_oid, '') > 0) self.assert_(self._storage._data_find(pos, obj2_oid, '') > 0) # Indirectly provoke .restore(). .restore in turn indirectly # provokes _data_find too, but not usefully for the purposes of # the specific bug this test aims at: copyTransactionsFrom() uses # storage iterators that chase backpointers themselves, and # return the data they point at instead. The result is that # _data_find didn't actually see anything dangerous in this # part of the test. self._dst.copyTransactionsFrom(self._storage) self.compare(self._storage, self._dst)
def _start_txn(self):
    txn = transaction.Transaction()
    self._storage.tpc_begin(txn)
    oid = self._storage.new_oid()
    self._storage.store(oid, ZERO, zodb_pickle(MinPO(1)), '', txn)
    return oid, txn
def checkanalyze(self):
    import new, sys, pickle
    from BTrees.OOBTree import OOBTree
    from ZODB.scripts import analyze

    # Set up a module to act as a broken import
    module_name = 'brokenmodule'
    module = new.module(module_name)
    sys.modules[module_name] = module

    class Broken(MinPO):
        __module__ = module_name
    module.Broken = Broken

    oids = [[self._storage.new_oid(), None] for i in range(3)]
    for i in range(2):
        t = transaction.Transaction()
        self._storage.tpc_begin(t)

        # sometimes data is in this format
        j = 0
        oid, revid = oids[j]
        serial = self._storage.store(oid, revid,
                                     pickle.dumps(OOBTree, 1), "", t)
        oids[j][1] = serial

        # and it could be from a broken module
        j = 1
        oid, revid = oids[j]
        serial = self._storage.store(oid, revid,
                                     pickle.dumps(Broken, 1), "", t)
        oids[j][1] = serial

        # but mostly it looks like this
        j = 2
        o = MinPO(j)
        oid, revid = oids[j]
        serial = self._storage.store(oid, revid, zodb_pickle(o), "", t)
        oids[j][1] = serial

        self._storage.tpc_vote(t)
        self._storage.tpc_finish(t)

    # now break the import of the Broken class
    del sys.modules[module_name]

    # from ZODB.scripts.analyze.analyze
    fsi = self._storage.iterator()
    rep = analyze.Report()
    for txn in fsi:
        analyze.analyze_trans(rep, txn)

    # from ZODB.scripts.analyze.report
    typemap = rep.TYPEMAP.keys()
    typemap.sort()
    cumpct = 0.0
    for t in typemap:
        pct = rep.TYPESIZE[t] * 100.0 / rep.DBYTES
        cumpct += pct

    self.assertAlmostEqual(cumpct, 100.0, 0,
                           "Failed to analyze some records")