Exemple #1
0
class HistoryFreeFromFileStorage(
        RelStorageTestBase,
        UndoableRecoveryStorage,
):
    """Recovery tests copying from a FileStorage into a history-free RelStorage."""

    keep_history = False

    def setUp(self):
        # The relational storage, wiped clean, becomes the copy target.
        self.open(create=1)
        self._storage.zap_all()
        self._dst = self._storage
        # A fresh FileStorage acts as the copy source.
        self._storage = FileStorage("Source.fs", create=True)

    def tearDown(self):
        # Close both storages before removing their on-disk state.
        for storage in (self._storage, self._dst):
            storage.close()
        for storage in (self._storage, self._dst):
            storage.cleanup()

    def new_dest(self):
        # The relational storage opened in setUp is the destination.
        return self._dst

    def compare(self, src, dest):
        # The dest storage only holds a truncated copy, so a truncated
        # comparison is used instead of compare_exact().
        self.compare_truncated(src, dest)
Exemple #2
0
class HistoryFreeFromFileStorage(
        RelStorageTestBase,
        UndoableRecoveryStorage,
):
    """Copy data out of a FileStorage into a history-free RelStorage."""

    keep_history = False

    def setUp(self):
        # Destination: the freshly zapped relational storage.
        self.open(create=1)
        self._storage.zap_all()
        self._dst = self._storage
        # Source: a brand-new FileStorage.
        self._storage = FileStorage("Source.fs", create=True)

    def tearDown(self):
        src, dst = self._storage, self._dst
        src.close()
        dst.close()
        src.cleanup()
        dst.cleanup()

    def new_dest(self):
        return self._dst

    def compare(self, src, dest):
        # Only a truncated copy lands in dest, so an exact comparison
        # would fail; compare the truncated prefix instead.
        self.compare_truncated(src, dest)
Exemple #3
0
 def tearDown(self):
     """Close every open storage and remove the files created by the test."""
     self.storage.close()
     recovered = self.recovered
     if recovered is not None:
         recovered.close()
     self.storage.cleanup()
     # Re-open the destination briefly so its cleanup() can delete the files.
     leftover = FileStorage(self.dest)
     leftover.close()
     leftover.cleanup()
Exemple #4
0
 def tearDown(self):
     """Dispose of the source, any recovered storage, and the dest file."""
     self.storage.close()
     recovered_storage = self.recovered
     if recovered_storage is not None:
         recovered_storage.close()
     self.storage.cleanup()
     # cleanup() needs an open FileStorage, so open the destination
     # just long enough to remove its on-disk files.
     dest_storage = FileStorage(self.dest)
     dest_storage.close()
     dest_storage.cleanup()
Exemple #5
0
class HistoryFreeToFileStorage(RelStorageTestBase,
                               BasicRecoveryStorage):
    # pylint:disable=abstract-method,too-many-ancestors
    """Recovery tests copying a history-free RelStorage into a FileStorage."""

    keep_history = False

    def setUp(self):
        super(HistoryFreeToFileStorage, self).setUp()
        self._storage = self.make_storage()
        destination = FileStorage("Dest.fs", create=True)
        self._dst = destination

    def tearDown(self):
        # Only the destination is owned here; the base tearDown handles
        # the relational source storage.
        self._dst.close()
        self._dst.cleanup()
        super(HistoryFreeToFileStorage, self).tearDown()

    def new_dest(self):
        # Re-open the destination file written during the test run.
        return FileStorage('Dest.fs')
Exemple #6
0
class HistoryFreeToFileStorage(RelStorageTestBase, BasicRecoveryStorage):
    """Copy a history-free RelStorage into a FileStorage destination."""

    keep_history = False

    def setUp(self):
        # Source: a relational storage; destination: a new Dest.fs.
        self._storage = self.make_storage()
        self._dst = FileStorage("Dest.fs", create=True)

    def tearDown(self):
        # Close both storages first, then delete their on-disk files.
        for storage in (self._storage, self._dst):
            storage.close()
        for storage in (self._storage, self._dst):
            storage.cleanup()

    def new_dest(self):
        return FileStorage("Dest.fs")
Exemple #7
0
class HistoryFreeToFileStorage(RelStorageTestBase,
                               BasicRecoveryStorage):
    # pylint:disable=abstract-method,too-many-ancestors
    """Recovery tests writing a history-free RelStorage out to Dest.fs."""

    keep_history = False

    def setUp(self):
        self._storage = self.make_storage()
        dest = FileStorage("Dest.fs", create=True)
        self._dst = dest

    def tearDown(self):
        source, destination = self._storage, self._dst
        source.close()
        destination.close()
        source.cleanup()
        destination.cleanup()

    def new_dest(self):
        # A fresh handle on the destination file written by the test.
        return FileStorage('Dest.fs')
Exemple #8
0
class HistoryPreservingToFileStorage(RelStorageTestBase,
                                     UndoableRecoveryStorage):
    # pylint:disable=too-many-ancestors,abstract-method,too-many-locals
    """Copy a history-preserving RelStorage into a FileStorage."""

    keep_history = True

    def setUp(self):
        super(HistoryPreservingToFileStorage, self).setUp()
        self._storage = self.make_storage()
        destination = FileStorage("Dest.fs", create=True)
        self._dst = destination

    def tearDown(self):
        # Only the destination is owned here; the base tearDown handles
        # the relational source storage.
        self._dst.close()
        self._dst.cleanup()
        super(HistoryPreservingToFileStorage, self).tearDown()

    def new_dest(self):
        # Re-open the file written during the copy.
        return FileStorage('Dest.fs')
Exemple #9
0
class HistoryFreeToFileStorage(
        RelStorageTestBase,
        BasicRecoveryStorage,
):
    """Recovery tests from a history-free RelStorage to a FileStorage."""

    keep_history = False

    def setUp(self):
        self._storage = self.make_storage()
        self._dst = FileStorage("Dest.fs", create=True)

    def tearDown(self):
        source, destination = self._storage, self._dst
        source.close()
        destination.close()
        source.cleanup()
        destination.cleanup()

    def new_dest(self):
        return FileStorage('Dest.fs')
Exemple #10
0
class HistoryPreservingFromFileStorage(
        RelStorageTestBase,
        UndoableRecoveryStorage,
):
    """Restore from a FileStorage into a history-preserving RelStorage."""

    keep_history = True

    def setUp(self):
        # Destination: a fresh relational storage.
        self._dst = self.make_storage()
        # Source: a newly created FileStorage.
        self._storage = FileStorage("Source.fs", create=True)

    def tearDown(self):
        source, destination = self._storage, self._dst
        source.close()
        destination.close()
        source.cleanup()
        destination.cleanup()

    def new_dest(self):
        return self._dst
Exemple #11
0
class HistoryPreservingFromFileStorage(
        RelStorageTestBase,
        UndoableRecoveryStorage,
):
    """Copy data out of a FileStorage into a history-preserving RelStorage."""

    keep_history = True

    def setUp(self):
        # Relational destination first, then the FileStorage source.
        self._dst = self.make_storage()
        self._storage = FileStorage("Source.fs", create=True)

    def tearDown(self):
        # Close both storages, then remove their on-disk files.
        for storage in (self._storage, self._dst):
            storage.close()
        for storage in (self._storage, self._dst):
            storage.cleanup()

    def new_dest(self):
        return self._dst
Exemple #12
0
class HistoryPreservingToFileStorage(
        RelStorageTestBase,
        UndoableRecoveryStorage,
):
    """Copy a history-preserving RelStorage into a FileStorage."""

    keep_history = True

    def setUp(self):
        # Open and wipe the relational source storage.
        self.open(create=1)
        self._storage.zap_all()
        # Create the FileStorage destination.
        self._dst = FileStorage("Dest.fs", create=True)

    def tearDown(self):
        src, dst = self._storage, self._dst
        src.close()
        dst.close()
        src.cleanup()
        dst.cleanup()

    def new_dest(self):
        return FileStorage('Dest.fs')
Exemple #13
0
class HistoryPreservingToFileStorage(
        RelStorageTestBase,
        UndoableRecoveryStorage,
):
    """Recovery tests writing a history-preserving RelStorage to Dest.fs."""

    keep_history = True

    def setUp(self):
        # Source: the relational storage, opened and wiped clean.
        self.open(create=1)
        self._storage.zap_all()
        # Destination: a freshly created FileStorage.
        self._dst = FileStorage("Dest.fs", create=True)

    def tearDown(self):
        # Close both storages before deleting their files.
        for storage in (self._storage, self._dst):
            storage.close()
        for storage in (self._storage, self._dst):
            storage.cleanup()

    def new_dest(self):
        return FileStorage('Dest.fs')
Exemple #14
0
class HistoryFreeFromFileStorage(RelStorageTestBase,
                                 UndoableRecoveryStorage):
    # pylint:disable=abstract-method,too-many-ancestors
    """Restore from a FileStorage source into a history-free RelStorage."""

    keep_history = False

    def setUp(self):
        # NOTE(review): assumes self._storage is already provided by the
        # base class at this point — confirm against RelStorageTestBase.
        self._dst = self._storage
        self._storage = FileStorage("Source.fs", create=True)

    def tearDown(self):
        source, destination = self._storage, self._dst
        source.close()
        destination.close()
        source.cleanup()
        destination.cleanup()

    def new_dest(self):
        return self._dst

    def compare(self, src, dest):
        # dest holds only a truncated copy, so the truncated comparison
        # is used rather than compare_exact().
        self.compare_truncated(src, dest)
Exemple #15
0
File: db.py — Project: dpdani/csu
class cDB(object):
	"""
	Wrapper around a ZODB database.

	Provides extra convenience functions built on top of the ones that
	ZODB already supplies.  For an object to be serialized, make sure it
	is an instance of a subclass of Persistent.  For lists and
	dictionaries, utils.plist and utils.XXBTree (respectively) can be
	used.

	NOTE(review): written for Python 2 (see the ``except X, e`` syntax
	in decrypt_and_start).
	"""

	def __init__(self):
		# All attributes start out cleared; start() populates them.
		self.__set_to_none()

	def start(self, path="", storage=None, is_app_db=False):
		"""
		Initialize a database.

		If storage is None, FileStorage(path) is used; otherwise a
		storage must be initialized beforehand and passed as the
		argument.  When a custom storage is passed, path is still saved
		in cDB.path but is not otherwise used.
		Returns True if the database has just been created, False
		otherwise.

		:type path: str
		:type storage: BaseStorage
		:type is_app_db:  bool
		:rtype:     bool
		"""

		# TODO: this function should ask whether it has to create the database (or not?).
		# TODO: handle ZODB lock error due to incorrect last close of the app
		#       for now forcing close with task manager and reopening the app works

		self.is_app_db = is_app_db

		if storage is None:
			if path != "":
				self.storage = FileStorage(path)
				self.path = path
			else:
				# No storage and no path: there is nothing to open.
				raise ValueError(
					"Ungiven argument path_to_db while assumed you have to give it. (storage = None)")
		else:
			self.storage = storage
			if not path:
				self.path = None
			else:
				self.path = path

		# Standard ZODB bootstrap: storage -> DB -> connection -> root.
		self.db = DB(self.storage)
		self.connection = self.db.open()
		self._root = self.connection.root()

		is_new = self._is_new(self._root)

		# Begin the initial db-creation transaction.  If the db is being
		# created (is_new = True) this mini-transaction is what saves
		# 'ROOT' and 'VERSION'.
		transaction.begin()

		if is_new:
			# Brand-new database: install the root object and the
			# version marker before reading them back.
			self._root['ROOT'] = Root(create=True, is_app_db=self.is_app_db)
			self._root['VERSION'] = utils.SUPPORTED_DATABASE_VERSION
			self.root = self._root['ROOT']
			self.db_version = self._root['VERSION']
		else:
			self.root = self._root['ROOT']
			self.db_version = self._root['VERSION']

		if self.db_version != utils.SUPPORTED_DATABASE_VERSION:
			raise exc.DB_UnsupportedDatabaseVersion(self.db_version, utils.SUPPORTED_DATABASE_VERSION)

		transaction.commit()    # end of initial db creation transaction
		transaction.begin()     # start of db use transaction

		return is_new

	def _is_new(self, tmp_root):
		"""
		This function is used internally to determine whether this is a
		new database (an empty PersistentMapping root).
		Will not work externally.
		"""
		return isinstance(tmp_root, PersistentMapping) and len(tmp_root.keys()) == 0

	def __set_to_none(self):
		"""
		Sets internal parameters to None.
		Used in __init__ and close.
		"""
		self.storage = None
		self.path = None
		self.db = None
		self.connection = None
		self._root = None
		self.root = None
		self.db_version = None
		self.is_app_db = None

	def copy_to(self, path="", storage=None, close_when_finished=False):
		"""
		Copy the content of this database into another database.

		The arguments mirror those of cDB.start and are passed straight
		through to it.  If close_when_finished is False, the new
		database is returned still open after the copy; if True, its
		connection is closed after the copy and the database is then
		returned.
		"""

		new_db = cDB()  # Create the empty instance of the new database.
		new_db.start(path, storage)  # Initialize the database.
		new_db.root = self.root  # Copy this database's content into the new one.

		if close_when_finished:  # Close the connection to the new database, if requested.
			new_db.close_connection()

		return new_db  # Return the new database.

	def delete(self):
		"""
		Delete this database from disk.
		WARNING: USE WITH CAUTION.
		"""
		# Close without saving, keeping attributes so cleanup() can run.
		self.close(save=False, set_to_none=False)
		self.storage.cleanup()
		self.__set_to_none()

	def save(self, save_plugins=True, encrypt=False, key=None):
		"""
		Commit the current state of the database.

		If this is not the application database and save_plugins is
		true, plugins are first asked for their state via
		instruct_plugins() and the result is stored under
		root['Plugins'].  If encrypt is true, the database file at
		self.path is encrypted with key after the commit.
		"""
		if not self.is_app_db and save_plugins:
			plugins_savs = self.instruct_plugins()
			if plugins_savs is None:
				exc.warn("", exc.DB_InstructPluginsNotSet) # message is provided by the exc module
			else:
				for plugin_id in plugins_savs.keys():
					# TODO: if the plugin has not been enabled by the user, raise an exception
					self.root['Plugins'][plugin_id] = plugins_savs[plugin_id]
					transaction.commit()

		# Re-attach the (possibly replaced) root and force a write.
		self._root['ROOT'] = self.root
		self._root._p_changed = True
		transaction.commit()

		# save completed. now encrypt if requested
		if encrypt:
			if self.path is None:
				raise exc.DB_NoPathOnEncryption()
			if key is None:
				raise exc.DB_NoKeyOnEncryption()
			security.encrypt_file(self.path, key)

	def instruct_plugins(self):
		"""
		This function instructs the plugins about entering the saving state.
		This function will be replaced with an other one set by the DB APIService.
		The replaced method should return a non-None value (refer to cDB.save).
		This function should always return a dictionary.
		"""
		return None

	def close(self, save=True, save_plugins=True, encrypt=False, key=None, set_to_none=True):
		"""
		Close the database, optionally saving first.

		Closes the connection, the DB and the storage in that order;
		if set_to_none is true, all attributes are cleared afterwards.
		"""
		if save:
			self.save(save_plugins=save_plugins, encrypt=encrypt, key=key)
		self.close_connection()
		self.close_db()
		self.close_storage()
		if set_to_none:
			self.__set_to_none()

	def close_connection(self):
		"""
		This is a short-hand function for self.connection.close().
		If self.connection is None, nothing happens.
		"""
		if self.connection is not None:
			self.connection.close()

	def close_db(self):
		"""
		This is a short-hand function for self.db.close().
		If self.db is None, nothing happens.
		"""
		if self.db is not None:
			self.db.close()

	def close_storage(self):
		"""
		This is a short-hand function for self.storage.close().
		If self.storage is None, nothing happens.
		"""
		if self.storage is not None:
			self.storage.close()

	def decrypt_and_start(self, key, path, is_app_db=False):
		"""
		Decrypt path and call cDB.start() with the given arguments.

		Path cannot be an empty string. This basically means that
		FileStorage is the only supported storage for this function.

		:type path: str
		:type is_app_db: bool
		:rtype: bool
		"""
		# maybe it could be possible to implement a storage
		# that automatically encrypts/decrypts
		try:
			security.decrypt_file(path, key)
		except security.DecryptionException, e:
			raise exc.DB_CannotDecryptDatabase(path, e)
		return self.start(path, storage=None, is_app_db=is_app_db)
class QueueConflictTests(unittest.TestCase):
    """Exercise CatalogEventQueue conflict resolution across two connections.

    Two connections to the same FileStorage-backed DB each hold a reference
    to the same persistent queue; the tests provoke concurrent commits and
    check which conflicts resolve and which raise ConflictError.

    NOTE(review): uses Python 2-era unittest spellings (assertEquals,
    failUnless).
    """

    def _setAlternativePolicy(self):
        # Apply the alternative conflict resolution policy
        self.queue._conflict_policy = ALTERNATIVE_POLICY
        self.queue._p_jar.transaction_manager.commit()
        self.queue2._p_jar.sync()

        self.assertEquals(self.queue._conflict_policy, ALTERNATIVE_POLICY)
        self.assertEquals(self.queue2._conflict_policy, ALTERNATIVE_POLICY)

    def _insane_update(self, queue, uid, etype):
        # Queue update method that allows insane state changes, needed
        # to provoke pathological queue states
        data = queue._data
        current = data.get(uid)
        if current is not None:
            generation, current = current

            if ((current is ADDED or current is CHANGED_ADDED)
                    and etype is CHANGED):
                etype = CHANGED_ADDED
        else:
            generation = 0

        # Bump the generation so conflict resolution can order states.
        data[uid] = generation + 1, etype

        queue._p_changed = 1

    def openDB(self):
        """Create a temporary FileStorage-backed DB for the test."""
        from ZODB.FileStorage import FileStorage
        from ZODB.DB import DB
        self.dir = tempfile.mkdtemp()
        self.storage = FileStorage(os.path.join(self.dir,
                                                'testQCConflicts.fs'))
        self.db = DB(self.storage)

    def setUp(self):
        """Open two connections that share one persistent queue."""
        self.openDB()
        queue = CatalogEventQueue()

        tm1 = transaction.TransactionManager()
        self.conn1 = self.db.open(transaction_manager=tm1)
        r1 = self.conn1.root()
        r1["queue"] = queue
        del queue
        self.queue = r1["queue"]
        tm1.commit()

        tm2 = transaction.TransactionManager()
        self.conn2 = self.db.open(transaction_manager=tm2)
        r2 = self.conn2.root()
        self.queue2 = r2["queue"]
        ignored = dir(self.queue2)  # unghostify

    def tearDown(self):
        """Abort pending work and remove the temporary database files."""
        transaction.abort()
        del self.queue
        del self.queue2
        if self.storage is not None:
            self.storage.close()
            self.storage.cleanup()
            shutil.rmtree(self.dir)

    def test_rig(self):
        # Test the test rig
        self.assertEqual(self.queue._p_serial, self.queue2._p_serial)

    def test_simpleConflict(self):
        # Using the first connection, index 10 paths
        for n in range(10):
            self.queue.update('/f%i' % n, ADDED)
        self.queue._p_jar.transaction_manager.commit()

        # After this run, the first connection's queuecatalog has 10
        # entries, the second has none.
        self.assertEqual(len(self.queue), 10)
        self.assertEqual(len(self.queue2), 0)

        # Using the second connection, index the other 10 folders
        for n in range(10):
            self.queue2.update('/g%i' % n, ADDED)

        # Now both connections' queuecatalogs have 10 entries each, but
        # for differrent objects
        self.assertEqual(len(self.queue), 10)
        self.assertEqual(len(self.queue2), 10)

        # Now we commit. Conflict resolution on the catalog queue should
        # kick in because both connections have changes. Since none of the
        # events collide, we should end up with 20 entries in our catalogs.
        self.queue2._p_jar.transaction_manager.commit()
        self.queue._p_jar.sync()
        self.queue2._p_jar.sync()
        self.assertEqual(len(self.queue), 20)
        self.assertEqual(len(self.queue2), 20)

    def test_unresolved_add_after_something(self):
        # If an  event is encountered for an object and we are trying to
        # commit an ADDED event, a conflict is encountered

        # Mutilate the logger so we don't see complaints about the
        # conflict we are about to provoke
        from Products.QueueCatalog.QueueCatalog import logger
        logger.disabled = 1

        self.queue.update('/f0', ADDED)
        self.queue.update('/f0', CHANGED)
        self.queue._p_jar.transaction_manager.commit()

        self.queue2.update('/f0', ADDED)
        self.queue2.update('/f0', CHANGED)
        self.queue2._p_jar.transaction_manager.commit()

        self._insane_update(self.queue, '/f0', CHANGED)
        self.queue._p_jar.transaction_manager.commit()

        self._insane_update(self.queue2, '/f0', ADDED)
        self.assertRaises(ConflictError,
                          self.queue2._p_jar.transaction_manager.commit)

        # cleanup the logger
        logger.disabled = 0

    def test_resolved_add_after_nonremoval(self):
        # If an  event is encountered for an object and we are trying to
        # commit an ADDED event while the conflict resolution policy is
        # NOT the SAFE_POLICY, we won't get a conflict.
        self._setAlternativePolicy()

        self.queue.update('/f0', ADDED)
        self.queue.update('/f0', CHANGED)
        self.queue._p_jar.transaction_manager.commit()

        self.queue2.update('/f0', ADDED)
        self.queue2.update('/f0', CHANGED)
        self.queue2._p_jar.transaction_manager.commit()

        self._insane_update(self.queue, '/f0', CHANGED)
        self.queue._p_jar.transaction_manager.commit()

        # If we had a conflict, this would blow up
        self._insane_update(self.queue2, '/f0', ADDED)
        self.queue2._p_jar.transaction_manager.commit()

        # After the conflict has been resolved, we expect the queues to
        # containa a CHANGED_ADDED event.
        self.queue._p_jar.sync()
        self.queue2._p_jar.sync()
        self.assertEquals(len(self.queue), 1)
        self.assertEquals(len(self.queue2), 1)
        event1 = self.queue.getEvent('/f0')
        event2 = self.queue2.getEvent('/f0')
        self.failUnless(event1 == event2 == CHANGED_ADDED)

    def test_resolved_add_after_removal(self):
        # If a REMOVED event is encountered for an object and we are trying to
        # commit an ADDED event while the conflict resolution policy is
        # NOT the SAFE_POLICY, we won't get a conflict.
        self._setAlternativePolicy()

        self.queue.update('/f0', ADDED)
        self.queue.update('/f0', CHANGED)
        self.queue._p_jar.transaction_manager.commit()

        self.queue2.update('/f0', ADDED)
        self.queue2.update('/f0', CHANGED)
        self.queue2._p_jar.transaction_manager.commit()

        self.queue.update('/f0', REMOVED)
        self.queue._p_jar.transaction_manager.commit()

        # If we had a conflict, this would blow up
        self._insane_update(self.queue2, '/f0', ADDED)
        self.queue2._p_jar.transaction_manager.commit()

        # After the conflict has been resolved, we expect the queue to
        # contain a REMOVED event.
        self.queue._p_jar.sync()
        self.queue2._p_jar.sync()
        self.assertEquals(len(self.queue), 1)
        self.assertEquals(len(self.queue2), 1)
        event1 = self.queue.getEvent('/f0')
        event2 = self.queue2.getEvent('/f0')
        self.failUnless(event1 == event2 == REMOVED)

    def test_unresolved_new_old_current_all_different(self):
        # If the events we get from the current, new and old states are
        # all different, we throw in the towel in the form of a conflict.
        # This test relies on the fact that no OLD state is de-facto treated
        # as a state.

        # Mutilate the logger so we don't see complaints about the
        # conflict we are about to provoke
        from Products.QueueCatalog.QueueCatalog import logger
        logger.disabled = 1

        self.queue.update('/f0', ADDED)
        self.queue.update('/f0', CHANGED)
        self.queue._p_jar.transaction_manager.commit()

        # This commit should now raise a conflict
        self._insane_update(self.queue2, '/f0', REMOVED)
        self.assertRaises(ConflictError,
                          self.queue2._p_jar.transaction_manager.commit)

        # cleanup the logger
        logger.disabled = 0

    def test_resolved_new_old_current_all_different(self):
        # If the events we get from the current, new and old states are
        # all different and the SAFE_POLICY conflict resolution policy is
        # not enforced, the conflict resolves without bloodshed.
        # This test relies on the fact that no OLD state is de-facto treated
        # as a state.
        self._setAlternativePolicy()

        self.queue.update('/f0', ADDED)
        self.queue.update('/f0', CHANGED)
        self.queue._p_jar.transaction_manager.commit()

        # This commit should not raise a conflict
        self._insane_update(self.queue2, '/f0', REMOVED)
        self.queue2._p_jar.transaction_manager.commit()

        # In this scenario (the incoming new state has a REMOVED event),
        # the new state is disregarded and the old state is used. We are
        # left with a CHANGED_ADDED event. (see queue.update method; ADDED
        # plus CHANGED results in CHANGED_ADDED)
        self.queue._p_jar.sync()
        self.queue2._p_jar.sync()
        self.assertEquals(len(self.queue), 1)
        self.assertEquals(len(self.queue2), 1)
        event1 = self.queue.getEvent('/f0')
        event2 = self.queue2.getEvent('/f0')
        self.failUnless(event1 == event2 == CHANGED_ADDED)

    def test_unresolved_new_old_current_all_different_2(self):
        # If the events we get from the current, new and old states are
        # all different, we throw in the towel in the form of a conflict.
        # This test relies on the fact that no OLD state is de-facto treated
        # as a state.

        # Mutilate the logger so we don't see complaints about the
        # conflict we are about to provoke
        from Products.QueueCatalog.QueueCatalog import logger
        logger.disabled = 1

        self.queue.update('/f0', ADDED)
        self.queue.update('/f0', CHANGED)
        self.queue._p_jar.transaction_manager.commit()

        self.queue2.update('/f0', ADDED)
        self.queue2.update('/f0', CHANGED)
        self.queue2._p_jar.transaction_manager.commit()

        self.queue.update('/f0', CHANGED)
        self.queue._p_jar.transaction_manager.commit()

        # This commit should now raise a conflict
        self._insane_update(self.queue2, '/f0', REMOVED)
        self.assertRaises(ConflictError,
                          self.queue2._p_jar.transaction_manager.commit)

        # cleanup the logger
        logger.disabled = 0

    def test_resolved_new_old_current_all_different_2(self):
        # If the events we get from the current, new and old states are
        # all different and the SAFE_POLICY conflict resolution policy is
        # not enforced, the conflict resolves without bloodshed.
        self._setAlternativePolicy()

        self.queue.update('/f0', ADDED)
        self.queue.update('/f0', CHANGED)
        self.queue._p_jar.transaction_manager.commit()

        self.queue2.update('/f0', ADDED)
        self.queue2.update('/f0', CHANGED)
        self.queue2._p_jar.transaction_manager.commit()

        self.queue.update('/f0', CHANGED)
        self.queue._p_jar.transaction_manager.commit()

        # This commit should not raise a conflict
        self._insane_update(self.queue2, '/f0', REMOVED)
        self.queue2._p_jar.transaction_manager.commit()

        # In this scenario (the incoming new state has a REMOVED event),
        # we will take the new state to resolve the conflict, because its
        # generation number is higher then the oldstate and current state.
        self.queue._p_jar.sync()
        self.queue2._p_jar.sync()
        self.assertEquals(len(self.queue), 1)
        self.assertEquals(len(self.queue2), 1)
        event1 = self.queue.getEvent('/f0')
        event2 = self.queue2.getEvent('/f0')
        self.failUnless(event1 == event2 == REMOVED)
Exemple #17
0
class RecoverTest(unittest.TestCase):
    """Tests for ZODB.fsrecover against deliberately damaged FileStorages.

    Each test populates a temporary FileStorage, corrupts it in a specific
    way, runs recovery into a second file and inspects recovery's stdout.

    NOTE(review): Python 2-era code (StringIO.StringIO, file(), assert_,
    base64.decodestring).
    """

    level = 2

    # Path of the source database file; set in setUp.
    path = None

    def setUp(self):
        """Create and populate a source .fs file and pick a dest path."""
        self.path = tempfile.mktemp(suffix=".fs")
        self.storage = FileStorage(self.path)
        self.populate()
        self.dest = tempfile.mktemp(suffix=".fs")
        # Holds the FileStorage opened on the recovered file, if any.
        self.recovered = None

    def tearDown(self):
        """Close all storages and delete the files the test created."""
        self.storage.close()
        if self.recovered is not None:
            self.recovered.close()
        self.storage.cleanup()
        # Re-open the destination briefly so cleanup() can delete its files.
        temp = FileStorage(self.dest)
        temp.close()
        temp.cleanup()

    def populate(self):
        """Fill the source storage with many small committed transactions."""
        db = ZODB.DB(self.storage)
        cn = db.open()
        rt = cn.root()

        # Create a bunch of objects; the Data.fs is about 100KB.
        for i in range(50):
            d = rt[i] = PersistentMapping()
            transaction.commit()
            for j in range(50):
                d[j] = "a" * j
            transaction.commit()

    def damage(self, num, size):
        """Corrupt the closed source file with num runs of size null bytes."""
        self.storage.close()
        # Drop size null bytes into num random spots.
        for i in range(num):
            offset = random.randint(0, self.storage._pos - size)
            f = open(self.path, "a+b")
            f.seek(offset)
            f.write("\0" * size)
            f.close()

    ITERATIONS = 5

    # Run recovery, from self.path to self.dest.  Return whatever
    # recovery printed to stdout, as a string.
    def recover(self):
        orig_stdout = sys.stdout
        faux_stdout = StringIO.StringIO()
        try:
            sys.stdout = faux_stdout
            try:
                ZODB.fsrecover.recover(self.path,
                                       self.dest,
                                       verbose=0,
                                       partial=True,
                                       force=False,
                                       pack=1)
            except SystemExit:
                raise RuntimeError("recover tried to exit")
        finally:
            sys.stdout = orig_stdout
        return faux_stdout.getvalue()

    # Caution:  because recovery is robust against many kinds of damage,
    # it's almost impossible for a call to self.recover() to raise an
    # exception.  As a result, these tests may pass even if fsrecover.py
    # is broken badly.  testNoDamage() tries to ensure that at least
    # recovery doesn't produce any error msgs if the input .fs is in
    # fact not damaged.
    def testNoDamage(self):
        output = self.recover()
        self.assert_('error' not in output, output)
        self.assert_('\n0 bytes removed during recovery' in output, output)

        # Verify that the recovered database is identical to the original.
        before = file(self.path, 'rb')
        before_guts = before.read()
        before.close()

        after = file(self.dest, 'rb')
        after_guts = after.read()
        after.close()

        self.assertEqual(before_guts, after_guts,
                         "recovery changed a non-damaged .fs file")

    def testOneBlock(self):
        """One 1KB run of damage per iteration; recovery must report errors."""
        for i in range(self.ITERATIONS):
            self.damage(1, 1024)
            output = self.recover()
            self.assert_('error' in output, output)
            self.recovered = FileStorage(self.dest)
            self.recovered.close()
            # The recovered file becomes the source for the next round.
            os.remove(self.path)
            os.rename(self.dest, self.path)

    def testFourBlocks(self):
        """Four 512-byte runs of damage per iteration."""
        for i in range(self.ITERATIONS):
            self.damage(4, 512)
            output = self.recover()
            self.assert_('error' in output, output)
            self.recovered = FileStorage(self.dest)
            self.recovered.close()
            os.remove(self.path)
            os.rename(self.dest, self.path)

    def testBigBlock(self):
        """A single 32KB run of damage per iteration."""
        for i in range(self.ITERATIONS):
            self.damage(1, 32 * 1024)
            output = self.recover()
            self.assert_('error' in output, output)
            self.recovered = FileStorage(self.dest)
            self.recovered.close()
            os.remove(self.path)
            os.rename(self.dest, self.path)

    def testBadTransaction(self):
        # Find transaction headers and blast them.

        L = self.storage.undoLog()
        r = L[3]
        tid = base64.decodestring(r["id"] + "\n")
        pos1 = self.storage._txn_find(tid, 0)

        r = L[8]
        tid = base64.decodestring(r["id"] + "\n")
        pos2 = self.storage._txn_find(tid, 0)

        self.storage.close()

        # Overwrite the entire header.
        f = open(self.path, "a+b")
        f.seek(pos1 - 50)
        f.write("\0" * 100)
        f.close()
        output = self.recover()
        self.assert_('error' in output, output)
        self.recovered = FileStorage(self.dest)
        self.recovered.close()
        os.remove(self.path)
        os.rename(self.dest, self.path)

        # Overwrite part of the header.
        f = open(self.path, "a+b")
        f.seek(pos2 + 10)
        f.write("\0" * 100)
        f.close()
        output = self.recover()
        self.assert_('error' in output, output)
        self.recovered = FileStorage(self.dest)
        self.recovered.close()

    # Issue 1846:  When a transaction had 'c' status (not yet committed),
    # the attempt to open a temp file to write the trailing bytes fell
    # into an infinite loop.
    def testUncommittedAtEnd(self):
        # Find a transaction near the end.
        L = self.storage.undoLog()
        r = L[1]
        tid = base64.decodestring(r["id"] + "\n")
        pos = self.storage._txn_find(tid, 0)

        # Overwrite its status with 'c'.
        f = open(self.path, "r+b")
        f.seek(pos + 16)
        current_status = f.read(1)
        self.assertEqual(current_status, ' ')
        f.seek(pos + 16)
        f.write('c')
        f.close()

        # Try to recover.  The original bug was that this never completed --
        # infinite loop in fsrecover.py.  Also, in the ZODB 3.2 line,
        # reference to an undefined global masked the infinite loop.
        self.recover()

        # Verify the destination got truncated.
        self.assertEqual(os.path.getsize(self.dest), pos)

        # Get rid of the temp file holding the truncated bytes.
        os.remove(ZODB.fsrecover._trname)
Exemple #18
0
class RecoverTest(unittest.TestCase):
    """Exercise ZODB.fsrecover against progressively damaged FileStorages.

    Each test populates a source FileStorage (self.path), corrupts it in a
    controlled way, runs fsrecover into self.dest, and verifies both the
    recovery output and that the recovered file opens as a FileStorage.
    """

    level = 2

    # Path of the source storage; assigned in setUp, consumed by tearDown.
    path = None

    def setUp(self):
        # NOTE(review): tempfile.mktemp is race-prone, but FileStorage
        # wants a path that does not yet exist, so it is kept here.
        self.path = tempfile.mktemp(suffix=".fs")
        self.storage = FileStorage(self.path)
        self.populate()
        self.dest = tempfile.mktemp(suffix=".fs")
        self.recovered = None

    def tearDown(self):
        self.storage.close()
        if self.recovered is not None:
            self.recovered.close()
        self.storage.cleanup()
        # Open (and immediately discard) the destination so FileStorage's
        # cleanup() removes its sibling files (.index, .lock, .tmp, ...).
        temp = FileStorage(self.dest)
        temp.close()
        temp.cleanup()

    def populate(self):
        """Fill the source storage; the resulting Data.fs is about 100KB."""
        db = ZODB.DB(self.storage)
        cn = db.open()
        rt = cn.root()

        # Create a bunch of objects across many small transactions.
        for i in range(50):
            d = rt[i] = PersistentMapping()
            transaction.commit()
            for j in range(50):
                d[j] = "a" * j
            transaction.commit()

    def damage(self, num, size):
        """Close the storage and overwrite `size` null bytes at each of
        `num` random offsets within the file.
        """
        self.storage.close()
        # Must be "r+b", not "a+b": in append mode every write is forced
        # to the end of the file (C standard / POSIX O_APPEND), so the
        # seek would be ignored and the damage would never land at the
        # chosen offset.
        f = open(self.path, "r+b")
        try:
            # Drop size null bytes into num random spots.
            for i in range(num):
                offset = random.randint(0, self.storage._pos - size)
                f.seek(offset)
                f.write("\0" * size)
        finally:
            f.close()

    ITERATIONS = 5

    def recover(self):
        """Run recovery from self.path to self.dest.

        Returns whatever recovery printed to stdout, as a string.
        """
        orig_stdout = sys.stdout
        faux_stdout = StringIO.StringIO()
        try:
            sys.stdout = faux_stdout
            try:
                ZODB.fsrecover.recover(self.path, self.dest,
                        verbose=0, partial=True, force=False, pack=1)
            except SystemExit:
                raise RuntimeError("recover tried to exit")
        finally:
            sys.stdout = orig_stdout
        return faux_stdout.getvalue()

    # Caution:  because recovery is robust against many kinds of damage,
    # it's almost impossible for a call to self.recover() to raise an
    # exception.  As a result, these tests may pass even if fsrecover.py
    # is broken badly.  testNoDamage() tries to ensure that at least
    # recovery doesn't produce any error msgs if the input .fs is in
    # fact not damaged.
    def testNoDamage(self):
        output = self.recover()
        self.assertTrue('error' not in output, output)
        self.assertTrue('\n0 bytes removed during recovery' in output, output)

        # Verify that the recovered database is identical to the original.
        before = open(self.path, 'rb')
        before_guts = before.read()
        before.close()

        after = open(self.dest, 'rb')
        after_guts = after.read()
        after.close()

        self.assertEqual(before_guts, after_guts,
                         "recovery changed a non-damaged .fs file")

    def _damage_and_recover(self, num, size):
        """Damage/recover ITERATIONS times: corrupt the source, recover,
        check an error was reported, verify the recovered file opens as a
        FileStorage, then promote it to be the next round's source.
        """
        for i in range(self.ITERATIONS):
            self.damage(num, size)
            output = self.recover()
            self.assertTrue('error' in output, output)
            self.recovered = FileStorage(self.dest)
            self.recovered.close()
            os.remove(self.path)
            os.rename(self.dest, self.path)

    def testOneBlock(self):
        self._damage_and_recover(1, 1024)

    def testFourBlocks(self):
        self._damage_and_recover(4, 512)

    def testBigBlock(self):
        self._damage_and_recover(1, 32 * 1024)

    def testBadTransaction(self):
        """Blast transaction headers and check recovery reports errors."""
        # Find two transaction header positions via the undo log.
        L = self.storage.undoLog()
        r = L[3]
        tid = base64.decodestring(r["id"] + "\n")
        pos1 = self.storage._txn_find(tid, 0)

        r = L[8]
        tid = base64.decodestring(r["id"] + "\n")
        pos2 = self.storage._txn_find(tid, 0)

        self.storage.close()

        # Overwrite the entire header.  "r+b" (not "a+b") so the write
        # actually lands at the seek position instead of appending.
        f = open(self.path, "r+b")
        f.seek(pos1 - 50)
        f.write("\0" * 100)
        f.close()
        output = self.recover()
        self.assertTrue('error' in output, output)
        self.recovered = FileStorage(self.dest)
        self.recovered.close()
        os.remove(self.path)
        os.rename(self.dest, self.path)

        # Overwrite part of the header.
        f = open(self.path, "r+b")
        f.seek(pos2 + 10)
        f.write("\0" * 100)
        f.close()
        output = self.recover()
        self.assertTrue('error' in output, output)
        self.recovered = FileStorage(self.dest)
        self.recovered.close()

    # Issue 1846:  When a transaction had 'c' status (not yet committed),
    # the attempt to open a temp file to write the trailing bytes fell
    # into an infinite loop.
    def testUncommittedAtEnd(self):
        # Find a transaction near the end.
        L = self.storage.undoLog()
        r = L[1]
        tid = base64.decodestring(r["id"] + "\n")
        pos = self.storage._txn_find(tid, 0)

        # Overwrite its status with 'c'.
        f = open(self.path, "r+b")
        f.seek(pos + 16)
        current_status = f.read(1)
        self.assertEqual(current_status, ' ')
        f.seek(pos + 16)
        f.write('c')
        f.close()

        # Try to recover.  The original bug was that this never completed --
        # infinite loop in fsrecover.py.  Also, in the ZODB 3.2 line,
        # reference to an undefined global masked the infinite loop.
        self.recover()

        # Verify the destination got truncated.
        self.assertEqual(os.path.getsize(self.dest), pos)

        # Get rid of the temp file holding the truncated bytes.
        os.remove(ZODB.fsrecover._trname)