def checkBackwardTimeTravelWithRevertWhenStale(self):
    # With revert_when_stale enabled, a stale connection (e.g. after
    # failover to a lagging asynchronous replica) must be detected by the
    # poller, which then invalidates every object changed in the interval
    # so the cache reverts to the replica's earlier state.
    self._storage = self.make_storage(revert_when_stale=True)
    import os
    import shutil
    import tempfile
    from ZODB.FileStorage import FileStorage
    db = DB(self._storage)
    try:
        transaction.begin()
        conn = db.open()
        root = conn.root()
        root["alpha"] = PersistentMapping()
        transaction.commit()

        # Simulate failover to an out-of-date async replica: snapshot the
        # database now, commit another change, then restore the snapshot.
        tmpdir = tempfile.mkdtemp()
        try:
            transaction.begin()
            snapshot = FileStorage(os.path.join(tmpdir, "Data.fs"))
            snapshot.copyTransactionsFrom(conn._storage)
            root["beta"] = PersistentMapping()
            transaction.commit()
            self.assertTrue("beta" in root)
            conn._storage.zap_all(reset_oid=False, slow=True)
            conn._storage.copyTransactionsFrom(snapshot)
            snapshot.close()
        finally:
            shutil.rmtree(tmpdir)

        # The newer state is still cached until we poll.
        self.assertTrue("beta" in root)
        # sync() triggers poll_invalidations(), which must notice the
        # backward time travel.
        conn.sync()
        # The root object must have been invalidated...
        self.assertEqual(root._p_changed, None)
        # ...and reverted to the snapshot's earlier state.
        self.assertFalse("beta" in root)
    finally:
        db.close()
def checkBackwardTimeTravelWithRevertWhenStale(self):
    # If revert_when_stale is true and the connection goes stale (such as
    # through failover to an asynchronous slave that is not fully up to
    # date), the poller should detect the backward time travel and
    # invalidate all objects that changed in the interval.
    self._storage = self.make_storage(revert_when_stale=True)
    import os
    import shutil
    import tempfile
    from ZODB.FileStorage import FileStorage
    db = DB(self._storage)
    try:
        transaction.begin()
        connection = db.open()
        root_obj = connection.root()
        root_obj['alpha'] = PersistentMapping()
        transaction.commit()

        # To fake a failover to a lagging replica: snapshot the database,
        # commit one more change, then restore the snapshot over it.
        workdir = tempfile.mkdtemp()
        try:
            transaction.begin()
            src_fs = FileStorage(os.path.join(workdir, 'Data.fs'))
            src_fs.copyTransactionsFrom(connection._storage)
            root_obj['beta'] = PersistentMapping()
            transaction.commit()
            self.assertTrue('beta' in root_obj)
            connection._storage.zap_all(reset_oid=False, slow=True)
            connection._storage.copyTransactionsFrom(src_fs)
            src_fs.close()
        finally:
            shutil.rmtree(workdir)

        # Before polling, the cache still holds the post-snapshot state.
        self.assertTrue('beta' in root_obj)
        # sync() calls poll_invalidations().
        connection.sync()
        # The root should have been invalidated...
        self.assertEqual(root_obj._p_changed, None)
        # ...and should now reflect the earlier, pre-'beta' state.
        self.assertFalse('beta' in root_obj)
    finally:
        db.close()
def checkBackwardTimeTravel(self):
    # When a failover event causes the storage to switch to an
    # asynchronous slave that is not fully up to date, the poller
    # should notice that backward time travel has occurred and
    # handle the situation by invalidating all objects that have
    # changed in the interval. (Currently, we simply invalidate all
    # objects when backward time travel occurs.)
    import os
    import shutil
    import tempfile
    from ZODB.FileStorage import FileStorage
    db = DB(self._storage)
    try:
        c = db.open()
        r = c.root()
        r['alpha'] = PersistentMapping()
        transaction.commit()

        # To simulate failover to an out of date async slave, take
        # a snapshot of the database at this point, change some
        # object, then restore the database to its earlier state.
        d = tempfile.mkdtemp()
        try:
            fs = FileStorage(os.path.join(d, 'Data.fs'))
            try:
                fs.copyTransactionsFrom(c._storage)
                r['beta'] = PersistentMapping()
                transaction.commit()
                self.assertIn('beta', r)
                c._storage.zap_all()
                c._storage.copyTransactionsFrom(fs)
            finally:
                # Close the snapshot even if a copy/commit above fails,
                # so the FileStorage (and its lock) is not leaked.
                fs.close()
        finally:
            shutil.rmtree(d)

        # r should still be in the cache.
        self.assertIn('beta', r)
        # Now sync, which will call poll_invalidations().
        c.sync()
        # r should have been invalidated
        self.assertIsNone(r._p_changed)
        # r should be reverted to its earlier state.
        self.assertNotIn('beta', r)
    finally:
        db.close()
def checkBackwardTimeTravel(self):
    # After failover to an asynchronous slave that lags behind the
    # master, the poller must detect that time has apparently moved
    # backward and react by invalidating objects changed in between.
    # (Currently every object is invalidated in that situation.)
    import os
    import shutil
    import tempfile
    from ZODB.FileStorage import FileStorage
    db = DB(self._storage)
    try:
        conn = db.open()
        root = conn.root()
        root['alpha'] = PersistentMapping()
        transaction.commit()

        # Fake the failover: snapshot the database, commit one more
        # change, then restore the database from the snapshot.
        tmpdir = tempfile.mkdtemp()
        try:
            backup = FileStorage(os.path.join(tmpdir, 'Data.fs'))
            backup.copyTransactionsFrom(conn._storage)
            root['beta'] = PersistentMapping()
            transaction.commit()
            self.assertTrue('beta' in root)
            conn._storage.zap_all()
            conn._storage.copyTransactionsFrom(backup)
            backup.close()
        finally:
            shutil.rmtree(tmpdir)

        # The post-snapshot state is still in the cache.
        self.assertTrue('beta' in root)
        # sync() invokes poll_invalidations().
        conn.sync()
        # The root must have been invalidated...
        self.assertEqual(root._p_changed, None)
        # ...and rolled back to the pre-'beta' state.
        self.assertFalse('beta' in root)
    finally:
        db.close()
def checkBackwardTimeTravelWithoutRevertWhenStale(self):
    # With revert_when_stale off (the default), a stale connection
    # (e.g. after failover to a lagging asynchronous replica) should
    # make the poller raise ReadConflictError instead of reverting.
    self._storage = self.make_storage(revert_when_stale=False)
    import os
    import shutil
    import tempfile
    from ZODB.FileStorage import FileStorage
    db = DB(self._storage)
    try:
        conn = db.open()
        root = conn.root()
        root["alpha"] = PersistentMapping()
        transaction.commit()

        # Simulate the failover: snapshot the database, commit another
        # change, then restore the database to the snapshot.
        tmpdir = tempfile.mkdtemp()
        try:
            snapshot = FileStorage(os.path.join(tmpdir, "Data.fs"))
            snapshot.copyTransactionsFrom(conn._storage)
            root["beta"] = PersistentMapping()
            transaction.commit()
            self.assertTrue("beta" in root)
            conn._storage.zap_all(reset_oid=False, slow=True)
            conn._storage.copyTransactionsFrom(snapshot)
            snapshot.close()
        finally:
            shutil.rmtree(tmpdir)

        # sync() invokes poll_invalidations().
        conn.sync()
        # Loading an object from the stale storage must now raise
        # ReadConflictError; deactivate first to force a reload.
        root._p_deactivate()
        self.assertRaises(ReadConflictError, lambda: root["beta"])
    finally:
        db.close()
def checkBackwardTimeTravelWithoutRevertWhenStale(self):
    # If revert_when_stale is false (the default), when the database
    # connection is stale (such as through failover to an asynchronous
    # slave that is not fully up to date), the poller should notice the
    # backward time travel and raise a ReadConflictError.
    self._storage = self.make_storage(revert_when_stale=False)
    import os
    import shutil
    import tempfile
    from ZODB.FileStorage import FileStorage
    db = DB(self._storage)
    try:
        connection = db.open()
        root_obj = connection.root()
        root_obj['alpha'] = PersistentMapping()
        transaction.commit()

        # To simulate failover to an out of date async slave: take a
        # snapshot here, change an object, then restore the snapshot.
        workdir = tempfile.mkdtemp()
        try:
            src_fs = FileStorage(os.path.join(workdir, 'Data.fs'))
            src_fs.copyTransactionsFrom(connection._storage)
            root_obj['beta'] = PersistentMapping()
            transaction.commit()
            self.assertTrue('beta' in root_obj)
            connection._storage.zap_all(reset_oid=False)
            connection._storage.copyTransactionsFrom(src_fs)
            src_fs.close()
        finally:
            shutil.rmtree(workdir)

        # Sync, which will call poll_invalidations().
        connection.sync()
        # A fresh load must now fail with ReadConflictError; deactivate
        # the root so the next access actually hits the storage.
        root_obj._p_deactivate()
        self.assertRaises(ReadConflictError, lambda: root_obj['beta'])
    finally:
        db.close()
class NEOMigrate(object):
    """Copy a whole database between a FileStorage and a NEO cluster.

    The direction is inferred from the positional arguments: if ``source``
    names an existing filesystem path it is opened read-only as a
    FileStorage and imported into NEO; otherwise ``source`` is treated as
    a NEO master address list and exported to a new FileStorage at
    ``destination``.
    """
    from neo.lib.config import OptionList

    @classmethod
    def _buildOptionParser(cls):
        # Declare the command-line interface on the class option parser.
        parser = cls.option_parser
        parser.description = "NEO <-> FileStorage conversion tool"
        parser('c', 'cluster', required=True, help='the NEO cluster name')
        parser.bool('q', 'quiet', help='print nothing to standard output')
        parser.argument('source', help='the source database')
        parser.argument('destination', help='the destination database')

    def __init__(self, config):
        # Pop the options we consume; whatever remains in ``config`` is
        # forwarded verbatim to the NEO client storage.
        self.name = config.pop('cluster')
        self.source = config.pop('source')
        self.destination = config.pop('destination')
        self.quiet = config.pop('quiet', False)
        from ZODB.FileStorage import FileStorage
        from neo.client.Storage import Storage as NEOStorage
        if os.path.exists(self.source):
            # FileStorage -> NEO import.
            if not self.quiet:
                print(import_warning)
            self.src = FileStorage(file_name=self.source, read_only=True)
            self.dst = NEOStorage(master_nodes=self.destination,
                                  name=self.name, **config)
        else:
            # NEO -> FileStorage export.
            self.src = NEOStorage(master_nodes=self.source, name=self.name,
                                  read_only=True, **config)
            self.dst = FileStorage(file_name=self.destination)

    def run(self):
        """Perform the copy, reporting progress unless --quiet was given."""
        if not self.quiet:
            print("Migrating from %s to %s" % (self.source, self.destination))
        started_at = time.time()
        self.dst.copyTransactionsFrom(self.src)
        if not self.quiet:
            elapsed = time.time() - started_at
            print("Migration done in %3.5f" % elapsed)