def get_store(): """ Returns a reference to Storm Store """ zstorm = ZStorm() zstorm.set_default_uri(GLSetting.store_name, GLSetting.file_versioned_db + '?foreign_keys=ON') return zstorm.get(GLSetting.store_name)
def get_store(): """ Returns a reference to Storm Store """ zstorm = ZStorm() zstorm.set_default_uri(GLSetting.store_name, GLSetting.db_uri) return zstorm.get(GLSetting.store_name)
class BaseZStormTestCase(TestCase):

    def setUp(self):
        self.zstorm = ZStorm()
        self.threadpool = FakeThreadPool()
        self.transactor = Transactor(self.threadpool)
        # XXX use storm.tests.mocker
        # See storm.tests.twisted.transaction for an example of its usage
        #
        # self.transaction = self.mocker.mock()
        # self.transactor = Transactor(self.threadpool, self.transaction)
        self.transactor = Transactor(self.threadpool)
        store = self.zstorm.create('testDB', 'sqlite:///test.db')
        store.execute(createQuery)
        store.commit()

    def tearDown(self):
        # Reset the utility to clean up the StoreSynchronizers from the
        # transaction.
        self.zstorm._reset()
        # Free the transaction to avoid having errors that cross
        # test cases.
        transaction.manager.free(transaction.get())
        # Remove the test database file
        os.remove('test.db')

    def getStore(self):
        return self.zstorm.get('testDB')
def setupStore(uri, name):
    """Setup the main store.

    @param uri: The URI for the main database.
    @param name: The name to register the store under.
    """
    zstorm = ZStorm()
    zstorm.set_default_uri(name, uri)
    provideUtility(zstorm)
    return zstorm.get(name)
def setupStore(config):
    """Setup the main store.

    A C{ZStorm} instance is configured and registered as a global utility.

    @param config: A configuration instance.
    @return: A configured C{ZStorm} instance.
    """
    zstorm = ZStorm()
    provideUtility(zstorm)
    uri = config.get('store', 'main-uri')
    zstorm.set_default_uri('main', uri)
    return zstorm
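A hedged sketch of feeding the config-based setupStore() above; it assumes a ConfigParser-style object with a [store] section holding a main-uri option, matching the config.get('store', 'main-uri') call, and the SQLite URI is just a placeholder.

try:
    from configparser import ConfigParser  # Python 3
except ImportError:
    from ConfigParser import SafeConfigParser as ConfigParser  # Python 2

config = ConfigParser()
config.add_section('store')
config.set('store', 'main-uri', 'sqlite:///main.db')

zstorm = setupStore(config)
store = zstorm.get('main')  # the store registered under the 'main' name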
def make(self, dependencies):
    """Create a L{ZStorm} resource to be used by tests.

    @return: A L{ZStorm} object that will be shared among all tests using
        this resource manager.
    """
    if self._zstorm is None:
        zstorm = ZStorm()
        schema_zstorm = ZStorm()
        databases = self._databases
        # Adapt the old databases format to the new one, for backward
        # compatibility. This should be eventually dropped.
        if isinstance(databases, dict):
            databases = [{"name": name, "uri": uri, "schema": schema}
                         for name, (uri, schema) in databases.iteritems()]
        for database in databases:
            name = database["name"]
            uri = database["uri"]
            schema = database["schema"]
            schema_uri = database.get("schema-uri", uri)
            self._schemas[name] = schema
            zstorm.set_default_uri(name, uri)
            schema_zstorm.set_default_uri(name, schema_uri)
            store = zstorm.get(name)
            self._set_commit_proxy(store)
            schema_store = schema_zstorm.get(name)
            schema.upgrade(schema_store)
            # Clean up tables here to ensure that the first test run starts
            # with an empty db
            schema.delete(schema_store)
        provideUtility(zstorm)
        self._zstorm = zstorm
        self._schema_zstorm = schema_zstorm
    elif getUtility(IZStorm) is not self._zstorm:
        # This probably means that the test code has overwritten our
        # utility, let's re-register it.
        provideUtility(self._zstorm)
    return self._zstorm
def get_store(): """ Returns a reference to Storm Store """ zstorm = ZStorm() zstorm.set_default_uri(GLSettings.store_name, GLSettings.db_uri) retry_count = 0 last_error = None while retry_count < 5: try: return zstorm.get(GLSettings.store_name) except psycopg2.OperationalError as e: retry_count += 1 last_error = e if retry_count == 5 else None continue raise Exception('could not connect to database after 5 retries: %s' % (last_error, ))
def setup_environment(cfg):
    from zope.component import getGlobalSiteManager
    from storm.zope.zstorm import ZStorm, IZStorm

    gsm = getGlobalSiteManager()
    ex = gsm.queryUtility(IZStorm)
    if ex:
        for name, store in ex.iterstores():
            ex.remove(store)
            try:
                store.close()
            except Exception:
                log.exception("Failed to close a store")
        gsm.unregisterUtility(ex)

    zs = ZStorm()
    gsm.registerUtility(zs)
    zs.set_default_uri("tilde", cfg["dburl"])
    return cfg
def test_make_zstorm_overwritten(self):
    """
    L{ZStormResourceManager.make} registers its own ZStorm again if a test
    has registered a new ZStorm utility overwriting the resource one.
    """
    zstorm = self.resource.make([])
    provideUtility(ZStorm())
    self.resource.make([])
    self.assertIs(zstorm, getUtility(IZStorm))
class TemporaryDatabaseMixin(object):

    def setUp(self):
        super(TemporaryDatabaseMixin, self).setUp()
        self.uri = 'sqlite:///%s' % self.fs.makePath()
        self.zstorm = ZStorm()
        self.zstorm.set_default_uri('main', self.uri)
        provideUtility(self.zstorm)
        self.store = self.zstorm.get('main')

    def tearDown(self):
        self.zstorm = getUtility(IZStorm)
        self.zstorm.remove(self.zstorm.get('main'))
        super(TemporaryDatabaseMixin, self).tearDown()
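A hedged sketch of a test case built on the mixin above; the TestCase base class, the self.fs fixture providing makePath(), and the getUtility/IZStorm imports are assumed to come from the surrounding test module and are not shown here.

class ExampleStoreTest(TemporaryDatabaseMixin, TestCase):

    def test_store_registered_as_utility(self):
        # The mixin registers its ZStorm instance as the global utility
        # and exposes the default 'main' store as self.store.
        self.assertTrue(getUtility(IZStorm) is self.zstorm)
        self.assertTrue(self.zstorm.get('main') is self.store)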
class ZStormTest(TestHelper): def is_supported(self): return has_transaction def setUp(self): self.zstorm = ZStorm() def tearDown(self): # Reset the utility to cleanup the StoreSynchronizer's from the # transaction. self.zstorm._reset() # Free the transaction to avoid having errors that cross # test cases. transaction.manager.free(transaction.get()) def test_create(self): store = self.zstorm.create(None, "sqlite:") self.assertTrue(isinstance(store, Store)) def test_create_twice_unnamed(self): store = self.zstorm.create(None, "sqlite:") store.execute("CREATE TABLE test (id INTEGER)") store.commit() store = self.zstorm.create(None, "sqlite:") self.assertRaises(OperationalError, store.execute, "SELECT * FROM test") def test_create_twice_same_name(self): store = self.zstorm.create("name", "sqlite:") self.assertRaises(ZStormError, self.zstorm.create, "name", "sqlite:") def test_create_and_get_named(self): store = self.zstorm.create("name", "sqlite:") self.assertTrue(self.zstorm.get("name") is store) def test_create_and_get_named_another_thread(self): store = self.zstorm.create("name", "sqlite:") raised = [] def f(): try: self.zstorm.get("name") except ZStormError: raised.append(True) thread = threading.Thread(target=f) thread.start() thread.join() self.assertTrue(raised) def test_get_unexistent(self): self.assertRaises(ZStormError, self.zstorm.get, "name") def test_get_with_uri(self): store = self.zstorm.get("name", "sqlite:") self.assertTrue(isinstance(store, Store)) self.assertTrue(self.zstorm.get("name") is store) self.assertTrue(self.zstorm.get("name", "sqlite:") is store) def test_set_default_uri(self): self.zstorm.set_default_uri("name", "sqlite:") store = self.zstorm.get("name") self.assertTrue(isinstance(store, Store)) def test_create_default(self): self.zstorm.set_default_uri("name", "sqlite:") store = self.zstorm.create("name") self.assertTrue(isinstance(store, Store)) def test_create_default_twice(self): self.zstorm.set_default_uri("name", "sqlite:") self.zstorm.create("name") self.assertRaises(ZStormError, self.zstorm.create, "name") def test_iterstores(self): store1 = self.zstorm.create(None, "sqlite:") store2 = self.zstorm.create(None, "sqlite:") store3 = self.zstorm.create("name", "sqlite:") stores = [] for name, store in self.zstorm.iterstores(): stores.append((name, store)) self.assertEquals(len(stores), 3) self.assertEquals(set(stores), set([(None, store1), (None, store2), ("name", store3)])) def test_get_name(self): store = self.zstorm.create("name", "sqlite:") self.assertEquals(self.zstorm.get_name(store), "name") def test_get_name_with_removed_store(self): store = self.zstorm.create("name", "sqlite:") self.assertEquals(self.zstorm.get_name(store), "name") self.zstorm.remove(store) self.assertEquals(self.zstorm.get_name(store), None) def test_default_databases(self): self.zstorm.set_default_uri("name1", "sqlite:1") self.zstorm.set_default_uri("name2", "sqlite:2") self.zstorm.set_default_uri("name3", "sqlite:3") default_uris = self.zstorm.get_default_uris() self.assertEquals(default_uris, {"name1": "sqlite:1", "name2": "sqlite:2", "name3": "sqlite:3"}) def test_register_store_for_tpc_transaction(self): """ Setting a store to use two-phase-commit mode, makes ZStorm call its begin() method when it joins the transaction. 
""" self.zstorm.set_default_uri("name", "sqlite:") self.zstorm.set_default_tpc("name", True) store = self.zstorm.get("name") xids = [] store.begin = lambda xid: xids.append(xid) store.execute("SELECT 1") [xid] = xids self.assertEqual(0, xid.format_id) self.assertEqual("_storm", xid.global_transaction_id[:6]) self.assertEqual("name", xid.branch_qualifier) def test_register_store_for_tpc_transaction_uses_per_transaction_id(self): """ Two stores in two-phase-commit mode joining the same transaction share the same global transaction ID. """ self.zstorm.set_default_uri("name1", "sqlite:1") self.zstorm.set_default_uri("name2", "sqlite:2") self.zstorm.set_default_tpc("name1", True) self.zstorm.set_default_tpc("name2", True) store1 = self.zstorm.get("name1") store2 = self.zstorm.get("name2") xids = [] store1.begin = lambda xid: xids.append(xid) store2.begin = lambda xid: xids.append(xid) store1.execute("SELECT 1") store2.execute("SELECT 1") [xid1, xid2] = xids self.assertEqual(xid1.global_transaction_id, xid2.global_transaction_id) def test_register_store_for_tpc_transaction_uses_unique_global_ids(self): """ Each global transaction gets assigned a unique ID. """ self.zstorm.set_default_uri("name", "sqlite:") self.zstorm.set_default_tpc("name", True) store = self.zstorm.get("name") xids = [] store.begin = lambda xid: xids.append(xid) store.execute("SELECT 1") transaction.abort() store.execute("SELECT 1") transaction.abort() [xid1, xid2] = xids self.assertNotEqual(xid1.global_transaction_id, xid2.global_transaction_id) def test_transaction_with_two_phase_commit(self): """ If a store is set to use TPC, than the associated data manager will call its prepare() and commit() methods when committing. """ self.zstorm.set_default_uri("name", "sqlite:") self.zstorm.set_default_tpc("name", True) store = self.zstorm.get("name") calls = [] store.begin = lambda xid: calls.append("begin") store.prepare = lambda: calls.append("prepare") store.commit = lambda: calls.append("commit") store.execute("SELECT 1") transaction.commit() self.assertEqual(["begin", "prepare", "commit"], calls) def test_transaction_with_single_and_two_phase_commit_stores(self): """ When there are both stores in single-phase and two-phase mode, the ones in single-phase mode are committed first. This makes it possible to actually achieve two-phase commit behavior when only one store doesn't support TPC. 
""" self.zstorm.set_default_uri("name1", "sqlite:1") self.zstorm.set_default_uri("name2", "sqlite:2") self.zstorm.set_default_tpc("name1", True) self.zstorm.set_default_tpc("name2", False) store1 = self.zstorm.get("name1") store2 = self.zstorm.get("name2") commits = [] store1.begin = lambda xid: None store1.prepare = lambda: None store1.commit = lambda: commits.append("commit1") store2.commit = lambda: commits.append("commit2") store1.execute("SELECT 1") store2.execute("SELECT 1") transaction.commit() self.assertEqual(["commit2", "commit1"], commits) def _isInTransaction(self, store): """Check if a Store is part of the current transaction.""" for dm in transaction.get()._resources: if isinstance(dm, StoreDataManager) and dm._store is store: return True return False def assertInTransaction(self, store): """Check that the given store is joined to the transaction.""" self.assertTrue(self._isInTransaction(store), "%r should be joined to the transaction" % store) def assertNotInTransaction(self, store): """Check that the given store is not joined to the transaction.""" self.assertTrue(not self._isInTransaction(store), "%r should not be joined to the transaction" % store) def test_wb_store_joins_transaction_on_register_event(self): """The Store joins the transaction when register-transaction is emitted. The Store tests check the various operations that trigger this event. """ store = self.zstorm.get("name", "sqlite:") self.assertNotInTransaction(store) store._event.emit("register-transaction") self.assertInTransaction(store) def test_wb_store_joins_transaction_on_use_after_commit(self): store = self.zstorm.get("name", "sqlite:") store.execute("SELECT 1") transaction.commit() self.assertNotInTransaction(store) store.execute("SELECT 1") self.assertInTransaction(store) def test_wb_store_joins_transaction_on_use_after_abort(self): store = self.zstorm.get("name", "sqlite:") store.execute("SELECT 1") transaction.abort() self.assertNotInTransaction(store) store.execute("SELECT 1") self.assertInTransaction(store) def test_wb_store_joins_transaction_on_use_after_tpc_commit(self): """ A store used after a two-phase commit re-joins the new transaction. """ self.zstorm.set_default_uri("name", "sqlite:") self.zstorm.set_default_tpc("name", True) store = self.zstorm.get("name") store.begin = lambda xid: None store.prepare = lambda: None store.commit = lambda: None store.execute("SELECT 1") transaction.commit() self.assertNotInTransaction(store) store.execute("SELECT 1") self.assertInTransaction(store) def test_wb_store_joins_transaction_on_use_after_tpc_abort(self): """ A store used after a rollback during a two-phase commit re-joins the new transaction. 
""" self.zstorm.set_default_uri("name", "sqlite:") self.zstorm.set_default_tpc("name", True) store = self.zstorm.get("name") store.begin = lambda xid: None store.prepare = lambda: None store.rollback = lambda: None store.execute("SELECT 1") transaction.abort() self.assertNotInTransaction(store) store.execute("SELECT 1") self.assertInTransaction(store) def test_remove(self): removed_store = self.zstorm.get("name", "sqlite:") self.zstorm.remove(removed_store) for name, store in self.zstorm.iterstores(): self.assertNotEquals(store, removed_store) self.assertRaises(ZStormError, self.zstorm.get, "name") def test_wb_removed_store_does_not_join_transaction(self): """If a store has been removed, it will not join the transaction.""" store = self.zstorm.get("name", "sqlite:") self.zstorm.remove(store) store.execute("SELECT 1") self.assertNotInTransaction(store) def test_wb_removed_store_does_not_join_future_transactions(self): """If a store has been removed after joining a transaction, it will not join new transactions.""" store = self.zstorm.get("name", "sqlite:") store.execute("SELECT 1") self.zstorm.remove(store) self.assertInTransaction(store) transaction.abort() store.execute("SELECT 1") self.assertNotInTransaction(store) def test_wb_cross_thread_store_does_not_join_transaction(self): """If a zstorm registered thread crosses over to another thread, it will not be usable.""" store = self.zstorm.get("name", "sqlite:") failures = [] def f(): # We perform this twice to show that ZStormError is raised # consistently (i.e. not just the first time). for i in range(2): try: store.execute("SELECT 1") except ZStormError: failures.append("ZStormError raised") except Exception, exc: failures.append("Expected ZStormError, got %r" % exc) else: failures.append("Expected ZStormError, nothing raised") if self._isInTransaction(store): failures.append("store was joined to transaction") thread = threading.Thread(target=f) thread.start() thread.join() self.assertEqual(failures, ["ZStormError raised"] * 2)
import psycopg2
import transaction

from psycopg2.extensions import TransactionRollbackError

from storm.databases.postgres import PostgresTimeoutTracer
from storm.exceptions import DisconnectionError, TimeoutError, ProgrammingError
from storm.tracer import install_tracer, remove_tracer_type, get_tracers
from storm.zope.zstorm import ZStorm

from backends.db.errors import (
    IntegrityError,
    NoTimeoutTracer,
    RetryLimitReached,
)

filesync_tm = transaction
filesync_zstorm = ZStorm()
filesync_zstorm.transaction_manager = filesync_tm

# these are the default retryable exceptions
RETRYABLE_EXCEPTIONS = (
    DisconnectionError,
    TransactionRollbackError,
    psycopg2.InternalError,
    psycopg2.OperationalError,
)

# This is the maximum time a transaction can take before pgkillactive kills it.
TRANSACTION_MAX_TIME = 600


def retryable_transaction(max_time=4.0, max_retries=3, variance=0.5,
def make(self, dependencies):
    """Create a L{ZStorm} resource to be used by tests.

    @return: A L{ZStorm} object that will be shared among all tests using
        this resource manager.
    """
    if self._zstorm is None:
        if self.use_global_zstorm:
            zstorm = global_zstorm
        else:
            zstorm = ZStorm()
        schema_zstorm = ZStorm()
        databases = self._databases

        # Adapt the old databases format to the new one, for backward
        # compatibility. This should be eventually dropped.
        if isinstance(databases, dict):
            databases = [{"name": name, "uri": uri, "schema": schema}
                         for name, (uri, schema) in databases.iteritems()]

        # Provide the global IZStorm utility before applying patches, so
        # patch code can get the zstorm object if needed (e.g. looking up
        # other stores).
        provideUtility(zstorm)

        for database in databases:
            name = database["name"]
            uri = database["uri"]
            zstorm.set_default_uri(name, uri)

            schema = database.get("schema")
            if schema is None:
                # The configuration for this database does not include a
                # schema definition, so we just setup the store (the user
                # code should apply the schema elsewhere, if any)
                continue

            schema_uri = database.get("schema-uri", uri)
            self._schemas[name] = schema
            schema_zstorm.set_default_uri(name, schema_uri)
            store = zstorm.get(name)
            self._set_commit_proxy(store)
            schema_store = schema_zstorm.get(name)

            # Disable schema autocommits, we will commit everything at once
            schema.autocommit(False)
            try:
                schema.upgrade(schema_store)
            except UnknownPatchError:
                schema.drop(schema_store)
                schema_store.commit()
                schema.upgrade(schema_store)
            else:
                # Clean up tables here to ensure that the first test run
                # starts with an empty db
                schema.delete(schema_store)

        # Commit all schema changes across all stores
        transaction.commit()

        self._zstorm = zstorm
        self._schema_zstorm = schema_zstorm

    elif getUtility(IZStorm) is not self._zstorm:
        # This probably means that the test code has overwritten our
        # utility, let's re-register it.
        provideUtility(self._zstorm)

    return self._zstorm
class ZStormResourceManager(TestResourceManager): """Provide a L{ZStorm} resource to be used in test cases. The constructor is passed the details of the L{Store}s to be registered in the provided L{ZStore} resource. Then the C{make} and C{clean} methods make sure that such L{Store}s are properly setup and cleaned for each test. @param databases: A C{list} of C{dict}s holding the following keys: - 'name', the name of the store to be registered. - 'uri', the database URI to use to create the store. - 'schema', optionally, the L{Schema} for the tables in the store, if not given no schema will be applied. - 'schema-uri', optionally an alternate URI to use for applying the schema, if not given it defaults to 'uri'. @ivar force_delete: If C{True} for running L{Schema.delete} on a L{Store} even if no commit was performed by the test. Useful when running a test in a subprocess that might commit behind our back. @ivar use_global_zstorm: If C{True} then the C{global_zstorm} object from C{storm.zope.zstorm} will be used, instead of creating a new one. This is useful for code loading the zcml directives of C{storm.zope}. @ivar schema_stamp_dir: Optionally, a path to a directory that will be used to save timestamps of the schema's patch packages, so schema upgrades will be performed only when needed. This is just an optimisation to let the resource setup a bit faster. """ force_delete = False use_global_zstorm = False schema_stamp_dir = None def __init__(self, databases): super(ZStormResourceManager, self).__init__() self._databases = databases self._zstorm = None self._schema_zstorm = None self._commits = {} self._schemas = {} def make(self, dependencies): """Create a L{ZStorm} resource to be used by tests. @return: A L{ZStorm} object that will be shared among all tests using this resource manager. """ if self._zstorm is None: if self.use_global_zstorm: self._zstorm = global_zstorm else: self._zstorm = ZStorm() self._schema_zstorm = ZStorm() databases = self._databases # Adapt the old databases format to the new one, for backward # compatibility. This should be eventually dropped. if isinstance(databases, dict): databases = [{"name": name, "uri": uri, "schema": schema} for name, (uri, schema) in databases.iteritems()] # Provide the global IZStorm utility before applying patches, so # patch code can get the ztorm object if needed (e.g. looking up # other stores). provideUtility(self._zstorm) self._set_create_hook() for database in databases: name = database["name"] uri = database["uri"] schema = database.get("schema") schema_uri = database.get("schema-uri", uri) self._zstorm.set_default_uri(name, uri) if schema is not None: # The configuration for this database does not include a # schema definition, so we just setup the store (the user # code should apply the schema elsewhere, if any) self._schemas[name] = schema self._schema_zstorm.set_default_uri(name, schema_uri) self._ensure_schema(name, schema) # Commit all schema changes across all stores transaction.commit() elif getUtility(IZStorm) is not self._zstorm: # This probably means that the test code has overwritten our # utility, let's re-register it. provideUtility(self._zstorm) return self._zstorm def _set_create_hook(self): """ Set a hook in ZStorm.create, so we can lazily set commit proxies. 
""" self._zstorm.__real_create__ = self._zstorm.create def create_hook(name, uri=None): store = self._zstorm.__real_create__(name, uri=uri) if self._schemas.get(name) is not None: # Only set commit proxies for databases that have a schema # that we can use for cleanup self._set_commit_proxy(store) return store self._zstorm.create = create_hook def _set_commit_proxy(self, store): """Set a commit proxy to keep track of commits and clean up the tables. @param store: The L{Store} to set the commit proxy on. Any commit on this store will result in the associated tables to be cleaned upon tear down. """ store.__real_commit__ = store.commit def commit_proxy(): self._commits[store] = True store.__real_commit__() store.commit = commit_proxy def _ensure_schema(self, name, schema): """Ensure that the schema for the given database is up-to-date. As an optimisation, if the C{schema_stamp_dir} attribute is set, then this method performs a fast check based on the patch directory timestamp rather than the database patch table, so connections and upgrade queries can be skipped if there's no need. @param name: The name of the database to check. @param schema: The schema to be ensured. """ # If a schema stamp directory is set, then figure out whether there's # need to upgrade the schema by looking at timestamps. if self.schema_stamp_dir is not None: schema_mtime = self._get_schema_mtime(schema) schema_stamp_mtime = self._get_schema_stamp_mtime(name) # The modification time of the schema's patch directory matches our # timestamp, so the schema is already up-to-date if schema_mtime == schema_stamp_mtime: return # Save the modification time of the schema's patch directory so in # subsequent runs we'll know if we're already up-to-date self._set_schema_stamp_mtime(name, schema_mtime) schema_store = self._schema_zstorm.get(name) # Disable schema autocommits, we will commit everything at once schema.autocommit(False) try: schema.upgrade(schema_store) except UnknownPatchError: schema.drop(schema_store) schema_store.commit() schema.upgrade(schema_store) else: # Clean up tables here to ensure that the first test run starts # with an empty db schema.delete(schema_store) def _get_schema_mtime(self, schema): """ Return the modification time of the C{schema}'s patch directory. """ schema_stat = os.stat(os.path.dirname(schema._patch_package.__file__)) return int(schema_stat.st_mtime) def _get_schema_stamp_mtime(self, name): """ Return the modification time of schemas's patch directory, as saved in the stamp directory. """ # Let's create the stamp directory if it doesn't exist if not os.path.exists(self.schema_stamp_dir): os.makedirs(self.schema_stamp_dir) schema_stamp_path = os.path.join(self.schema_stamp_dir, name) # Get the last schema modification time we ran the upgrade for, or -1 # if this is our first run if os.path.exists(schema_stamp_path): with open(schema_stamp_path) as fd: schema_stamp_mtime = int(fd.read()) else: schema_stamp_mtime = -1 return schema_stamp_mtime def _set_schema_stamp_mtime(self, name, schema_mtime): """ Save the schema's modification time in the stamp directory. 
""" schema_stamp_path = os.path.join(self.schema_stamp_dir, name) with open(schema_stamp_path, "w") as fd: fd.write("%d" % schema_mtime) def clean(self, resource): """Clean up the stores after a test.""" try: for name, store in self._zstorm.iterstores(): # Ensure that the store is in a consistent state store.flush() # Clear the alive cache *before* abort is called, # to prevent a useless loop in Store.invalidate # over the alive objects store._alive.clear() finally: transaction.abort() # Clean up tables after each test if a commit was made needs_commit = False for name, store in self._zstorm.iterstores(): if self.force_delete or store in self._commits: schema_store = self._schema_zstorm.get(name) schema = self._schemas[name] schema.delete(schema_store) needs_commit = True if needs_commit: transaction.commit() self._commits = {}
import psycopg2
import transaction

from psycopg2.extensions import TransactionRollbackError

from storm.exceptions import DisconnectionError, TimeoutError, ProgrammingError
from storm.tracer import install_tracer, remove_tracer_type, get_tracers
from storm.zope.zstorm import ZStorm

from backends.db.errors import (
    IntegrityError,
    NoTimeoutTracer,
    RetryLimitReached,
)

#
# Since some of the account data is maintained by django, the basic
# transaction manager is used as this is what storm.django uses
#
account_tm = transaction
account_zstorm = ZStorm()
account_zstorm.transaction_manager = account_tm

storage_tm = transaction.ThreadTransactionManager()
storage_zstorm = ZStorm()
storage_zstorm.transaction_manager = storage_tm

# these are the default retryable exceptions
RETRYABLE_EXCEPTIONS = (
    DisconnectionError,
    TransactionRollbackError,
    psycopg2.InternalError,
    psycopg2.OperationalError,
)

# This is the maximum time a transaction can take before pgkillactive kills it.
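A hedged sketch of why the two ZStorm instances above carry separate transaction managers: stores registered with storage_zstorm join storage_tm, so they can be committed without touching the django/account transaction. The store name and URI here are placeholders.

storage_zstorm.set_default_uri("storage", "sqlite:///storage.db")

store = storage_zstorm.get("storage")
store.execute("SELECT 1")

# Commits only the stores that joined storage_tm, leaving the
# account (django-managed) transaction alone.
storage_tm.commit()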
class PatchTest(MakePath): """Test to test the patch applier""" def add_module(self, module_filename, contents): """Adds a test patch module to the patch package""" filename = os.path.join(self.pkgdir, module_filename) file = open(filename, "w") file.write(contents) file.close() def remove_all_modules(self): """Utility to remove all the modules in a patch package""" for filename in os.listdir(self.pkgdir): os.unlink(os.path.join(self.pkgdir, filename)) def setUp(self): super(PatchTest, self).setUp() self.zstorm = ZStorm() self.patchdir = self.make_path() self.pkgdir = os.path.join(self.patchdir, "mypackage") os.makedirs(self.pkgdir) f = open(os.path.join(self.pkgdir, "__init__.py"), "w") f.write("shared_data = []") f.close() # Order of creation here is important to try to screw up the # patch ordering, as os.listdir returns in order of mtime (or # something). for pname, data in [("patch_380.py", patch_test_1), ("patch_42.py", patch_test_0)]: self.add_module(pname, data) sys.path.append(self.patchdir) self.filename = self.make_path() self.uri = "sqlite:///%s" % self.filename self.store = self.zstorm.create(None, self.uri) self.store.execute("CREATE TABLE patch " "(version INTEGER NOT NULL PRIMARY KEY)") self.assertFalse(self.store.get(Patch, (42))) self.assertFalse(self.store.get(Patch, (380))) import mypackage self.mypackage = mypackage self.patch_applier = PatchApplier(self.store, self.mypackage) # Create another store just to keep track of the state of the # whole transaction manager. See the assertion functions below. self.another_store = self.zstorm.create(None, "sqlite:") self.another_store.execute("CREATE TABLE test (id INT)") self.another_store.commit() self.prepare_for_transaction_check() def tearDown(self): super(PatchTest, self).tearDown() transaction.abort() self.zstorm._reset() sys.path.remove(self.patchdir) for name in list(sys.modules): if name == "mypackage" or name.startswith("mypackage."): del sys.modules[name] def prepare_for_transaction_check(self): """Prepare to see if transactions committed or aborted""" self.another_store.execute("DELETE FROM test") self.another_store.execute("INSERT INTO test VALUES (1)") def assert_transaction_committed(self): """Make sure a transaction commited""" self.another_store.rollback() result = self.another_store.execute("SELECT * FROM test").get_one() self.assertEquals(result, (1,), "Transaction manager wasn't committed.") def assert_transaction_aborted(self): """Make sure a transaction aborted""" self.another_store.commit() result = self.another_store.execute("SELECT * FROM test").get_one() self.assertEquals(result, None, "Transaction manager wasn't aborted.") def test_apply(self): """Apply a patch""" self.patch_applier.apply(42) x = getattr(self.mypackage, "patch_42").x self.assertEquals(x, 42) self.assertTrue(self.store.get(Patch, (42))) self.assertTrue("mypackage.patch_42" in sys.modules) self.assert_transaction_committed() def test_apply_all(self): """Apply all patches""" self.patch_applier.apply_all() self.assertTrue("mypackage.patch_42" in sys.modules) self.assertTrue("mypackage.patch_380" in sys.modules) x = getattr(self.mypackage, "patch_42").x y = getattr(self.mypackage, "patch_380").y self.assertEquals(x, 42) self.assertEquals(y, 380) self.assert_transaction_committed() def test_apply_exploding_patch(self): """Apply a bad patch and make sure the transaction aborts""" self.remove_all_modules() self.add_module("patch_666.py", patch_explosion) self.assertRaises(StormError, self.patch_applier.apply, 666) 
self.assert_transaction_aborted() def test_wb_apply_all_exploding_patch(self): """ When a patch explodes the store is rolled back to make sure that any changes the patch made to the database are removed. Any other patches that have been applied successfully before it should not be rolled back. Any patches pending after the exploding patch should remain unapplied. """ self.add_module("patch_666.py", patch_explosion) self.add_module("patch_667.py", patch_after_explosion) self.assertEquals(list(self.patch_applier._get_unapplied_versions()), [42, 380, 666, 667]) self.assertRaises(StormError, self.patch_applier.apply_all) self.assertEquals(list(self.patch_applier._get_unapplied_versions()), [666, 667]) def test_mark_applied(self): """Make sure a patch are put in the patch table""" self.patch_applier.mark_applied(42) self.assertFalse("mypackage.patch_42" in sys.modules) self.assertFalse("mypackage.patch_380" in sys.modules) self.assertTrue(self.store.get(Patch, 42)) self.assertFalse(self.store.get(Patch, 380)) self.assert_transaction_committed() def test_mark_applied_all(self): """Make sure all patches are put in the patch table""" self.patch_applier.mark_applied_all() self.assertFalse("mypackage.patch_42" in sys.modules) self.assertFalse("mypackage.patch_380" in sys.modules) self.assertTrue(self.store.get(Patch, 42)) self.assertTrue(self.store.get(Patch, 380)) self.assert_transaction_committed() def test_application_order(self): """Make sure the patches are run in the right order not alpha""" self.patch_applier.apply_all() self.assertEquals(self.mypackage.shared_data, [42, 380]) def test_has_pending_patches(self): """Make sure we can tell when patches are pending""" self.assertTrue(self.patch_applier.has_pending_patches()) self.patch_applier.apply_all() self.assertFalse(self.patch_applier.has_pending_patches()) def test_abort_if_unknown_patches(self): """Test abort when a patch number exists in the table, but there is no matching patch module """ self.patch_applier.mark_applied(381) self.assertRaises(UnknownPatchError, self.patch_applier.apply_all) def test_get_unknown_patch_versions(self): """Get a list of unknown patches""" patches = [Patch(42), Patch(380), Patch(381)] my_store = MockPatchStore("database", patches=patches) patch_applier = PatchApplier(my_store, self.mypackage) self.assertEqual(set([381]), patch_applier.get_unknown_patch_versions()) def test_no_unknown_patch_versions(self): """When no patches are unapplied, we should get an empty set""" patches = [Patch(42), Patch(380)] my_store = MockPatchStore("database", patches=patches) patch_applier = PatchApplier(my_store, self.mypackage) self.assertEqual(set(), patch_applier.get_unknown_patch_versions()) def test_patch_with_incorrect_apply(self): """make sure BadPatchError returns the right information""" self.add_module("patch_999.py", patch_no_args_apply) try: self.patch_applier.apply_all() except BadPatchError, e: self.assertTrue("mypackage/patch_999.py" in str(e)) self.assertTrue("takes no arguments" in str(e)) self.assertTrue("TypeError" in str(e)) else:
class ZStormTest(TestHelper): def is_supported(self): return has_transaction def setUp(self): self.zstorm = ZStorm() def tearDown(self): # Reset the utility to cleanup the StoreSynchronizer's from the # transaction. self.zstorm._reset() # Free the transaction to avoid having errors that cross # test cases. # XXX cjwatson 2019-05-29: transaction 2.4.0 changed # ThreadTransactionManager to wrap TransactionManager rather than # inheriting from it. For now, cope with either. Simplify this # once transaction 2.4.0 is old enough that we can reasonably just # test-depend on it. manager = transaction.manager if isinstance(manager, ThreadTransactionManager): try: manager.free except AttributeError: # transaction >= 2.4.0 manager = manager.manager manager.free(transaction.get()) def test_create(self): store = self.zstorm.create(None, "sqlite:") self.assertTrue(isinstance(store, Store)) def test_create_twice_unnamed(self): store = self.zstorm.create(None, "sqlite:") store.execute("CREATE TABLE test (id INTEGER)") store.commit() store = self.zstorm.create(None, "sqlite:") self.assertRaises(OperationalError, store.execute, "SELECT * FROM test") def test_create_twice_same_name(self): store = self.zstorm.create("name", "sqlite:") self.assertRaises(ZStormError, self.zstorm.create, "name", "sqlite:") def test_create_and_get_named(self): store = self.zstorm.create("name", "sqlite:") self.assertTrue(self.zstorm.get("name") is store) def test_create_and_get_named_another_thread(self): store = self.zstorm.create("name", "sqlite:") raised = [] def f(): try: self.zstorm.get("name") except ZStormError: raised.append(True) thread = threading.Thread(target=f) thread.start() thread.join() self.assertTrue(raised) def test_get_unexistent(self): self.assertRaises(ZStormError, self.zstorm.get, "name") def test_get_with_uri(self): store = self.zstorm.get("name", "sqlite:") self.assertTrue(isinstance(store, Store)) self.assertTrue(self.zstorm.get("name") is store) self.assertTrue(self.zstorm.get("name", "sqlite:") is store) def test_set_default_uri(self): self.zstorm.set_default_uri("name", "sqlite:") store = self.zstorm.get("name") self.assertTrue(isinstance(store, Store)) def test_create_default(self): self.zstorm.set_default_uri("name", "sqlite:") store = self.zstorm.create("name") self.assertTrue(isinstance(store, Store)) def test_create_default_twice(self): self.zstorm.set_default_uri("name", "sqlite:") self.zstorm.create("name") self.assertRaises(ZStormError, self.zstorm.create, "name") def test_iterstores(self): store1 = self.zstorm.create(None, "sqlite:") store2 = self.zstorm.create(None, "sqlite:") store3 = self.zstorm.create("name", "sqlite:") stores = [] for name, store in self.zstorm.iterstores(): stores.append((name, store)) self.assertEqual(len(stores), 3) self.assertEqual(set(stores), set([(None, store1), (None, store2), ("name", store3)])) def test_get_name(self): store = self.zstorm.create("name", "sqlite:") self.assertEqual(self.zstorm.get_name(store), "name") def test_get_name_with_removed_store(self): store = self.zstorm.create("name", "sqlite:") self.assertEqual(self.zstorm.get_name(store), "name") self.zstorm.remove(store) self.assertEqual(self.zstorm.get_name(store), None) def test_default_databases(self): self.zstorm.set_default_uri("name1", "sqlite:1") self.zstorm.set_default_uri("name2", "sqlite:2") self.zstorm.set_default_uri("name3", "sqlite:3") default_uris = self.zstorm.get_default_uris() self.assertEqual(default_uris, {"name1": "sqlite:1", "name2": "sqlite:2", "name3": "sqlite:3"}) def 
test_register_store_for_tpc_transaction(self): """ Setting a store to use two-phase-commit mode, makes ZStorm call its begin() method when it joins the transaction. """ self.zstorm.set_default_uri("name", "sqlite:") self.zstorm.set_default_tpc("name", True) store = self.zstorm.get("name") xids = [] store.begin = lambda xid: xids.append(xid) store.execute("SELECT 1") [xid] = xids self.assertEqual(0, xid.format_id) self.assertEqual("_storm", xid.global_transaction_id[:6]) self.assertEqual("name", xid.branch_qualifier) def test_register_store_for_tpc_transaction_uses_per_transaction_id(self): """ Two stores in two-phase-commit mode joining the same transaction share the same global transaction ID. """ self.zstorm.set_default_uri("name1", "sqlite:///%s" % self.makeFile()) self.zstorm.set_default_uri("name2", "sqlite:///%s" % self.makeFile()) self.zstorm.set_default_tpc("name1", True) self.zstorm.set_default_tpc("name2", True) store1 = self.zstorm.get("name1") store2 = self.zstorm.get("name2") xids = [] store1.begin = lambda xid: xids.append(xid) store2.begin = lambda xid: xids.append(xid) store1.execute("SELECT 1") store2.execute("SELECT 1") [xid1, xid2] = xids self.assertEqual(xid1.global_transaction_id, xid2.global_transaction_id) def test_register_store_for_tpc_transaction_uses_unique_global_ids(self): """ Each global transaction gets assigned a unique ID. """ self.zstorm.set_default_uri("name", "sqlite:") self.zstorm.set_default_tpc("name", True) store = self.zstorm.get("name") xids = [] store.begin = lambda xid: xids.append(xid) store.execute("SELECT 1") transaction.abort() store.execute("SELECT 1") transaction.abort() [xid1, xid2] = xids self.assertNotEqual(xid1.global_transaction_id, xid2.global_transaction_id) def test_transaction_with_two_phase_commit(self): """ If a store is set to use TPC, than the associated data manager will call its prepare() and commit() methods when committing. """ self.zstorm.set_default_uri("name", "sqlite:") self.zstorm.set_default_tpc("name", True) store = self.zstorm.get("name") calls = [] store.begin = lambda xid: calls.append("begin") store.prepare = lambda: calls.append("prepare") store.commit = lambda: calls.append("commit") store.execute("SELECT 1") transaction.commit() self.assertEqual(["begin", "prepare", "commit"], calls) def test_transaction_with_single_and_two_phase_commit_stores(self): """ When there are both stores in single-phase and two-phase mode, the ones in single-phase mode are committed first. This makes it possible to actually achieve two-phase commit behavior when only one store doesn't support TPC. 
""" self.zstorm.set_default_uri("name1", "sqlite:///%s" % self.makeFile()) self.zstorm.set_default_uri("name2", "sqlite:///%s" % self.makeFile()) self.zstorm.set_default_tpc("name1", True) self.zstorm.set_default_tpc("name2", False) store1 = self.zstorm.get("name1") store2 = self.zstorm.get("name2") commits = [] store1.begin = lambda xid: None store1.prepare = lambda: None store1.commit = lambda: commits.append("commit1") store2.commit = lambda: commits.append("commit2") store1.execute("SELECT 1") store2.execute("SELECT 1") transaction.commit() self.assertEqual(["commit2", "commit1"], commits) def _isInTransaction(self, store): """Check if a Store is part of the current transaction.""" for dm in transaction.get()._resources: if isinstance(dm, StoreDataManager) and dm._store is store: return True return False def assertInTransaction(self, store): """Check that the given store is joined to the transaction.""" self.assertTrue(self._isInTransaction(store), "%r should be joined to the transaction" % store) def assertNotInTransaction(self, store): """Check that the given store is not joined to the transaction.""" self.assertTrue(not self._isInTransaction(store), "%r should not be joined to the transaction" % store) def test_wb_store_joins_transaction_on_register_event(self): """The Store joins the transaction when register-transaction is emitted. The Store tests check the various operations that trigger this event. """ store = self.zstorm.get("name", "sqlite:") self.assertNotInTransaction(store) store._event.emit("register-transaction") self.assertInTransaction(store) def test_wb_store_joins_transaction_on_use_after_commit(self): store = self.zstorm.get("name", "sqlite:") store.execute("SELECT 1") transaction.commit() self.assertNotInTransaction(store) store.execute("SELECT 1") self.assertInTransaction(store) def test_wb_store_joins_transaction_on_use_after_abort(self): store = self.zstorm.get("name", "sqlite:") store.execute("SELECT 1") transaction.abort() self.assertNotInTransaction(store) store.execute("SELECT 1") self.assertInTransaction(store) def test_wb_store_joins_transaction_on_use_after_tpc_commit(self): """ A store used after a two-phase commit re-joins the new transaction. """ self.zstorm.set_default_uri("name", "sqlite:") self.zstorm.set_default_tpc("name", True) store = self.zstorm.get("name") store.begin = lambda xid: None store.prepare = lambda: None store.commit = lambda: None store.execute("SELECT 1") transaction.commit() self.assertNotInTransaction(store) store.execute("SELECT 1") self.assertInTransaction(store) def test_wb_store_joins_transaction_on_use_after_tpc_abort(self): """ A store used after a rollback during a two-phase commit re-joins the new transaction. 
""" self.zstorm.set_default_uri("name", "sqlite:") self.zstorm.set_default_tpc("name", True) store = self.zstorm.get("name") store.begin = lambda xid: None store.prepare = lambda: None store.rollback = lambda: None store.execute("SELECT 1") transaction.abort() self.assertNotInTransaction(store) store.execute("SELECT 1") self.assertInTransaction(store) def test_remove(self): removed_store = self.zstorm.get("name", "sqlite:") self.zstorm.remove(removed_store) for name, store in self.zstorm.iterstores(): self.assertNotEqual(store, removed_store) self.assertRaises(ZStormError, self.zstorm.get, "name") def test_wb_removed_store_does_not_join_transaction(self): """If a store has been removed, it will not join the transaction.""" store = self.zstorm.get("name", "sqlite:") self.zstorm.remove(store) store.execute("SELECT 1") self.assertNotInTransaction(store) def test_wb_removed_store_does_not_join_future_transactions(self): """If a store has been removed after joining a transaction, it will not join new transactions.""" store = self.zstorm.get("name", "sqlite:") store.execute("SELECT 1") self.zstorm.remove(store) self.assertInTransaction(store) transaction.abort() store.execute("SELECT 1") self.assertNotInTransaction(store) def test_wb_cross_thread_store_does_not_join_transaction(self): """If a zstorm registered thread crosses over to another thread, it will not be usable.""" store = self.zstorm.get("name", "sqlite:") failures = [] def f(): # We perform this twice to show that ZStormError is raised # consistently (i.e. not just the first time). for i in range(2): try: store.execute("SELECT 1") except ZStormError: failures.append("ZStormError raised") except Exception as exc: failures.append("Expected ZStormError, got %r" % exc) else: failures.append("Expected ZStormError, nothing raised") if self._isInTransaction(store): failures.append("store was joined to transaction") thread = threading.Thread(target=f) thread.start() thread.join() self.assertEqual(failures, ["ZStormError raised"] * 2) def test_wb_reset(self): """_reset is used to reset the zstorm utility between zope test runs. """ store = self.zstorm.get("name", "sqlite:") self.zstorm._reset() self.assertEqual(list(self.zstorm.iterstores()), []) def test_store_strong_reference(self): """ The zstorm utility should be a strong reference to named stores so that it doesn't recreate stores uselessly. """ store = self.zstorm.get("name", "sqlite:") store_ref = weakref.ref(store) transaction.abort() del store gc.collect() self.assertNotIdentical(store_ref(), None) store = self.zstorm.get("name") self.assertIdentical(store_ref(), store)
    def test_utility(self):
        provideUtility(ZStorm())
        self.assertTrue(isinstance(getUtility(IZStorm), ZStorm))
class ZStormResourceManager(TestResourceManager):
    """Provide a L{ZStorm} resource to be used in test cases.

    The constructor is passed the details of the L{Store}s to be registered
    in the provided L{ZStorm} resource. Then the C{make} and C{clean} methods
    make sure that such L{Store}s are properly set up and cleaned for each
    test.

    @param databases: A C{list} of C{dict}s holding the following keys:
        - 'name', the name of the store to be registered.
        - 'uri', the database URI to use to create the store.
        - 'schema', optionally, the L{Schema} for the tables in the store; if
          not given, no schema will be applied.
        - 'schema-uri', optionally, an alternate URI to use for applying the
          schema; if not given, it defaults to 'uri'.

    @ivar force_delete: If C{True}, run L{Schema.delete} on a L{Store} even
        if no commit was performed by the test. Useful when running a test in
        a subprocess that might commit behind our back.

    @ivar use_global_zstorm: If C{True}, the C{global_zstorm} object from
        C{storm.zope.zstorm} will be used instead of creating a new one. This
        is useful for code loading the zcml directives of C{storm.zope}.

    @ivar schema_stamp_dir: Optionally, a path to a directory that will be
        used to save timestamps of the schema's patch packages, so schema
        upgrades will be performed only when needed. This is just an
        optimisation to let the resource setup run a bit faster.

    @ivar vertical_patching: If C{True}, patches will be applied "vertically",
        meaning that all patches for the first store will be applied, then all
        patches for the second store, etc. Otherwise, if set to C{False},
        patches will be applied "horizontally" (see L{Sharding.upgrade}). The
        default is C{True} only for backward compatibility; normally you
        should set it to C{False}.
    """
    force_delete = False
    use_global_zstorm = False
    schema_stamp_dir = None
    vertical_patching = True

    def __init__(self, databases):
        super(ZStormResourceManager, self).__init__()
        self._databases = databases
        self._zstorm = None
        self._schema_zstorm = None
        self._commits = {}
        self._schemas = {}
        self._sharding = []

    def make(self, dependencies):
        """Create a L{ZStorm} resource to be used by tests.

        @return: A L{ZStorm} object that will be shared among all tests using
            this resource manager.
        """
        if self._zstorm is None:

            if self.use_global_zstorm:
                self._zstorm = global_zstorm
            else:
                self._zstorm = ZStorm()
            self._schema_zstorm = ZStorm()

            databases = self._databases

            # Adapt the old databases format to the new one, for backward
            # compatibility. This should eventually be dropped.
            if isinstance(databases, dict):
                databases = [{"name": name, "uri": uri, "schema": schema}
                             for name, (uri, schema) in databases.items()]

            # Provide the global IZStorm utility before applying patches, so
            # patch code can get the zstorm object if needed (e.g. for
            # looking up other stores).
            provideUtility(self._zstorm)
            self._set_create_hook()

            enforce_schema = False

            for database in databases:
                name = database["name"]
                uri = database["uri"]
                schema = database.get("schema")
                schema_uri = database.get("schema-uri", uri)
                self._zstorm.set_default_uri(name, uri)
                if schema is not None:
                    # A schema was provided for this database, so set up a
                    # dedicated schema store and register it for patching
                    # (otherwise the user code is expected to apply the
                    # schema elsewhere, if any).
                    self._schemas[name] = schema
                    self._schema_zstorm.set_default_uri(name, schema_uri)
                    schema.autocommit(False)
                    store = self._schema_zstorm.get(name)
                    if not self._sharding or self.vertical_patching:
                        self._sharding.append(Sharding())
                    sharding = self._sharding[-1]
                    sharding.add(store, schema)
                    if self._has_patch_package_changed(name, schema):
                        enforce_schema = True

            if enforce_schema:
                for sharding in self._sharding:
                    try:
                        sharding.upgrade()
                    except UnknownPatchError:
                        sharding.drop()
                        sharding.create()
                    except:
                        # An unknown error occurred, so drop all timestamps
                        # to make sure subsequent runs won't assume that
                        # everything is fine.
                        self._purge_schema_stamp_dir()
                        raise
                    else:
                        sharding.delete()

            # Commit all schema changes across all stores
            transaction.commit()

        elif getUtility(IZStorm) is not self._zstorm:
            # This probably means that the test code has overwritten our
            # utility, let's re-register it.
            provideUtility(self._zstorm)

        return self._zstorm

    def _set_create_hook(self):
        """
        Set a hook in ZStorm.create, so we can lazily set commit proxies.
        """
        self._zstorm.__real_create__ = self._zstorm.create

        def create_hook(name, uri=None):
            store = self._zstorm.__real_create__(name, uri=uri)
            if self._schemas.get(name) is not None:
                # Only set commit proxies for databases that have a schema
                # that we can use for cleanup
                self._set_commit_proxy(store)
            return store

        self._zstorm.create = create_hook

    def _set_commit_proxy(self, store):
        """Set a commit proxy to keep track of commits and clean up the
        tables.

        @param store: The L{Store} to set the commit proxy on. Any commit on
            this store will result in the associated tables being cleaned up
            upon tear down.
        """
        store.__real_commit__ = store.commit

        def commit_proxy():
            self._commits[store] = True
            store.__real_commit__()

        store.commit = commit_proxy

    def _has_patch_package_changed(self, name, schema):
        """Check whether the schema's patch package has changed, i.e. whether
        the schema needs to be updated.

        As an optimisation, if the C{schema_stamp_dir} attribute is set, this
        method performs a fast check based on the patch directory timestamp
        rather than the database patch table, so connections and upgrade
        queries can be skipped when there's no need for them.

        @param name: The name of the database to check.
        @param schema: The schema to be ensured.
        @return: C{True} if the patch directory has changed and the schema
            needs to be updated, C{False} otherwise.
        """
        # If a schema stamp directory is set, then figure out whether there's
        # a need to upgrade the schema by looking at timestamps.
        if self.schema_stamp_dir is not None:

            schema_mtime = self._get_schema_mtime(schema)
            schema_stamp_mtime = self._get_schema_stamp_mtime(name)

            # The modification time of the schema's patch directory matches
            # our timestamp, so the schema is already up-to-date
            if schema_mtime == schema_stamp_mtime:
                return False

            # Save the modification time of the schema's patch directory so
            # in subsequent runs we'll know if we're already up-to-date
            self._set_schema_stamp_mtime(name, schema_mtime)

        return True

    def _get_schema_mtime(self, schema):
        """
        Return the modification time of the C{schema}'s patch directory.
""" patch_directory = os.path.dirname(schema._patch_set._package.__file__) schema_stat = os.stat(patch_directory) return int(schema_stat.st_mtime) def _get_schema_stamp_mtime(self, name): """ Return the modification time of schemas's patch directory, as saved in the stamp directory. """ # Let's create the stamp directory if it doesn't exist if not os.path.exists(self.schema_stamp_dir): os.makedirs(self.schema_stamp_dir) schema_stamp_path = os.path.join(self.schema_stamp_dir, name) # Get the last schema modification time we ran the upgrade for, or -1 # if this is our first run if os.path.exists(schema_stamp_path): with open(schema_stamp_path) as fd: schema_stamp_mtime = int(fd.read()) else: schema_stamp_mtime = -1 return schema_stamp_mtime def _set_schema_stamp_mtime(self, name, schema_mtime): """ Save the schema's modification time in the stamp directory. """ schema_stamp_path = os.path.join(self.schema_stamp_dir, name) with open(schema_stamp_path, "w") as fd: fd.write("%d" % schema_mtime) def _purge_schema_stamp_dir(self): """Remove the stamp directory.""" if self.schema_stamp_dir and os.path.exists(self.schema_stamp_dir): shutil.rmtree(self.schema_stamp_dir) def clean(self, resource): """Clean up the stores after a test.""" try: for name, store in self._zstorm.iterstores(): # Ensure that the store is in a consistent state store.flush() # Clear the alive cache *before* abort is called, # to prevent a useless loop in Store.invalidate # over the alive objects store._alive.clear() finally: transaction.abort() # Clean up tables after each test if a commit was made needs_commit = False for name, store in self._zstorm.iterstores(): if self.force_delete or store in self._commits: schema_store = self._schema_zstorm.get(name) schema = self._schemas[name] schema.delete(schema_store) needs_commit = True if needs_commit: transaction.commit() self._commits = {}