def start(self):
  """Redirect the repository config at a temporary database, create that
  database, and verify it exists; on any failure, roll back via stop().

  NOTE(review): assumes self._configPatch, self._unaffiliatedEngine,
  self.tempDatabaseName and the two boolean flags were initialized by
  __init__ (not visible in this chunk) -- confirm against the class.
  """
  # Removes possible left over cached engine
  # (needed if non-patched engine is run prior)
  collectorsdb.resetEngineSingleton()

  # Override the Repository database name
  try:
    self._configPatch.start()
    # Flag set only after the patch succeeds, so stop() knows whether to
    # undo it
    self._configPatchApplied = True

    # Now create the temporary repository database.  The flag is set BEFORE
    # the attempt so stop() will drop a partially created database too.
    self._attemptedToCreateDatabase = True
    collectorsdb.reset(suppressPromptAndObliterateDatabase=True)

    # Verify that the temporary repository database got created
    numDbFound = self._unaffiliatedEngine.execute(
      "SELECT COUNT(*) FROM INFORMATION_SCHEMA.SCHEMATA WHERE "
      "`SCHEMA_NAME` = '{db}'".format(
        db=self.tempDatabaseName)).scalar()
    assert numDbFound == 1, (
      "Temp repo db={db} not found (numFound={numFound})".format(
        db=self.tempDatabaseName, numFound=numDbFound))
  except:  # pylint: disable=W0702; bare except is deliberate: re-raised below
    # Attempt to clean up
    self.stop()
    raise
def start(self):
  """Point the repository config at a temporary database, create it, and
  confirm creation; any failure triggers stop() cleanup before re-raising.
  """
  # Drop any engine cached by a previous, non-patched run
  collectorsdb.resetEngineSingleton()

  try:
    # Swap in the temporary repository database name; record success so
    # stop() knows the patch needs undoing
    self._configPatch.start()
    self._configPatchApplied = True

    # Mark the attempt first so stop() also drops a half-created database
    self._attemptedToCreateDatabase = True
    collectorsdb.reset(suppressPromptAndObliterateDatabase=True)

    # Confirm the temporary database is now visible to the server
    query = (
      "SELECT COUNT(*) FROM INFORMATION_SCHEMA.SCHEMATA WHERE "
      "`SCHEMA_NAME` = '{db}'".format(db=self.tempDatabaseName))
    matchCount = self._unaffiliatedEngine.execute(query).scalar()
    assert matchCount == 1, (
      "Temp repo db={db} not found (numFound={numFound})".format(
        db=self.tempDatabaseName, numFound=matchCount))
  except:  # pylint: disable=W0702; deliberately broad: cleanup then re-raise
    self.stop()
    raise
def testTransientErrorRetryDecorator(self):
  """Verify that collectorsdb.retryOnTransientErrors keeps retrying a DB
  operation across a (simulated) server outage until it recovers.
  """
  # Setup proxy. We'll patch config later, so we need to cache the values
  # so that the original proxy may be restarted with the original params
  config = collectorsdb.CollectorsDbConfig()

  originalHost = config.get("repository", "host")
  originalPort = config.getint("repository", "port")

  def _startProxy():
    p = startProxy(originalHost, originalPort, 6033)
    p.next()  # prime the generator-based proxy (Python 2 protocol)
    return p

  proxy = _startProxy()
  self.addCleanup(proxy.send, "kill")

  # Patch collectorsdb config with local proxy
  with ConfigAttributePatch(
      config.CONFIG_NAME,
      config.baseConfigDir,
      (("repository", "host", "127.0.0.1"),
       ("repository", "port", "6033"))):

    # Force refresh of engine singleton
    collectorsdb.resetEngineSingleton()
    engine = collectorsdb.engineFactory()

    # First, make sure valid query returns expected results
    res = collectorsdb.retryOnTransientErrors(engine.execute)("select 1")
    self.assertEqual(res.scalar(), 1)

    @collectorsdb.retryOnTransientErrors
    def _killProxyTryRestartProxyAndTryAgain(n=[]):  # pylint: disable=W0102
      # NOTE: the mutable default arg is deliberate -- it persists attempt
      # state across the decorator's retry invocations.
      if not n:
        # Kill the proxy on first attempt
        proxy.send("kill")
        proxy.next()
        try:
          engine.execute("select 1")
          self.fail("Proxy did not terminate as expected...")
        except sqlalchemy.exc.OperationalError:
          pass
        n.append(None)
      elif len(n) == 1:
        # Restore proxy in second attempt
        newProxy = _startProxy()
        self.addCleanup(newProxy.send, "kill")
        n.append(None)

      res = engine.execute("select 2")

      return res

    # Try again w/ retry decorator
    result = _killProxyTryRestartProxyAndTryAgain()

    # Verify that the expected value is eventually returned
    self.assertEqual(result.scalar(), 2)
def testTransientErrorRetryDecorator(self):
  """Exercise collectorsdb.retryOnTransientErrors end-to-end: route DB
  traffic through a local TCP proxy, kill the proxy mid-test, and confirm
  the decorated operation retries until the proxy is restored.
  """
  # Cache the real repository endpoint before patching the config so the
  # proxy can be (re)started against the original server
  cfg = collectorsdb.CollectorsDbConfig()
  realHost = cfg.get("repository", "host")
  realPort = cfg.getint("repository", "port")

  def _launchProxy():
    # startProxy is generator-based; advance once to activate it
    gen = startProxy(realHost, realPort, 6033)
    gen.next()
    return gen

  proxy = _launchProxy()
  self.addCleanup(proxy.send, "kill")

  overrides = (("repository", "host", "127.0.0.1"),
               ("repository", "port", "6033"))

  # Redirect collectorsdb at the local proxy for the duration of the test
  with ConfigAttributePatch(cfg.CONFIG_NAME, cfg.baseConfigDir, overrides):
    # Rebuild the engine singleton so it picks up the patched endpoint
    collectorsdb.resetEngineSingleton()
    engine = collectorsdb.engineFactory()

    # Sanity check: a healthy connection works through the proxy
    firstResult = collectorsdb.retryOnTransientErrors(engine.execute)(
        "select 1")
    self.assertEqual(firstResult.scalar(), 1)

    @collectorsdb.retryOnTransientErrors
    def _failThenRecover(attempts=[]):  # pylint: disable=W0102
      # The mutable default is intentional: it carries attempt history
      # across the decorator's retry invocations.
      if len(attempts) == 0:
        # Attempt #1: take the proxy down and prove queries now fail
        proxy.send("kill")
        proxy.next()
        try:
          engine.execute("select 1")
        except sqlalchemy.exc.OperationalError:
          pass
        else:
          self.fail("Proxy did not terminate as expected...")
        attempts.append(None)
      elif len(attempts) == 1:
        # Attempt #2: bring a replacement proxy back up
        replacement = _launchProxy()
        self.addCleanup(replacement.send, "kill")
        attempts.append(None)

      return engine.execute("select 2")

    # The decorator should absorb the induced failures and succeed
    outcome = _failThenRecover()
    self.assertEqual(outcome.scalar(), 2)
def stop(self):
  """Tear down the temporary repository database and undo start()'s
  side effects; safe to call even if start() failed partway through.
  """
  try:
    if self._attemptedToCreateDatabase:
      # Clear the flag first so a repeated stop() won't re-issue the DROP
      self._attemptedToCreateDatabase = False
      dropSql = "DROP DATABASE IF EXISTS {db}".format(
        db=self.tempDatabaseName)
      self._unaffiliatedEngine.execute(dropSql)
  finally:
    # Always undo the config patch and reset the cached engine, even if
    # the DROP above raised
    if self._configPatchApplied:
      self._configPatch.stop()

    collectorsdb.resetEngineSingleton()

    # Release the unaffiliated engine's pooled connections
    self._unaffiliatedEngine.dispose()
def stop(self):
  """Drop the temporary repository database (if one was attempted) and
  restore the original config/engine state.

  NOTE(review): relies on flags and attributes set in start()/__init__;
  the finally clause guarantees config restoration and engine disposal
  even when the DROP fails.
  """
  try:
    if self._attemptedToCreateDatabase:
      # Flag cleared first so a second stop() call won't re-issue the DROP
      self._attemptedToCreateDatabase = False

      # Drop the temporary repository database, if any
      self._unaffiliatedEngine.execute(
        "DROP DATABASE IF EXISTS {db}".format(
          db=self.tempDatabaseName))
  finally:
    if self._configPatchApplied:
      self._configPatch.stop()

    # Discard the engine singleton that pointed at the temp database
    collectorsdb.resetEngineSingleton()

    # Dispose of the unaffiliated engine's connection pool
    self._unaffiliatedEngine.dispose()
def tearDown(self):
  """Restore the collectorsdb engine singleton to a pristine state.

  Required when tests run non-boxed (e.g. while collecting coverage):
  without this reset, later tests would inherit an engine singleton that
  is still configured to route through the test proxy.
  """
  collectorsdb.resetEngineSingleton()