def test_logging_on_critical_error():
    """Non-transient ("critical") errors must be logged immediately.

    Each error below should cause exactly one log call after the retry
    helper gives up, and the wrapped function should have been attempted
    exactly ``max_executions`` times.
    """
    critical_errors = [
        TypeError("Example TypeError"),
        StatementError(message="?", statement="SELECT *", params={}, orig=None),
        StatementError(
            message="?",
            statement="SELECT *",
            params={},
            orig=_mysql_exceptions.OperationalError(
                "(_mysql_exceptions.OperationalError) Incorrect string value "
                "'\\xE7\\x(a\\x84\\xE5'"),
        ),
        _mysql_exceptions.OperationalError(
            "(_mysql_exceptions.OperationalError) Incorrect string value "
            "'\\xE7\\x(a\\x84\\xE5'"),
        _mysql_exceptions.IntegrityError(
            "(_mysql_exceptions.IntegrityError) Column not found"),
    ]
    for exc in critical_errors:
        logger = MockLogger()
        func = FailingFunction(exc, max_executions=2)
        retry_with_logging(func, logger=logger)
        # Critical errors are logged on the first failure.
        assert logger.call_count == 1, "{} should be logged".format(exc)
        assert func.call_count == 2
async def execute(self, *args, **kwargs):
    """Coroutine version of :meth:`Connection.execute
    <sqlalchemy.engine.Connection.execute>`.

    Runs the statement on the worker thread and returns an
    :class:`AsyncioResultProxy` wrapping the resulting
    :class:`~sqlalchemy.engine.ResultProxy`.

    Example:

    .. code-block:: python

        result = await conn.execute(...)
        data = await result.fetchall()

    .. warning::

        Make sure to explicitly call :meth:`AsyncioResultProxy.close` if the
        :class:`~sqlalchemy.engine.ResultProxy` has pending rows remaining
        otherwise it will be closed during garbage collection. With SQLite,
        this will raise an exception since the DBAPI connection was created
        in a different thread.

    Raises:
        StatementError: if the underlying worker has already quit, i.e.
            the connection is closed.
    """
    try:
        result_proxy = await self._run_in_thread(
            self._connection.execute, *args, **kwargs)
    except AlreadyQuit:
        raise StatementError("This Connection is closed.", None, None, None)
    return AsyncResultProxy(result_proxy, self._run_in_thread)
async def _begin(self):
    """Begin a transaction on the wrapped connection in the worker thread.

    Returns an ``AsyncTransaction`` wrapping the new SQLAlchemy
    transaction.

    Raises:
        StatementError: if the underlying worker has already quit, i.e.
            the connection is closed.
    """
    try:
        txn = await self._run_in_thread(self._connection.begin)
    except AlreadyQuit:
        raise StatementError("This Connection is closed.", None, None, None)
    return AsyncTransaction(txn, self._run_in_thread)
def execute(self, *args, **kwargs):
    """Execute a statement on the connection's thread.

    Returns a Deferred that fires with a ``TwistedResultProxy`` wrapping
    the SQLAlchemy result. If the connection is already closed, returns a
    Deferred that has already failed with a ``StatementError``.
    """
    try:
        deferred = self._defer_to_cxn(
            self._connection.execute, *args, **kwargs)
    except AlreadyQuit:
        return fail(
            StatementError("This Connection is closed.", None, None, None))
    # Wrap the raw ResultProxy once the execution completes.
    return deferred.addCallback(TwistedResultProxy, self._defer_to_cxn)
async def close(self, *args, **kwargs):
    """Coroutine version of :meth:`Connection.close
    <sqlalchemy.engine.Connection.close>`.

    Closes the underlying connection on the worker thread and then shuts
    the worker down.

    Raises:
        StatementError: if the worker has already quit, i.e. the
            connection is closed.
    """
    try:
        result = await self._run_in_thread(
            self._connection.close, *args, **kwargs)
        # The worker thread is no longer needed once the connection closes.
        await self._worker.quit()
    except AlreadyQuit:
        raise StatementError("This Connection is closed.", None, None, None)
    return result
def test_no_logging_until_many_transient_error():
    # Transient errors (timeouts, deadlocks, lost connections, etc.) should
    # be retried silently; logging only kicks in after many consecutive
    # failures (here: 21 executions produce exactly one log call).
    transient = [
        socket.timeout,
        socket.error,
        _mysql_exceptions.OperationalError(
            "(_mysql_exceptions.OperationalError) (1213, 'Deadlock "
            "found when trying to get lock; try restarting transaction')"),
        _mysql_exceptions.OperationalError(
            "(_mysql_exceptions.OperationalError) Lost connection to MySQL "
            "server during query"),
        _mysql_exceptions.OperationalError(
            "(_mysql_exceptions.OperationalError) MySQL server has gone away."
        ),
        _mysql_exceptions.OperationalError(
            "(_mysql_exceptions.OperationalError) Can't connect to MySQL "
            "server on 127.0.0.1"),
        _mysql_exceptions.OperationalError(
            "(_mysql_exceptions.OperationalError) Max connect timeout reached "
            "while reaching hostgroup 71"),
        # A StatementError wrapping a transient MySQL error is also transient.
        StatementError(
            message="?",
            statement="SELECT *",
            params={},
            orig=_mysql_exceptions.OperationalError(
                "(_mysql_exceptions.OperationalError) MySQL server has gone away."
            ),
        ),
    ]
    for transient_exc in transient:
        logger = MockLogger()
        failing_function = FailingFunction(transient_exc, max_executions=2)
        retry_with_logging(failing_function, logger=logger)
        # Two failures is below the logging threshold: nothing logged.
        assert logger.call_count == 0, "{} should not be logged".format(
            transient_exc)
        assert failing_function.call_count == 2
    # 21 consecutive transient failures crosses the threshold: one log call.
    failing_function = FailingFunction(socket.error, max_executions=21)
    retry_with_logging(failing_function, logger=logger)
    assert logger.call_count == 1
    assert failing_function.call_count == 21
    # NOTE(review): this final assignment is never used in the visible
    # chunk — presumably the test continues beyond this excerpt; confirm
    # against the full file before removing.
    failing_function = FailingFunction(socket.error, max_executions=2)