def test_query_member(self):
    """Exercise query_member() across the connection success/failure paths."""
    # Happy path: a mocked cursor is returned, so the SELECT result
    # ('False' from pg_is_in_recovery) must appear in the output rows.
    with patch('patroni.ctl.get_cursor', Mock(return_value=MockConnect().cursor())):
        rows = query_member(None, None, None, 'master', 'SELECT pg_catalog.pg_is_in_recovery()', {})
        self.assertTrue('False' in str(rows))
        # execute() raising OperationalError must be handled internally
        # (no exception should escape query_member).
        with patch.object(MockCursor, 'execute', Mock(side_effect=OperationalError('bla'))):
            rows = query_member(None, None, None, 'replica', 'SELECT pg_catalog.pg_is_in_recovery()', {})
    # get_cursor() returning None means no usable member connection:
    # the result must carry a 'No connection to' message.
    with patch('patroni.ctl.get_cursor', Mock(return_value=None)):
        rows = query_member(None, None, None, None, 'SELECT pg_catalog.pg_is_in_recovery()', {})
        self.assertTrue('No connection to' in str(rows))
        rows = query_member(None, None, None, 'replica', 'SELECT pg_catalog.pg_is_in_recovery()', {})
        self.assertTrue('No connection to' in str(rows))
    # get_cursor() itself raising OperationalError must also be tolerated.
    with patch('patroni.ctl.get_cursor', Mock(side_effect=OperationalError('bla'))):
        rows = query_member(None, None, None, 'replica', 'SELECT pg_catalog.pg_is_in_recovery()', {})
def in_transaction(self, isolation_level=None):
    """Generator context manager: check out a pooled connection for an
    exclusive transaction.

    The connection is published via ``db_context.cur_transaction`` so that
    nested cursor() calls can reuse it. Commits on clean exit, rolls back
    (or discards the whole pool if the connection died) on error.

    :param isolation_level: optional isolation level to apply for the
        duration of the transaction; the previous level is restored on exit.
    :raises OperationalError: if a transaction is already active in this
        context, or if the connection was closed before commit.
    """
    trans_conn = getattr(db_context, "cur_transaction", None)
    if trans_conn:
        # Nesting is not supported -- refuse to start a second transaction.
        raise OperationalError("Already in a transaction context")
    conn = self.get()
    db_context.cur_transaction = conn
    try:
        if isolation_level is not None:
            if conn.isolation_level == isolation_level:
                # Already at the requested level: nothing to restore later.
                isolation_level = None
            else:
                conn.set_isolation_level(isolation_level)
        yield conn
    except:
        if conn.closed:
            # Connection died: drop the whole pool, nothing to return.
            conn = None
            self.closeall()
        else:
            conn = self._rollback(conn)
        raise
    else:
        if conn.closed:
            raise OperationalError(
                "Cannot commit because connection was closed: %r" % (conn, ))
        conn.commit()
    finally:
        # Restore the original isolation level and return the connection
        # to the pool, then clear the transaction marker.
        if conn is not None and not conn.closed:
            if isolation_level is not None:
                conn.set_isolation_level(isolation_level)
            self.put(conn)
        db_context.cur_transaction = None
def reconnect(self):
    """Close any existing connection and open a fresh autocommit one.

    Raises:
        OperationalError: if no connection configuration has been set.
    """
    if self.conn_conf is None:
        raise OperationalError("Connection configuration is not set!")
    # Only close a previous connection if one actually exists. Previously,
    # a missing (None) connection raised the misleading "configuration is
    # not set" error even when conn_conf was perfectly valid, making a
    # first-time connect through reconnect() impossible.
    if self.connection is not None:
        self.connection.close()
    self.connection = connect(**self.conn_conf)
    self.connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
def test_doRead_removes_self_from_reactor_on_error(self):
    """doRead() must swallow an OperationalError from poll(), detach the
    listener from the reactor, and report the loss via connectionLost()."""
    listener = PostgresListenerService()
    connection = self.patch(listener, "connection")
    # Make the underlying psycopg2 connection's poll() blow up.
    connection.connection.poll.side_effect = OperationalError()
    self.patch(reactor, "removeReader")
    self.patch(listener, "connectionLost")
    failure = listener.doRead()
    # No failure is returned; see the comment in
    # PostgresListenerService.doRead() that explains why we don't do that.
    self.assertThat(failure, Is(None))
    # The listener has begun disconnecting.
    self.assertThat(listener.disconnecting, IsInstance(Deferred))
    # Wait for disconnection to complete.
    yield listener.disconnecting
    # The listener has removed itself from the reactor.
    self.assertThat(reactor.removeReader, MockCalledOnceWith(listener))
    # connectionLost() has been called with a simple ConnectionLost.
    self.assertThat(listener.connectionLost, MockCalledOnceWith(ANY))
    [failure] = listener.connectionLost.call_args[0]
    self.assertThat(failure, IsInstance(Failure))
    self.assertThat(failure.value, IsInstance(error.ConnectionLost))
def retrieve_table_records(self, table_name):
    """Return every row of *table_name* as a list.

    Raises:
        OperationalError: if there is no live cursor/connection.
    """
    if self.cursor is None:
        raise OperationalError("No live connection to any database")
    # NOTE(review): table_name is interpolated directly into the SQL text;
    # identifiers cannot be bound as query parameters, so the name must
    # come from a trusted source.
    self.cursor.execute(f"Select * FROM {table_name};")
    return list(self.cursor)
def connection(self, isolation_level=None):
    """Generator context manager yielding a pooled connection.

    Commits on clean exit; on error rolls back (or, if the connection
    died, discards the whole pool). The previous isolation level is
    restored before the connection is returned to the pool.

    :param isolation_level: optional isolation level for this checkout.
    :raises OperationalError: if the connection was closed before commit.
    """
    #if DEBUG: _t = - time.time() # TODO: nice way to check own logging.debuglevel ?
    conn = self.get()
    try:
        if isolation_level is not None:
            if conn.isolation_level == isolation_level:
                # Already at the requested level: nothing to restore later.
                isolation_level = None
            else:
                conn.set_isolation_level(isolation_level)
        #if DEBUG: debug("Got connection in %.2f ms" % ((_t + time.time())*1000))
        yield conn
    except:
        if conn.closed:
            # Dead connection: drop the entire pool, nothing to return.
            conn = None
            self.closeall()
        else:
            conn = self._rollback(conn)
        raise
    else:
        if conn.closed:
            raise OperationalError(
                "Cannot commit because connection was closed: %r" % (conn, ))
        conn.commit()
    finally:
        if conn is not None and not conn.closed:
            if isolation_level is not None:
                conn.set_isolation_level(isolation_level)
            self.put(conn)
def connection(self, isolation_level=None):
    """Generator context manager yielding a pooled connection.

    Commits on clean exit; on error rolls back (or, if the connection
    died, discards the whole pool). The previous isolation level is
    restored before the connection is returned to the pool.

    :param isolation_level: optional isolation level for this checkout.
    :raises OperationalError: if the connection was closed before commit.
    """
    conn = self.get()
    try:
        if isolation_level is not None:
            if conn.isolation_level == isolation_level:
                # Already at the requested level: nothing to restore later.
                isolation_level = None
            else:
                conn.set_isolation_level(isolation_level)
        yield conn
    except:
        if conn.closed:
            # Dead connection: drop the entire pool, nothing to return.
            conn = None
            self.closeall()
        else:
            conn = self._rollback(conn)
        raise
    else:
        if conn.closed:
            raise OperationalError(
                "Cannot commit because connection was closed: %r" % (conn, ))
        conn.commit()
    finally:
        if conn is not None and not conn.closed:
            if isolation_level is not None:
                conn.set_isolation_level(isolation_level)
            self.put(conn)
def get(self):
    """Fetch a connection from the pool, creating a new one when allowed.

    :raises OperationalError: if the pool is at capacity and no connection
        was returned within ``_maxwait`` seconds.
    """
    # Fast path: an idle connection is already sitting in the pool.
    try:
        return self._pool.get_nowait()
    except Empty:
        pass
    # Pool empty. If we are at capacity, block for up to _maxwait seconds
    # hoping another user returns a connection.
    if self._size >= self._maxsize:
        try:
            return self._pool.get(timeout=self._maxwait)
        except Empty:
            pass
    # It is possible that after waiting _maxwait seconds no connection was
    # returned because stale ones were cleaned up on put(); in that case the
    # pool is empty but also no longer full, so a new connection should be
    # created. Otherwise we are genuinely over capacity and must fail.
    if self._size >= self._maxsize:
        raise OperationalError(
            "Too many connections created: {} (maxsize is {})".format(
                self._size, self._maxsize))
    self._size += 1
    try:
        fresh = self.create_connection()
    except:
        # Creation failed: release the slot we optimistically claimed.
        self._size -= 1
        raise
    stamp = time.time()
    self._created_at[id(fresh)] = stamp
    self._latest_use[id(fresh)] = stamp
    return fresh
def _poll(self):
    """Poll method for a connected instance

    Used for executing commands and receiving notify messages
    """
    try:
        state = self.poll()
    except Exception as ex:
        self._stop_writing()
        # done with error, cleanup and notify waiter
        if not self._fut.done():
            self._fut.set_exception(ex)
        if self.closed:
            # Connection is gone: pending notifies can never be consumed.
            self.notifies.clear()
        return
    if state == POLL_WRITE:
        # Driver wants to write more; keep this poller registered.
        self._start_writing(self._poll)
        return
    self._stop_writing()
    if state == POLL_OK:
        # Operation complete: wake the waiter.
        fut = self._fut
        if not fut.done():
            fut.set_result(True)
    elif state != POLL_READ:
        # should not happen
        if not self._fut.done():
            self._fut.set_exception(
                OperationalError(
                    "Unexpected result from poll: {}".format(state)))
def cursor(self, *args, **kwargs):
    """Generator context manager yielding a cursor on a pooled connection.

    Extra arguments are forwarded to ``connection.cursor()``; the cursor
    factory defaults to ``extras.DictCursor``. Commits on clean exit;
    rolls back (or discards the pool if the connection died) on error.

    :param isolation_level: optional (keyword-only, popped from kwargs)
        isolation level for this checkout; restored on exit.
    :raises OperationalError: if the connection was closed before commit.
    """
    isolation_level = kwargs.pop('isolation_level', None)
    kwargs.setdefault('cursor_factory', extras.DictCursor)
    conn = self.get()
    try:
        if isolation_level is not None:
            if conn.isolation_level == isolation_level:
                # Already at the requested level: nothing to restore later.
                isolation_level = None
            else:
                conn.set_isolation_level(isolation_level)
        yield conn.cursor(*args, **kwargs)
    except:
        if conn.closed:
            # Dead connection: drop the entire pool, nothing to return.
            conn = None
            self.closeall()
        else:
            conn = self._rollback(conn)
        raise
    else:
        if conn.closed:
            raise OperationalError(
                'Cannot commit because connection was closed: %r' % (conn, ))
        conn.commit()
    finally:
        if conn is not None and not conn.closed:
            if isolation_level is not None:
                conn.set_isolation_level(isolation_level)
            self.put(conn)
def cursor(self, *args, **kwargs):
    """Generator context manager yielding a cursor, transaction-aware.

    If ``db_context.cur_transaction`` holds a connection (set by an
    enclosing in_transaction()), that connection is reused and commit /
    pool-return are left to the transaction owner; otherwise a connection
    is checked out from the pool and committed/returned here.

    :param isolation_level: optional (popped from kwargs) isolation level,
        restored on exit.
    :param tracer: optional (popped from kwargs) tracer attached to the
        cursor when it is a TracingCursor.
    :raises OperationalError: if the connection was closed before commit.
    """
    isolation_level = kwargs.pop('isolation_level', None)
    trans_conn = getattr(db_context, "cur_transaction", None)
    conn = trans_conn if trans_conn else self.get()
    try:
        if isolation_level is not None:
            if conn.isolation_level == isolation_level:
                # Already at the requested level: nothing to restore later.
                isolation_level = None
            else:
                conn.set_isolation_level(isolation_level)
        tracer = kwargs.pop("tracer", None)
        cur = conn.cursor(*args, **kwargs)
        if isinstance(cur, TracingCursor):
            cur._tracer = tracer
        yield cur
    except:
        if conn.closed:
            # Dead connection: drop the entire pool, nothing to return.
            conn = None
            self.closeall()
        else:
            conn = self._rollback(conn)
        raise
    else:
        if conn.closed:
            raise OperationalError(
                "Cannot commit because connection was closed: %r" % (conn, ))
        if not trans_conn:
            # Only commit when we own the connection; inside a transaction
            # the commit belongs to in_transaction().
            conn.commit()
    finally:
        if conn is not None and not conn.closed:
            if isolation_level is not None:
                conn.set_isolation_level(isolation_level)
            if not trans_conn:
                self.put(conn)
def _connect_poll(self):
    """ Poll method for connecting

    This resets the io notifications after each event, because file
    descriptor (or underlying socket) might change.
    """
    self._reset_connect()
    try:
        state = self.poll()
    except Exception as ex:
        # Connection attempt failed: hand the error to the waiter.
        if not self._fut.done():
            self._fut.set_exception(ex)
        return
    # Re-read the fd on every event -- libpq may swap sockets mid-connect.
    self._fd = self.fileno()
    if state == POLL_WRITE:
        self._start_writing(self._connect_poll)
    elif state == POLL_READ:
        self._start_reading(self._connect_poll)
    elif state == POLL_OK:
        # we are connected
        fut = self._fut
        if not fut.done():
            fut.set_result(True)
    else:
        # should not happen
        if not self._fut.done():
            self._fut.set_exception(
                OperationalError(
                    "Unexpected result from poll: {}".format(state)))
def _load_in_safe_session(self, db, safe_session, filter_eq_dct=None, row_id=None, hash_of=None):
    """Fetch rows from *db* inside *safe_session*, selecting by exactly one
    of: primary key (``row_id``), content hash (``hash_of``), or equality
    filter (``filter_eq_dct``); with none given, all rows are returned.

    A 5-minute timer guards the query.

    :raises OperationalError: if ``row_id`` matches no row.
    :returns: list of SQL row objects (possibly empty, except for row_id).
    """
    with safe_session.set_timer(60 * 5):
        logger.debug("Query session...")
        q = db.query(safe_session.session)
        if row_id is not None:
            sql_row = q._query.get(row_id)
            if sql_row is None:
                raise OperationalError("There is no rows with id \"%d\"" % row_id)
            sql_rows = [sql_row]
        elif hash_of is not None:
            # Look up by the hash of the flattened content.
            hashcode = sql.hash_state(flatten(hash_of))
            sql_rows = q._query.filter(db._Dict.hash == hashcode).all()
        elif filter_eq_dct is not None:
            sql_rows = q.filter_eq_dct(filter_eq_dct).all()
        else:
            sql_rows = q.all()
        logger.debug("Query done")
        return sql_rows
def create_database(force=False):
    """Create the configured database on the postgres server.

    Parameters
    ----------
    force : bool
        If True, silently succeed when the database already exists.

    Raises
    ------
    OperationalError
        If the server cannot be reached.
    ValueError
        If the database exists and ``force`` is False.
    """
    # Connect to the postgres server
    try:
        conn = connect(dbname='postgres', user=AMGUT_CONFIG.user,
                       password=AMGUT_CONFIG.password, host=AMGUT_CONFIG.host,
                       port=AMGUT_CONFIG.port)
    except OperationalError as e:
        raise OperationalError("Cannot connect to the server, error is %s"
                               % str(e))
    # Ensure the connection (and cursor) are always closed: the original
    # code leaked them on the early return and on the ValueError path.
    try:
        # Set the isolation level to AUTOCOMMIT so we can execute a
        # create database sql query
        conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        cur = conn.cursor()
        try:
            db_exists = _check_db_exists(AMGUT_CONFIG.database, cur)
            # Check that the database does not already exist
            if db_exists and force:
                return
            elif db_exists:
                raise ValueError("Database '{}' already present on the system"
                                 .format(AMGUT_CONFIG.database))
            # NOTE(review): the database name is interpolated into the SQL;
            # identifiers cannot be parameterized, so it must come from
            # trusted configuration.
            cur.execute('CREATE DATABASE %s' % AMGUT_CONFIG.database)
        finally:
            cur.close()
    finally:
        conn.close()
def disconnect(self):
    """Close the current connection if one exists.

    When there is no live connection the condition is reported on stdout
    instead of raising, so disconnect() stays safe to call unconditionally
    (the original raised an OperationalError only to catch it immediately
    and print its message -- same observable behavior, simpler code).
    """
    if self.connection is not None:
        self.connection.close()
    else:
        print("There is no connection live!")
def test_reraises_db_connection_error(self, setup_experiment):
    """A failing DB connection check must surface from setup_experiment."""
    from psycopg2 import OperationalError

    patched = mock.patch("dallinger.deployment.db.check_connection")
    with patched as check_mock:
        check_mock.side_effect = OperationalError("Boom!")
        with pytest.raises(Exception) as raised:
            setup_experiment(log=mock.Mock())
        assert raised.match("Boom!")
def retrieve_table_info(self, table_name):
    """Return (name, type_code) pairs for the columns of *table_name*.

    Raises:
        OperationalError: if there is no live cursor/connection.
    """
    if self.cursor is None:
        raise OperationalError("No live connection to any database")
    # NOTE(review): table_name is interpolated directly into the SQL text;
    # identifiers cannot be bound as query parameters, so the name must
    # come from a trusted source.
    self.cursor.execute(f"Select * FROM {table_name};")
    return [(desc[0], desc[1]) for desc in self.cursor.description]
def mock_failing_connection(mocker) -> MagicMock:
    """Patch SAConnection.execute so that

        async with engine.acquire() as conn:
            await conn.execute(query)

    raises OperationalError.
    """
    # See http://initd.org/psycopg/docs/module.html
    execute_mock = mocker.patch.object(SAConnection, "execute")
    execute_mock.side_effect = OperationalError(
        "MOCK: server closed the connection unexpectedly")
    return execute_mock
def from_postgreSQL(database):
    """Read transaction records from PostgreSQL and return
    ``(n_transactions, n_corrupted, finalized_last_unique, transactions)``.

    Incomplete records (any falsy field) are logged and skipped.

    :raises OperationalError: if the server cannot be reached.
    :raises ProgrammingError: if the query fails.
    """
    check_type_of(database)
    number_of_transactions = 0
    number_of_corrupted_records = 0
    transactions = []
    # Latest purchase time seen per (user, item); datetime(1, 1, 1) is the
    # "never seen" sentinel.
    last_unique_items_of = defaultdict(lambda: defaultdict(lambda: dt.datetime(1, 1, 1)))
    # NOTE(review): field and table names are passed as pyformat
    # parameters; identifiers normally cannot be parameterized -- verify
    # how database._params is consumed.
    query = """SELECT %(timestamp)s, %(userid)s, %(articleid)s FROM %(table)s LIMIT %(limit)s"""

    def process_valid_transaction(record):
        # Track the most recent time per (user, item) and keep the record.
        timestamp, user, item = record
        time = converted(timestamp)
        if time > last_unique_items_of[user][item]:
            last_unique_items_of[user][item] = time
        transactions.append((time.isoformat(), user, item))
        return 1

    def log_corrupted_transaction(record):
        log.warning('Incomplete record returned from database. Skipping.')
        return 0

    # Dispatch on whether the record is complete (all fields truthy).
    process = {True: process_valid_transaction,
               False: log_corrupted_transaction}

    try:
        connection = connect(database.login)
    except OperationalError:
        log.error('Failed connecting to {} @{}.'.format(database.login_db_name, database.login_host))
        raise OperationalError('Connect to database failed. Check settings!')

    with connection.cursor() as cursor:
        try:
            cursor.execute(query, database._params)
        except ProgrammingError:
            log.error('Failed to execute SQL query. Check your parameters!')
            raise ProgrammingError('SQL query failed. Check your parameters!')
        else:
            for record in cursor:
                complete = all(record)
                success = process[complete](record)
                number_of_transactions += success
                number_of_corrupted_records += 1 - success
        finally:
            connection.close()

    # Sanity-check the counted transactions against the database.
    compare(number_of_transactions, database)
    return (number_of_transactions,
            number_of_corrupted_records,
            finalized(last_unique_items_of),
            transactions)
def test_db_error_on_save(self, db_error_mock):
    """A database OperationalError during save must surface as
    DataValidationError from deserialize."""
    db_error_mock.side_effect = OperationalError()
    payload = {
        'product_id': 23,
        'rec_type_id': "up-sell",
        'rec_product_id': 45,
        'weight': .5,
    }
    recommendation = Recommendation()
    self.assertRaises(DataValidationError, recommendation.deserialize, payload)
def gevent_wait_callback(conn, timeout=None):
    """A wait callback useful to allow gevent to work with Psycopg.

    Repeatedly polls the connection, yielding to the gevent hub whenever
    libpq wants to read or write, until the operation completes.
    """
    while True:
        status = conn.poll()
        if status == extensions.POLL_OK:
            return
        if status == extensions.POLL_READ:
            wait_read(conn.fileno(), timeout=timeout)
        elif status == extensions.POLL_WRITE:
            wait_write(conn.fileno(), timeout=timeout)
        else:
            raise OperationalError("Bad result from poll: %r" % status)
def from_postgreSQL(database):
    """Read aggregated purchase counts from PostgreSQL and return
    ``(n_transactions, n_corrupted, user_index, item_index, counts)``.

    Users and items are mapped to dense integer indices; counts are keyed
    by ``(user_index, item_index)``. Incomplete records are logged and
    counted as corrupted.

    :raises OperationalError: if the server cannot be reached.
    :raises ProgrammingError: if the query fails.
    """
    check_type_of(database)
    number_of_transactions = 0
    number_of_corrupted_records = 0
    # defaultdicts that hand out the next dense index on first access.
    userIndex_of = defaultdict(lambda: len(userIndex_of))
    itemIndex_of = defaultdict(lambda: len(itemIndex_of))
    count_buys_of = defaultdict(int)
    # NOTE(review): field and table names are passed as pyformat
    # parameters; identifiers normally cannot be parameterized -- verify
    # how database._params is consumed.
    query = """SELECT %(userid)s, %(articleid)s, COUNT(*) as count FROM (SELECT %(userid)s, %(articleid)s FROM %(table)s LIMIT %(limit)s) AS head GROUP BY %(userid)s, %(articleid)s"""

    def process_valid_transaction(record):
        # Store the aggregated count under the dense index pair.
        user, item, count = record
        count_buys_of[(userIndex_of[user], itemIndex_of[item])] = count
        return 0

    def log_corrupted_transaction(record):
        log.warning('Incomplete record returned from database. Skipping.')
        return 1

    # Dispatch on whether the record is complete; the return value is the
    # corrupted-record increment (0 for valid, 1 for corrupted).
    problems_with = {
        True: process_valid_transaction,
        False: log_corrupted_transaction
    }

    try:
        connection = connect(database.login)
    except OperationalError:
        log.error('Failed connecting to {} @{}.'.format(
            database.login_db_name, database.login_host))
        raise OperationalError('Connect to database failed. Check settings!')

    with connection.cursor() as cursor:
        try:
            cursor.execute(query, database._params)
        except ProgrammingError:
            log.error('Failed to execute SQL query. Check your parameters!')
            raise ProgrammingError('SQL query failed. Check your parameters!')
        else:
            for record in cursor:
                complete = all(record)
                number_of_corrupted_records += problems_with[complete](record)
        finally:
            connection.close()

    # Total transactions is the sum of the per-pair counts.
    number_of_transactions = sum(count_buys_of.values())
    compare(number_of_transactions, database)
    return (number_of_transactions,
            number_of_corrupted_records,
            dict(userIndex_of),
            dict(itemIndex_of),
            dict(count_buys_of))
def test_run_query(self):
    """run_query(): patched pass-through, bad config, good config, DB error."""
    # When the function itself is patched, its return value passes through.
    with patch("cescout.projects.ooni.run_query", return_value=self.query):
        self.assertEqual(ooni.run_query("CN", *self.date_range, **self.config),
                         self.query)
    with patch("psycopg2.connect") as mock:
        mock.return_value.cursor.return_value.fetchall.return_value = [("some_value")]
        # self.config passed as-is presumably lacks the expected nesting,
        # so run_query returns None -- TODO confirm against run_query().
        self.assertEqual(ooni.run_query("CN", *self.date_range, **self.config), None)
        ooni_config = {"ooni": self.config}
        # With the config nested under "ooni" the fetched rows come back.
        self.assertEqual(ooni.run_query("CN", *self.date_range, **ooni_config),
                         [("some_value")])
        # A cursor-level OperationalError is handled and yields None.
        mock.return_value.cursor.side_effect = OperationalError()
        self.assertEqual(ooni.run_query("CN", *self.date_range, **ooni_config), None)
def psycopg2_wait_callback(conn):
    """A wait callback to allow greenlet to work with Psycopg.

    The caller must be from a greenlet other than the main one.
    """
    while True:
        status = conn.poll()
        if status == extensions.POLL_OK:
            # Done with waiting
            return
        if status == extensions.POLL_READ:
            wait_fd(conn)
        elif status == extensions.POLL_WRITE:
            wait_fd(conn, read=False)
        else:  # pragma nocover
            raise OperationalError("Bad result from poll: %r" % status)
def test_503_page(self):
    """A database OperationalError should render as a 503 response."""
    view_500 = self._get_handler500()
    # Django view functions need a request object, even a minimal one.
    request = RequestFactory().request(**{'wsgi.input': None})
    # handler500 is only invoked by django while an exception is being
    # handled, i.e. while sys.exc_info() is populated -- so trigger one.
    try:
        raise OperationalError("unable to connect!")
    except OperationalError:
        # call the handler from a frame where sys.exc_info() is set
        response = view_500(request)
    eq_(response.status_code, 503)
    ok_('Temporarily Unavailable' in response.content)
class CommonErrorStrategyTestCase(TestCase):
    """Check that CommonErrorStrategy.handle_exception() maps each
    exception type to the expected (handled, action) pair."""

    # Maps a sample exception instance to the (handled, action) pair that
    # handle_exception() is expected to return for it. (Renamed from the
    # misspelled ``exception_to_vaue_dict``.)
    exception_to_value_dict = {
        InterfaceError(): (True, TERMINATE),
        DatabaseError(choice(CommonErrorStrategy.BACKOFFABLE_MESSAGE)): (True, BACKOFF),
        OperationalError(): (True, TERMINATE),
        Exception(): (False, TERMINATE)
    }

    def setUp(self):
        self.strat = CommonErrorStrategy()

    def test_strategy_returns_correct_values(self):
        # NOTE(review): iteritems() is Python 2 only -- switch to .items()
        # if this module ever moves to Python 3.
        for exception, value in self.exception_to_value_dict.iteritems():
            return_value = self.strat.handle_exception(exception)
            self.assertEqual(return_value, value)
def wait_select(conn):
    """Wait until a connection or cursor has data available.

    The function is an example of a wait callback to be registered with
    `~psycopg2.extensions.set_wait_callback()`.

    This function uses `!select()` to wait for data available.
    """
    while True:
        status = conn.poll()
        if status == POLL_OK:
            return
        # fileno() is re-read each pass: the descriptor can change while a
        # connection is still being established.
        if status == POLL_READ:
            select.select([conn.fileno()], [], [])
        elif status == POLL_WRITE:
            select.select([], [conn.fileno()], [])
        else:
            raise OperationalError("bad state from poll: %s" % status)
def save(self, table_name, row):
    """Insert or update *row* in *table_name* and return an eagerly loaded
    dict of the stored row (including its ``'id'``).

    A row containing an ``'id'`` key is updated in place; otherwise a new
    row is inserted. All database steps are guarded by 5-minute timers.

    :raises OperationalError: if an identical row already exists on insert.
    """
    db = self._open_db(table_name)
    with SafeSession(db) as safe_session:
        if "id" in row:
            # Update path: strip the id and load the existing row.
            row_id = row["id"]
            del row["id"]
            sql_row = self._load_in_safe_session(db=db, safe_session=safe_session, row_id=row_id)[0]
            logger.debug("update row %d" % row_id)
            with safe_session.set_timer(60 * 5):
                sql_row.update_simple(flatten(row), safe_session.session)
        else:
            # Insert path: insert_dict returns None for duplicates.
            logger.debug("insert new row")
            with safe_session.set_timer(60 * 5):
                sql_row = sql.insert_dict(flatten(row), db, session=safe_session.session)
                if sql_row is None:
                    raise OperationalError("Identical row already exists")
                sql_row._set_in_session(sql.JOBID, sql_row.id, safe_session.session)
                row_id = sql_row.id
        with safe_session.set_timer(60 * 5):
            safe_session.session.commit()
            logger.debug("session commited")
    # load in new session otherwise lazy attribute selection hangs
    # on forever... why is that!?
    with SafeSession(db) as safe_session:
        sql_row = self._load_in_safe_session(db, safe_session, row_id=row_id)[0]
        logger.debug("Fetch all row attributes...")
        eager_dict = self._eager_dicts([sql_row], safe_session)[0]
        logger.debug("Fetch done")
        eager_dict['id'] = row_id
        return eager_dict
def test_basic_postgres_save_processed_operational_error(self):
    """With a limited-backoff executor, a persistent OperationalError from
    the cursor must be retried the configured number of times and then
    re-raised by save_processed()."""
    mock_logging = mock.Mock()
    mock_postgres = mock.Mock()
    required_config = PostgreSQLCrashStorage.get_required_config()
    required_config.add_option('logger', default=mock_logging)
    # Three zero-second backoff delays => exactly three attempts.
    config_manager = ConfigurationManager(
        [required_config],
        app_name='testapp',
        app_version='1.0',
        app_description='app description',
        values_source_list=[{
            'logger': mock_logging,
            'database_class': mock_postgres,
            'transaction_executor_class': TransactionExecutorWithLimitedBackoff,
            'backoff_delays': [0, 0, 0],
        }])
    with config_manager.context() as config:
        crashstorage = PostgreSQLCrashStorage(config)
        # Mark OperationalError as retryable for the executor.
        crashstorage.database.operational_exceptions = (OperationalError, )
        database = crashstorage.database.return_value = mock.MagicMock()
        self.assertTrue(isinstance(database, mock.Mock))
        # NOTE(review): fetch_all_returns / fetch_all_func are set up but
        # never wired to the mock below -- presumably leftover scaffolding.
        fetch_all_returns = [
            ((666, ), ),
            None,
            ((23, ), ),
        ]

        def fetch_all_func(*args):
            result = fetch_all_returns.pop(0)
            return result
        m = mock.MagicMock()
        m.__enter__.return_value = m
        database = crashstorage.database.return_value = m
        # Every cursor() call fails, so all three attempts error out.
        m.cursor.side_effect = OperationalError('bad')
        self.assertRaises(OperationalError,
                          crashstorage.save_processed,
                          a_processed_crash)
        self.assertEqual(m.cursor.call_count, 3)
async def _wait_callback(self):
    """
    The wait callback.

    Polls the psycopg2 connection until it reports ready, awaiting the
    socket whenever the driver asks for a read or a write.
    """
    while True:
        status = self._connection.poll()
        if status == POLL_OK:
            break
        if status == POLL_READ:
            await multio.asynclib.wait_read(self._sock)
        elif status == POLL_WRITE:
            await multio.asynclib.wait_write(self._sock)
        elif status == POLL_ERROR:
            raise OperationalError("Polling socket returned error")