def dbcursor_query(query,
                   args=(),          # Query arguments
                   onerow=False,     # Require exactly one row returned
                   tries=2           # Times to try in the face of errors
                   ):
    """
    Run a query against a cursor, catching anything that arises from the
    rowcount being < 0 and raising an error.
    """
    log.debug("QUERY: %s, %s", query, args)

    while tries > 0:
        cursor = dbcursor()
        try:
            cursor.execute(query, args)
        except psycopg2.OperationalError as ex:
            log.debug("Operational Error: %s", ex)
            cursor.close()
            tries -= 1
            if tries == 0:
                raise psycopg2.Error("Too many tries to run the query; giving up")
            continue
        except Exception as ex:
            # Strip the verbose CONTEXT section from the error message.
            if ex.args:
                text = str(ex)
                location = text.find("\nCONTEXT: ")
                if location > -1:
                    ex.args = (text[0:location], )
            log.debug("EX: %s %s", type(ex), ex)
            raise ex
        break

    rows = cursor.rowcount
    if rows < 0:
        raise psycopg2.Error("No results returned; may be an internal problem")
    if onerow and rows != 1:
        raise psycopg2.Error("Expected one row; got %d" % rows)

    log.debug("QUERY returned %s rows", rows)
    return cursor
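# Hedged usage sketch, not from the original source: fetch_task_name and the
# tasks table are hypothetical; dbcursor() and log are assumed to exist in the
# surrounding module. It shows how onerow turns an unexpected row count into
# a psycopg2.Error instead of a silent empty result.
def fetch_task_name(task_id):
    cursor = dbcursor_query(
        "SELECT name FROM tasks WHERE id = %s",
        args=(task_id,),
        onerow=True)      # Raises psycopg2.Error unless exactly one row matched
    return cursor.fetchone()[0]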
def test_should_use_s3_to_create_replica(self):
    with patch('psycopg2.connect',
               MagicMock(side_effect=psycopg2.Error("foo"))):
        self.assertFalse(self.wale_restore.should_use_s3_to_create_replica())
    with patch('subprocess.check_output',
               MagicMock(side_effect=subprocess.CalledProcessError(1, "cmd", "foo"))):
        self.assertFalse(self.wale_restore.should_use_s3_to_create_replica())
    with patch('subprocess.check_output',
               MagicMock(side_effect=fake_backup_data_2)):
        self.assertFalse(self.wale_restore.should_use_s3_to_create_replica())
    with patch('subprocess.check_output',
               MagicMock(side_effect=fake_backup_data_3)):
        self.assertFalse(self.wale_restore.should_use_s3_to_create_replica())
    with patch('subprocess.check_output',
               MagicMock(side_effect=fake_backup_data_4)):
        self.assertFalse(self.wale_restore.should_use_s3_to_create_replica())
def removeMeasurementRun(self, agentHostIP, agentTrafficClass, agentFromIP, probeID):
    for stage in [1, 2]:
        try:
            if self.scheduler_dbCursor is None:
                raise psycopg2.Error('Disconnected from database')
            self.scheduler_dbCursor.execute(
                """DELETE FROM ExperimentSchedule
                   WHERE AgentHostIP = %(AgentHostIP)s
                     AND AgentTrafficClass = %(AgentTrafficClass)s
                     AND AgentFromIP = %(AgentFromIP)s
                     AND ProbeID = %(ProbeID)s""",
                {
                    'AgentHostIP':       str(agentHostIP),
                    'AgentTrafficClass': int(agentTrafficClass),
                    'AgentFromIP':       str(agentFromIP),
                    'ProbeID':           int(probeID)
                })
            self.scheduler_dbConnection.commit()
            break
        except psycopg2.Error as e:
            self.connectToSchedulerDB()
            if stage == 2:
                print('Unable to remove measurement run: ' + str(e).strip())
                return False
    return True
def queryAgents(self):
    # ====== Query database ===============================================
    AtlasMNSLogger.trace('Querying agents ...')
    for stage in [1, 2]:
        try:
            if self.scheduler_dbCursor is None:
                raise psycopg2.Error('Disconnected from database')
            self.scheduler_dbCursor.execute("""
               SELECT AgentHostIP, AgentHostName, LastSeen, Location
               FROM AgentLastSeen
               ORDER BY AgentHostName, AgentHostIP""")
            table = self.scheduler_dbCursor.fetchall()
            break
        except psycopg2.Error as e:
            self.connectToSchedulerDB()
            if stage == 2:
                AtlasMNSLogger.warning('Failed to query agents: ' + str(e).strip())
                return []

    # ====== Provide result as list of dictionaries =======================
    agents = []
    for row in table:
        agents.append({
            'AgentHostIP':   row[0],
            'AgentHostName': row[1],
            'LastSeen':      row[2],
            'Location':      row[3]
        })
    return agents
def _poll_connection(self, fd):
    """Check with psycopg2 to see what action to take. If the state is
    POLL_OK, we should have a pending callback for that fd.

    :param int fd: The socket fd for the PostgreSQL connection
    """
    try:
        state = self._connections[fd].poll()
    except (OSError, socket.error) as error:
        self._ioloop.remove_handler(fd)
        if fd in self._futures and not self._futures[fd].done():
            self._futures[fd].set_exception(
                psycopg2.OperationalError('Connection error (%s)' % error))
    except (psycopg2.Error, psycopg2.Warning) as error:
        if fd in self._futures and not self._futures[fd].done():
            self._futures[fd].set_exception(error)
    else:
        if state == extensions.POLL_OK:
            if fd in self._futures and not self._futures[fd].done():
                self._futures[fd].set_result(True)
        elif state == extensions.POLL_WRITE:
            self._ioloop.update_handler(fd, ioloop.IOLoop.WRITE)
        elif state == extensions.POLL_READ:
            self._ioloop.update_handler(fd, ioloop.IOLoop.READ)
        elif state == extensions.POLL_ERROR:
            self._ioloop.remove_handler(fd)
            if fd in self._futures and not self._futures[fd].done():
                self._futures[fd].set_exception(psycopg2.Error('Poll Error'))
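# For contrast with the IOLoop-driven handler above: the same POLL_* state
# machine can be driven synchronously with select(). This is a minimal sketch
# following the wait pattern in the psycopg2 documentation; it is not part of
# the original class.
import select

import psycopg2
from psycopg2 import extensions


def wait_until_ready(conn):
    """Block until an async psycopg2 connection finishes its current work."""
    while True:
        state = conn.poll()
        if state == extensions.POLL_OK:
            return
        elif state == extensions.POLL_WRITE:
            select.select([], [conn.fileno()], [])   # Wait until writable
        elif state == extensions.POLL_READ:
            select.select([conn.fileno()], [], [])   # Wait until readable
        else:
            raise psycopg2.OperationalError('poll() returned %s' % state)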
def __init__(self, dbname, user, host, port, pwd):
    try:
        self.conn = psycopg2.connect(
            f"dbname='{dbname}' user='******' host='{host}' port='{port}' password='******'"
        )
    except psycopg2.Error as ex:
        # Chain the original error instead of swallowing it with a bare except.
        raise psycopg2.Error('Connection error') from ex
def updateScheduledEntry(self, scheduledEntry):
    AtlasMNSLogger.trace('Updating scheduled entry ...')
    for stage in [1, 2]:
        try:
            if self.scheduler_dbCursor is None:
                raise psycopg2.Error('Disconnected from database')
            self.scheduler_dbCursor.execute(
                """UPDATE ExperimentSchedule
                   SET State=%s, LastChange=NOW(), AgentHostIP=%s, AgentTrafficClass=%s,
                       AgentFromIP=%s, ProbeID=%s, ProbeMeasurementID=%s, ProbeCost=%s,
                       ProbeHostIP=%s, ProbeFromIP=%s, Info=%s
                   WHERE Identifier = %s;""",
                [
                    scheduledEntry['State'],
                    scheduledEntry['AgentHostIP'],
                    scheduledEntry['AgentTrafficClass'],
                    scheduledEntry['AgentFromIP'],
                    scheduledEntry['ProbeID'],
                    scheduledEntry['ProbeMeasurementID'],
                    scheduledEntry['ProbeCost'],
                    scheduledEntry['ProbeHostIP'],
                    scheduledEntry['ProbeFromIP'],
                    scheduledEntry['Info'],
                    scheduledEntry['Identifier']
                ])
            self.scheduler_dbConnection.commit()
            break
        except psycopg2.Error as e:
            self.connectToSchedulerDB()
            if stage == 2:
                AtlasMNSLogger.warning('Failed to update schedule: ' + str(e).strip())
                return False
    # Report success, matching the other schedule operations.
    return True
def __init__(self, db, sql_statement):
    self._db = db
    cur = self._db.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cur.execute(sql_statement)
    self.cacherec = cur.fetchone()
    if self.cacherec is None:
        raise psycopg2.Error('Object not found: ' + sql_statement)
def sql_select_query(sql_command, parameters=(), row_num=None, mogrify=False):
    try:
        config_info = config("postgresql")
        conn = psycopg2.connect(**config_info)
        cursor = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
        if mogrify:
            return cursor.mogrify(sql_command, parameters).decode('utf-8')
        cursor.execute(sql_command, parameters)
        if row_num is None:
            response = cursor.fetchall()
        elif isinstance(row_num, int):
            response = cursor.fetchmany(row_num)
        else:
            raise TypeError
    except TypeError:
        raise TypeError("row_num parameter must be an integer value or left empty")
    except psycopg2.OperationalError:
        raise ConnectionError("Operational error: Check database connection.")
    except psycopg2.Error as error:
        raise psycopg2.Error(
            f"QUERY: {sql_command} Parameters: {parameters} Error: {error.args}")
    cursor.close()
    conn.close()  # Terminate the connection
    return response
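# Hedged usage sketch (the users table and column names are illustrative;
# config() and the "postgresql" section are assumptions carried over from the
# snippet above):
rows = sql_select_query(
    "SELECT id, name FROM users WHERE active = %s",
    parameters=(True,),
    row_num=10)                      # fetchmany(10); omit row_num for fetchall()

# mogrify=True returns the fully interpolated SQL string instead of running
# the query, which is handy for logging or debugging parameter binding.
debug_sql = sql_select_query(
    "SELECT id FROM users WHERE id = %s", parameters=(42,), mogrify=True)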
def test_postgres_connection_error_location(self, es, pg_connect):
    """A 500 error should be returned when the PostgreSQL connection
    fails (during georeferencing)."""
    self.set_msearch_results(es, [MOCK_STREET])
    pg_connect.return_value.cursor.side_effect = psycopg2.Error('Mock error')
    self.assert_500_error('/direcciones?direccion=santa fe 1000')
def sql_update_query(sql_command, parameters=(), row_num=None,
                     return_value=False, mogrify=False):
    # Note: row_num is accepted but unused here.
    try:
        config_info = config("postgresql")
        conn = psycopg2.connect(**config_info)
        cursor = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
        if mogrify:
            return cursor.mogrify(sql_command, parameters).decode('utf-8')
        cursor.execute(sql_command, parameters)
        if return_value:
            response = cursor.fetchall()
        else:
            response = cursor.rowcount
        conn.commit()
    except psycopg2.OperationalError:
        raise ConnectionError("Operational error: Check database connection.")
    except psycopg2.Error as error:
        raise psycopg2.Error(
            f"QUERY: {sql_command} Parameters: {parameters} Error: {error.args}")
    cursor.close()
    conn.close()  # Terminate the connection
    return response
def _get_state_and_county(self, lon, lat):
    result = None
    county = None
    state = None
    try:
        coordinates = (lat, lon)
        results = rg.search(coordinates)
        try:
            county = results[0]['admin2'].split()[:1][0]
            state = results[0]['admin1']
        except (KeyError, IndexError):
            state = None
            county = None
        # Bounding box check for the area of interest (AOI)
        if 33 < lat < 38 and -103.5 < lon < -94:
            result = True
        else:
            print("NOT AOI event")
            result = None
            raise TypeError("Not in AOI")
    except pgdb.Error as error:
        raise pgdb.Error(error)
    except Exception as error:
        print(error)
    if result is None:
        raise TypeError("Not in AOI")
    return result, county, state
def addMeasurementRun(self, agentHostIP, agentTrafficClass, agentFromIP, probeID):
    for stage in [1, 2]:
        try:
            if self.scheduler_dbCursor is None:
                raise psycopg2.Error('Disconnected from database')
            self.scheduler_dbCursor.execute(
                """INSERT INTO ExperimentSchedule
                   (AgentHostIP, AgentTrafficClass, AgentFromIP, ProbeID)
                   VALUES (%(AgentHostIP)s, %(AgentTrafficClass)s, %(AgentFromIP)s, %(ProbeID)s)""",
                {
                    'AgentHostIP':       str(agentHostIP),
                    'AgentTrafficClass': int(agentTrafficClass),
                    'AgentFromIP':       str(agentFromIP),
                    'ProbeID':           int(probeID)
                })
            self.scheduler_dbConnection.commit()
            break
        except psycopg2.Error as e:
            self.connectToSchedulerDB()
            if stage == 2:
                print('Unable to add measurement run: ' + str(e).strip())
                return False
    return True
def create_cursor(conn):
    try:
        cursor = conn.cursor()
    except psycopg2.Error as e:
        logger.error(e)
        raise psycopg2.Error('Could not create a cursor')
    return cursor
def test_database_error(self):
    """``execute`` raises database.DatabaseError instead of psycopg2 errors"""
    self.mocked_cursor.execute.side_effect = psycopg2.Error('testing')
    db = database.Database()
    with self.assertRaises(database.DatabaseError):
        db.execute(sql="SELECT * from FOO WHERE bar LIKE 'baz'")
def test_load_tables_update(self, mock_transaction, mock_create_tables, mock_error):
    job_id = str(uuid.uuid4())
    load_tables(job_id, is_update=True)
    mock_transaction.assert_called_once_with(mock.ANY)

    mock_transaction.side_effect = psycopg2.Error()
    load_tables(job_id, is_update=True)
    self.assertTrue(mock_error.called)
def test_dbconn_connect_fail(nosleep, mocker):
    psycopg2_connect = mocker.patch('psycopg2.connect',
                                    side_effect=psycopg2.Error('Just an error...'))
    num_retries = 10
    with pytest.raises(AssertionError):
        with dbconn.DBConn(DSN, num_retries=num_retries):
            pass
    assert psycopg2_connect.call_count == num_retries
def create_connection(db_url):
    conn = None
    try:
        conn = psycopg2.connect(db_url, sslmode='require', cursor_factory=DictCursor)
    except psycopg2.Error as e:
        logger.error(e)
        raise psycopg2.Error('Database connection failed')
    return conn
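# Hedged usage sketch tying create_connection and create_cursor together; the
# URL and query below are illustrative placeholders, not from the original
# source:
conn = create_connection('postgres://user:pass@host:5432/dbname')
cursor = create_cursor(conn)
try:
    cursor.execute('SELECT 1')
    print(cursor.fetchone())
finally:
    cursor.close()
    conn.close()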
def on_message(client, userdata, message):
    neveraQuery = ("INSERT INTO temperatura_nevera (temperatura, hielo, fecha) "
                   "VALUES (%s, %s, %s)")
    ollaQuery1 = ("INSERT INTO temperatura_olla (temperatura, mensaje, fecha) "
                  "VALUES (%s, %s, %s)")
    ollaQuery2 = "INSERT INTO temperatura_olla (temperatura, fecha) VALUES (%s, %s)"

    print('------------------------------')
    print('topic: %s' % message.topic)
    print('payload: %s' % message.payload)
    print('qos: %d' % message.qos)

    resp = json.loads(message.payload)
    connection = None
    cursor = None
    try:
        connection = psycopg2.connect(user='******',
                                      password='******',
                                      host='queenie.db.elephantsql.com',
                                      database='prbzchlc')
        cursor = connection.cursor()

        create_table_query = '''CREATE TABLE IF NOT EXISTS temperatura_nevera (
            temperatura_nevera_id serial NOT NULL PRIMARY KEY,
            temperatura REAL NOT NULL,
            hielo INTEGER NOT NULL,
            mensaje VARCHAR(100),
            fecha timestamp NOT NULL
        );'''
        cursor.execute(create_table_query)
        connection.commit()

        create_table_query = '''CREATE TABLE IF NOT EXISTS temperatura_olla (
            temperatura_olla_id serial NOT NULL PRIMARY KEY,
            temperatura REAL NOT NULL,
            mensaje VARCHAR(100),
            fecha timestamp NOT NULL
        );'''
        cursor.execute(create_table_query)
        connection.commit()

        if resp.get("Temperatura_Olla"):
            if resp.get("mensaje"):
                itemTuple = (float(resp["Temperatura_Olla"]), resp["mensaje"], resp["Fecha"])
                cursor.execute(ollaQuery1, itemTuple)
            else:
                itemTuple = (float(resp["Temperatura_Olla"]), resp["Fecha"])
                cursor.execute(ollaQuery2, itemTuple)
            connection.commit()
            print(itemTuple)
        elif resp.get("Temperatura_nevera"):
            itemTuple = (float(resp["Temperatura_nevera"]), resp["Cantidad_de_Hielo"], resp["Fecha"])
            cursor.execute(neveraQuery, itemTuple)
            connection.commit()
            print(itemTuple)
    except (Exception, psycopg2.Error) as error:
        # Except tuples take exception classes, not instances; the original
        # also printed an undefined name here.
        print("Error while connecting to PostgreSQL", error)
    finally:
        # Guard against connect() having failed before the cursor was created.
        if cursor is not None:
            cursor.close()
        if connection is not None:
            connection.close()
            print("PostgreSQL connection is closed")
def test_should_use_s3_to_create_replica(self):
    self.assertTrue(self.wale_restore.should_use_s3_to_create_replica())
    with patch.object(MockConnect, 'server_version', PropertyMock(return_value=100000)):
        self.assertTrue(self.wale_restore.should_use_s3_to_create_replica())
    with patch('subprocess.check_output',
               Mock(return_value=wale_output.replace(b'167772160', b'1'))):
        self.assertFalse(self.wale_restore.should_use_s3_to_create_replica())

    with patch('psycopg2.connect', Mock(side_effect=psycopg2.Error("foo"))):
        save_no_master = self.wale_restore.no_master
        save_master_connection = self.wale_restore.master_connection

        self.assertFalse(self.wale_restore.should_use_s3_to_create_replica())

        with patch('time.sleep', Mock(return_value=None)) as mock_sleep:
            self.wale_restore.no_master = 1
            assert self.wale_restore.should_use_s3_to_create_replica()
            # verify retries
            mock_sleep.assert_has_calls(
                [((wale_restore.RETRY_SLEEP_INTERVAL, ), )] * WALE_TEST_RETRIES)

        self.wale_restore.master_connection = ''
        self.assertTrue(self.wale_restore.should_use_s3_to_create_replica())

        self.wale_restore.no_master = save_no_master
        self.wale_restore.master_connection = save_master_connection

    with patch('subprocess.check_output',
               Mock(side_effect=subprocess.CalledProcessError(1, "cmd", "foo"))):
        self.assertFalse(self.wale_restore.should_use_s3_to_create_replica())
    with patch('subprocess.check_output', Mock(return_value=wale_output_header)):
        self.assertFalse(self.wale_restore.should_use_s3_to_create_replica())
    with patch('subprocess.check_output',
               Mock(return_value=wale_output + wale_output_values)):
        self.assertFalse(self.wale_restore.should_use_s3_to_create_replica())
    with patch('subprocess.check_output',
               Mock(return_value=wale_output.replace(b'expanded_size_bytes',
                                                     b'expanded_size_foo'))):
        self.assertFalse(self.wale_restore.should_use_s3_to_create_replica())
def test_add_port_db_error(self):
    """``add_port`` raises DatabaseError for unexpected DB problems"""
    self.mocked_cursor.execute.side_effect = psycopg2.Error('testing')
    db = database.Database()
    with self.assertRaises(database.DatabaseError):
        db.add_port(target_addr='1.1.1.1',
                    target_port=22,
                    target_name='myBox',
                    target_component='OneFS')
def test_auto_rollback(self):
    """``execute`` auto rolls back the db connection upon error"""
    self.mocked_cursor.execute.side_effect = psycopg2.Error('testing')
    db = database.Database()
    try:
        db.execute(sql="SELECT * from FOO WHERE bar LIKE 'baz'")
    except database.DatabaseError:
        pass
    self.assertEqual(self.mocked_connection.rollback.call_count, 1)
def change(cn, sql):
    rowsAffected = 0
    cr = None
    try:
        cr = cn.cursor()
        cr.execute(sql)
        cn.commit()
        rowsAffected = cr.rowcount
    except (Exception, db.Error) as err:
        # db.Error is an exception class; the original mistakenly
        # instantiated it inside the except tuple.
        print('DB error: ', err)
    finally:
        if cr is not None:
            cr.close()
        cn.close()
    return {"rowsAffected": rowsAffected}
def get_column_names(connection):
    cursor = connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cursor.execute(
        "SELECT COLUMN_NAME FROM information_schema.COLUMNS WHERE TABLE_NAME = 'movie_all'"
    )
    result = cursor.fetchall()
    if not result:
        raise psycopg2.Error("Table movie_all couldn't be found.")

    fields = []
    for row in result:
        fields.append(row[0])
    return fields
def querySchedule(self, identifier=None):
    # ====== Query database ===============================================
    AtlasMNSLogger.trace('Querying schedule ...')
    for stage in [1, 2]:
        try:
            if self.scheduler_dbCursor is None:
                raise psycopg2.Error('Disconnected from database')
            if identifier is not None:
                self.scheduler_dbCursor.execute(
                    """SELECT * FROM ExperimentSchedule
                       WHERE Identifier = %(Identifier)s""",
                    {'Identifier': int(identifier)})
            else:
                self.scheduler_dbCursor.execute("""
                   SELECT Identifier, State, LastChange, AgentMeasurementTime,
                          AgentHostIP, AgentTrafficClass, AgentFromIP,
                          ProbeID, ProbeMeasurementID, ProbeCost,
                          ProbeHostIP, ProbeFromIP, Info
                   FROM ExperimentSchedule
                   ORDER BY LastChange ASC;""")
            table = self.scheduler_dbCursor.fetchall()
            break
        except psycopg2.Error as e:
            self.connectToSchedulerDB()
            if stage == 2:
                AtlasMNSLogger.warning('Failed to query schedule: ' + str(e).strip())
                return []

    # ====== Provide result as list of dictionaries =======================
    schedule = []
    for row in table:
        schedule.append({
            'Identifier':           row[0],
            'State':                row[1],
            'LastChange':           row[2],
            'AgentMeasurementTime': row[3],
            'AgentHostIP':          row[4],
            'AgentTrafficClass':    row[5],
            'AgentFromIP':          row[6],
            'ProbeID':              row[7],
            'ProbeMeasurementID':   row[8],
            'ProbeCost':            row[9],
            'ProbeHostIP':          row[10],
            'ProbeFromIP':          row[11],
            'Info':                 row[12]
        })
    return schedule
def _delete_event(self, event_id):
    """
    Private method to delete the event with the given event_id from the
    database.

    Arguments:
    @param {str} event_id - Id of the event to remove.
    """
    sql = "DELETE FROM earthquake_quake.quakes WHERE event_id=%s"
    values = (event_id, )
    self.logger.debug("Deleting event with id: %s", event_id)
    try:
        self.cursor.execute(sql, values)
    except pgdb.Error as error:
        raise pgdb.Error(error)
def check_availability(db, table, use_this_query=None):
    try:
        # Check whether <table> has rows and, if not, populate the table in
        # an interactive mode.
        res = select_all_rows(db, table.select_all_query)
        if len(res) == 0:
            print('"{}" is empty'.format(table.__name__))
            rv = populate_interactive(db, table)
            if rv <= 0:
                raise pg_driver.Error(
                    'Not enough rows in "{}" to fulfil the operation'.format(
                        table.__name__))
        query = table.select_all_query if use_this_query is None else use_this_query
        return select_all_rows(db, query)
    except pg_driver.Error:
        pass
def query(sql, data=(), column_names=(), pandas_format=True):
    conn = psycopg2.connect(DATABASE_URL)
    cursor = conn.cursor()
    cursor.execute("SET TIME ZONE 'Singapore';")
    cursor.execute(sql, data)
    # fetchall() returns a list of rows, each row being a tuple
    records = cursor.fetchall()
    if not pandas_format:
        return records
    # For empty results, still preserve the requested column names
    if len(records) == 0:
        return pd.DataFrame(columns=column_names)
    # Guard against missing column names or a column-count mismatch
    if len(column_names) == 0 or len(records[0]) != len(column_names):
        raise psycopg2.Error('Mismatched number of column headings and columns.')
    return to_pandas(column_names, records).set_index('id')
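# Hedged usage sketch (the bookings table is illustrative; DATABASE_URL,
# to_pandas() and the 'id' index column are assumptions from the snippet
# above):
df = query(
    'SELECT id, name, created_at FROM bookings WHERE name = %s',
    data=('Alice',),
    column_names=['id', 'name', 'created_at'])  # Must match the SELECT list
print(df.head())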
def nextId(connection, tableName):
    sql = 'SELECT MAX(id) FROM ' + tableName + ';'
    rowCount = -1
    cr = None
    try:
        cr = connection.cursor()
        cr.execute(sql)
        result = cr.fetchone()
        rowCount = result[0]
        rowCount += 1
    except (Exception, db.Error) as err:
        # db.Error is an exception class; the original mistakenly
        # instantiated it inside the except tuple.
        print('DB error: ', err)
    finally:
        if cr is not None:
            cr.close()
    return rowCount
def fetchDB(self, query, arguments=(None, )):
    for i in range(DB_RETRY):
        # Abort on the last allowed attempt (the original hardcoded 2 here,
        # which silently decoupled the check from DB_RETRY).
        if i == DB_RETRY - 1:
            print('DB connection broken. Aborting')
            raise psycopg2.Error('Can not connect to DB')
        try:
            cur = self.conn.cursor()
            cur.execute(query, arguments)
            db_res = cur.fetchall()
            cur.close()
            return db_res
        except psycopg2.Error as e:
            print('DB connection broken. Retry...', e)
            self.conn = self.createConnection()
            time.sleep(0.5)
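# Hedged usage sketch (db_client, the schedule table and its columns are
# hypothetical; DB_RETRY, self.conn and createConnection() are assumed from
# the class above): fetchDB reconnects on transient failures, so callers can
# treat it as a single blocking call.
rows = db_client.fetchDB(
    'SELECT probe_id, state FROM schedule WHERE state = %s',
    arguments=('running',))
for probe_id, state in rows:
    print(probe_id, state)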