Code Example #1
File: db_client.py  Project: kmatt/pg_view
def establish_user_defined_connection(instance, conn, clusters):
    """ connect the database and get all necessary options like pid and work_directory
        we use port, host and socket_directory, prefering socket over TCP connections
    """
    pgcon = None
    # establish a new connection
    try:
        pgcon = psycopg2.connect(**conn)
    except Exception as e:
        logger.error('failed to establish connection to {0} via {1}'.format(
            instance, conn))
        logger.error('PostgreSQL exception: {0}'.format(e))
        return None
    # get the database version from the pgcon properties
    dbver = dbversion_as_float(pgcon)
    cur = pgcon.cursor()
    cur.execute('show data_directory')
    work_directory = cur.fetchone()[0]
    cur.close()
    pgcon.commit()
    # now that we have the work directory, acquire the pid of the postmaster.
    pid = read_postmaster_pid(work_directory, instance)
    if pid is None:
        logger.error(
            'failed to read pid of the postmaster on {0}'.format(conn))
        return None
    # check that we don't have the same pid already in the accumulated results.
    # for instance, a user may specify two different sets of connection options for
    # the same database (one for the unix_socket_directory and another for the host)
    pids = [opt['pid'] for opt in clusters if 'pid' in opt]
    if pid in pids:
        duplicate_instance = [
            opt['name'] for opt in clusters
            if 'pid' in opt and opt.get('pid', 0) == pid
        ][0]
        logger.error('duplicate connection options detected for databases '
                     '{0} and {1}, same pid {2}, skipping {0}'.format(
                         instance, duplicate_instance, pid))
        pgcon.close()
        return True
    # now we have all components to create a cluster descriptor
    desc = make_cluster_desc(name=instance,
                             version=dbver,
                             workdir=work_directory,
                             pid=pid,
                             pgcon=pgcon,
                             conn=conn)
    clusters.append(desc)
    return True
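
As a usage illustration, here is a minimal sketch of how the function above might be called. It assumes the same module context (psycopg2, logger, make_cluster_desc, read_postmaster_pid, dbversion_as_float); the instance name and connection options are made up for the example, not taken from a real pg_view configuration.

clusters = []
# hypothetical connection options; adjust host/port/dbname/user for the target server
conn_options = {'host': '/var/run/postgresql', 'port': 5432,
                'dbname': 'postgres', 'user': 'postgres'}
if establish_user_defined_connection('main', conn_options, clusters):
    # the function returns True on success (or on a skipped duplicate);
    # a new cluster descriptor, if any, has been appended to clusters
    print('known clusters: {0}'.format([c['name'] for c in clusters]))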
Code Example #2
File: pg_collector.py  Project: kmatt/pg_view
def refresh(self):
    """ Reads data from /proc and PostgreSQL stats """
    result = []
    # fetch an up-to-date list of subprocess PIDs
    self.get_subprocesses_pid()
    try:
        if not self.pgcon:
            # if we've lost the connection, try to reconnect and
            # re-initialize all connection invariants
            self.pgcon, self.postmaster_pid = self.reconnect()
            self.connection_pid = self.pgcon.get_backend_pid()
            self.max_connections = self._get_max_connections()
            self.dbver = dbversion_as_float(self.pgcon)
            self.server_version = self.pgcon.get_parameter_status(
                'server_version')
        stat_data = self._read_pg_stat_activity()
    except psycopg2.OperationalError as e:
        logger.info("failed to query the server: {}".format(e))
        if self.pgcon and not self.pgcon.closed:
            self.pgcon.close()
        self.pgcon = None
        self._do_refresh([])
        return
    logger.info("new refresh round")
    for pid in self.pids:
        if pid == self.connection_pid:
            continue
        is_backend = pid in stat_data
        is_active = is_backend and (stat_data[pid]['query'] != 'idle'
                                    or pid in self.always_track_pids)
        result_row = {}
        # for each pid, fetch the corresponding row from /proc
        proc_data = self._read_proc(pid, is_backend, is_active)
        if proc_data:
            result_row.update(proc_data)
        if stat_data and pid in stat_data:
            # ditto for pg_stat_activity
            result_row.update(stat_data[pid])
        # the row is not empty - add it to the list of current rows
        if result_row:
            result.append(result_row)
    # and refresh the rows with this data
    self._do_refresh(result)
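
The loop above merges per-PID data from two sources, /proc and pg_stat_activity. A tiny standalone illustration of that merge pattern follows; the dictionaries and field names are invented for the example and are not the collector's real column set.

# invented sample data: /proc-derived rows and pg_stat_activity rows, keyed by pid
proc_rows = {101: {'pid': 101, 'cpu': 0.5}, 102: {'pid': 102, 'cpu': 0.1}}
stat_rows = {101: {'pid': 101, 'query': 'SELECT 1', 'state': 'active'}}

result = []
for pid, proc_data in proc_rows.items():
    row = dict(proc_data)                # /proc data first
    row.update(stat_rows.get(pid, {}))   # then pg_stat_activity data, when the pid is a backend
    if row:
        result.append(row)
# result now holds one merged row per pid, the shape _do_refresh() receives above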