Example #1
def test_postgres_connect(mocker):
    mocker.patch('temboardagent.postgres.connect', autospec=True)

    from temboardagent.postgres import Postgres

    postgres = Postgres(host='myhost')
    with postgres.connect() as conn:
        assert conn
    assert conn.close.called is True

    assert 'myhost' in repr(postgres)
Example #2
def test_postgres_fetch_version(mocker):
    c = mocker.patch('temboardagent.postgres.connect', autospec=True)

    conn = c.return_value
    conn.server_version = 90400

    from temboardagent.postgres import Postgres

    postgres = Postgres(host='myhost')
    version = postgres.fetch_version()

    assert 90400 == version
    assert conn.close.called is True
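
For reference, the value returned by fetch_version() follows PostgreSQL's server_version_num convention (90400 is 9.4), so feature checks can be done numerically. A minimal sketch, assuming a reachable server at the given host; the helper name and threshold are illustrative (100000 is the first version_num of PostgreSQL 10, which renamed the xlog functions used in Example #9):

from temboardagent.postgres import Postgres


def supports_wal_lsn_functions(host):
    # Hypothetical helper: PostgreSQL 10 (version_num 100000) renamed the
    # pg_xlog_* functions to pg_wal_*/pg_*_lsn_*, as used in Example #9.
    return Postgres(host=host).fetch_version() >= 100000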
Example #3
def get_postgres(app_config, database):
    '''
    Same as `app.postgres`, but with a specific database instead of the default one.
    '''
    config = dict(**app_config.postgresql)
    config.update(dbname=database)
    return Postgres(**config)
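
A minimal usage sketch for get_postgres(), assuming conn.query() yields dict-like rows as in the other examples; the function name, query, and database argument are hypothetical:

def count_tables(app, database):
    # Illustration only: connect to a non-default database through the
    # helper above and run a simple catalog query.
    postgres = get_postgres(app.config, database)
    with postgres.connect() as conn:
        rows = list(conn.query(
            "SELECT count(*) AS tables FROM pg_class WHERE relkind = 'r'"))
    return rows[0]['tables']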
Example #4
    def run(self, conninfo):
        if not conninfo['standby']:
            return []

        try:
            with Postgres(**conninfo).connect() as conn:
                # Get primary parameters from primary_conninfo
                p_host, p_port, p_user, p_password = get_primary_conninfo(conn)

                # pg_stat_wal_receiver lookup
                rows = conn.query("""\
                SELECT '{p_host}' AS upstream, NOW() AS datetime,
                CASE WHEN COUNT(*) > 0 THEN 1 ELSE 0 END AS connected
                FROM pg_stat_wal_receiver
                WHERE status='streaming' AND
                      conninfo LIKE '%host={p_host}%'
                """.format(p_host=p_host))
                r = list(rows)
                if len(r) == 0:
                    return []
                return r

        except Exception as e:
            logger.exception(str(e))
            return []
Example #5
def get_statements(http_context, app):
    """Return a snapshot of latest statistics of executed SQL statements
    """
    config = app.config
    dbname = config.statements.dbname
    assert dbname == "postgres", dbname
    snapshot_datetime = now()
    conninfo = dict(config.postgresql, dbname=dbname)
    try:
        with Postgres(**conninfo).connect() as conn:
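            # Note: `query` is a module-level SQL statement selecting from
            # pg_stat_statements; it is not shown in this excerpt.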
            data = list(conn.query(query))
    except Exception as e:
        pg_version = app.postgres.fetch_version()
        if (pg_version < 90600
                or 'relation "pg_stat_statements" does not exist' in str(e)):
            raise HTTPError(
                404, "pg_stat_statements not enabled on database %s" % dbname)
        logger.error(
            "Failed to get pg_stat_statements data on database %s: %s",
            dbname,
            e,
        )
        raise HTTPError(500, e)
    else:
        return {"snapshot_datetime": snapshot_datetime, "data": data}
Example #6
def test_pickle():
    from pickle import dumps as pickle, loads as unpickle
    from temboardagent.postgres import Postgres

    orig = Postgres(host='myhost')
    copy = unpickle(pickle(orig))
    assert 'myhost' == copy.host
Example #7
def pgconnect(**kw):
    defaults = dict(
        host=ENV['pg']['socket_dir'], port=ENV['pg']['port'],
        user=ENV['pg']['user'], password=ENV['pg']['password'],
        database='postgres',
    )
    kw = dict(defaults, **kw)
    return Postgres(**kw).connect()
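
A sketch of how this helper might be used in a test, assuming the ENV settings point at a running server; the test name and query are illustrative only:

def test_can_connect():
    # Hypothetical smoke test using the helper above; conn.query() is assumed
    # to yield dict-like rows as in the other examples.
    with pgconnect() as conn:
        rows = list(conn.query("SELECT version() AS version"))
    assert 'PostgreSQL' in rows[0]['version']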
Example #8
    def run_sql(self, conninfo, sql, database=None):
        """Get the result of the SQL query"""
        if sql is None:
            return []

        # Default the connection database to the one configured,
        # useful for instance-level SQL probes
        if database is None:
            database = conninfo['database']

        output = []
        try:
            with Postgres(**conninfo).connect() as conn:

                cluster_name = conninfo['instance'].replace('/', '')
                for r in conn.query(sql):
                    # Add the info of the instance (port) to the
                    # result to output one big list for all instances and
                    # all databases
                    r['port'] = conninfo['port']

                    # Compute delta if the probe needs that
                    if self.delta_columns is not None:
                        to_delta = {}

                        # XXX. Convert results to float(), spc retrieves
                        # everything as string. So far psycopg2 on the
                        # server side handles the rest
                        for k in self.delta_columns:
                            if k in r.keys():
                                to_delta[k] = float(r[k])

                        # Create the store key for the delta
                        if self.delta_key is not None:
                            key = cluster_name + database + r[self.delta_key]
                        else:
                            key = cluster_name + database

                        # Calculate delta
                        (interval, deltas) = self.delta(key, to_delta)

                        # The first time, no delta is returned
                        if interval is None:
                            continue

                        # Merge result and add the interval column
                        r.update(deltas)
                        r[self.delta_interval_column] = interval

                    output.append(r)
        except Exception as e:
            logger.error(
                "Unable to run probe \"%s\" on \"%s\" on database \"%s\": %s",
                self.get_name(), conninfo['instance'], database, e)
        return output
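
The delta bookkeeping used above (self.delta(), delta_columns, delta_key) is defined elsewhere on the probe object. Below is a rough sketch of the contract the call site implies, assuming previous values are stored per key and the first call for a key yields no delta; this is an illustration, not the library's actual implementation:

import time


class DeltaStore(object):
    """Illustrative stand-in for the delta mechanism used by run_sql()."""

    def __init__(self):
        self._last = {}

    def delta(self, key, values):
        # Return (None, None) the first time a key is seen, then the elapsed
        # time in seconds and the per-column differences since the last call.
        now = time.time()
        previous = self._last.get(key)
        self._last[key] = (now, values)
        if previous is None:
            return None, None
        last_time, last_values = previous
        interval = now - last_time
        deltas = dict((k, values[k] - last_values.get(k, 0.0)) for k in values)
        return interval, deltas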
Example #9
    def run(self, conninfo):
        if not conninfo['standby']:
            return []

        try:
            with Postgres(**conninfo).connect() as conn:

                # Get primary parameters from primary_conninfo
                p_host, p_port, p_user, p_password = get_primary_conninfo(conn)

                # Fetch the primary's current WAL position with IDENTIFY_SYSTEM
                # through the streaming replication protocol.
                p_conn = connector(p_host, int(p_port), p_user, p_password,
                                   database='replication')
                p_conn._replication = 1
                p_conn.connect()
                p_conn.execute("IDENTIFY_SYSTEM")
                r = list(p_conn.get_rows())
                if len(r) == 0:
                    conn.close()
                    p_conn.close()
                    return []
                xlogpos = r[0]['xlogpos']
                p_conn.close()

                # Proceed with LSN diff
                if conn.server_version >= 100000:
                    rows = conn.query("""\
                    SELECT pg_wal_lsn_diff(
                      '{xlogpos}'::pg_lsn,
                       pg_last_wal_replay_lsn()
                    ) AS lsn_diff, NOW() AS datetime
                    """.format(xlogpos=xlogpos))
                else:
                    rows = conn.query("""\
                    SELECT pg_xlog_location_diff(
                      '{xlogpos}'::TEXT,
                       pg_last_xlog_replay_location()
                    ) AS lsn_diff, NOW() AS datetime
                    """.format(xlogpos=xlogpos))
                r = list(rows)
                if len(r) == 0:
                    return []
                return [{'lag': int(r[0]['lsn_diff']),
                        'datetime': r[0]['datetime']}]

        except Exception as e:
            logger.exception(str(e))
            return []
Example #10
def pg_add_super_user(pg_bin, pg_user, pg_host, pg_port, pg_password=''):
    """
    Create a new PostgreSQL super-user.
    """
    (ret_code, stdout, stderr) = exec_command(
        [pg_bin + "/createuser", "-h", pg_host, "-p", pg_port, "-ls", pg_user])
    if ret_code != 0:
        raise Exception(str(stderr))

    if not pg_password:
        return

    with Postgres(
            host=pg_host,
            port=pg_port,
            user=pg_user,
            dbname='postgres',
    ).connect() as conn:
        query = "ALTER USER %s PASSWORD '%s'" % (pg_user, pg_password)
        conn.execute(query)
Example #11
    def get_version(self, conninfo):
        try:
            with Postgres(**conninfo).connect() as conn:
                return conn.server_version
        except Exception:
            logger.error("Unable to get server version")
Example #12
def instance_info(conninfo, hostname):
    """Gather PostgreSQL instance information."""
    instance_info = {
        'hostname': hostname,
        'instance': conninfo['instance'],
        'local_name': conninfo.get('local_name', conninfo['instance']),
        'available': True,
        'host': conninfo['host'],
        'port': conninfo['port'],
        'user': conninfo['user'],
        'database': conninfo['database'],
        'password': conninfo['password']
    }

    # Try the connection
    try:
        with Postgres(**conninfo).connect() as conn:
            # Get PostgreSQL information using PgInfo
            pginfo = PgInfo(conn)
            pgv = pginfo.version()
            # Gather the info while we are connected
            instance_info['version_num'] = pgv['num']
            instance_info['version'] = pgv['server']
            instance_info['data_directory'] = pginfo.setting('data_directory')

            # hot standby is available from 9.0
            instance_info['standby'] = pginfo.is_in_recovery()

            # max_connections
            instance_info['max_connections'] = pginfo.setting(
                'max_connections')

            # Grab the list of tablespaces
            instance_info['tablespaces'] = pginfo.tablespaces(
                instance_info['data_directory'])

            # Whether the user listed specific database names or '*' in the
            # configuration file, we need the list of databases from the
            # server to resolve it. Since we have a working connection,
            # let's do it now.
            dbs = pginfo.databases()
            instance_info['dbnames'] = []
            for db in conninfo['dbnames']:
                if db == '*':
                    instance_info['dbnames'] = list(dbs.values())
                    break
                if db in dbs.keys():
                    instance_info['dbnames'].append(dbs[db])

        # Now that we have the data_directory, find the owner
        try:
            statinfo = os.stat(instance_info['data_directory'])
            instance_info['sysuser'] = pwd.getpwuid(statinfo.st_uid).pw_name
        except OSError as e:
            logging.warning("Unable to get the owner of PGDATA: %s", str(e))
            instance_info['sysuser'] = None

    except Exception as e:
        logging.exception(str(e))
        logging.warning("Unable to gather information for cluster \"%s\"",
                        conninfo['instance'])
        instance_info['available'] = False

    return instance_info