def run(self, conninfo):
    """Measure replication lag of a standby against its primary.

    Connects to the local standby, resolves the primary from
    primary_conninfo, fetches the primary's current WAL position via
    IDENTIFY_SYSTEM (streaming replication protocol), then computes the
    byte difference with the standby's last replayed LSN.

    Returns a one-element list [{'lag': <int bytes>, 'datetime': ...}],
    or [] when the instance is not a standby or on any error.
    """
    if not conninfo['standby']:
        return []
    conn = connector(conninfo['host'], conninfo['port'], conninfo['user'],
                     conninfo['password'], 'postgres')
    p_conn = None  # fix: track the primary conn so the error path can close it
    try:
        conn.connect()
        # Get primary parameters from primary_conninfo
        (p_host, p_port, p_user, p_password) = get_primary_conninfo(conn)
        # Let's fetch primary current wal position with IDENTIFY_SYSTEM
        # through streaming replication protocol.
        p_conn = connector(p_host, int(p_port), p_user, p_password,
                           database='replication')
        p_conn._replication = 1
        p_conn.connect()
        p_conn.execute("IDENTIFY_SYSTEM")
        r = list(p_conn.get_rows())
        if len(r) == 0:
            conn.close()
            p_conn.close()
            return []
        xlogpos = r[0]['xlogpos']
        p_conn.close()
        # Proceed with LSN diff; the xlog/wal function names changed in
        # PostgreSQL 10.
        if conn.get_pg_version() >= 100000:
            conn.execute(
                "SELECT pg_wal_lsn_diff("
                " '{xlogpos}'::pg_lsn,"
                " pg_last_wal_replay_lsn()"
                ") AS lsn_diff, NOW() AS datetime".format(xlogpos=xlogpos))
        else:
            conn.execute(
                "SELECT pg_xlog_location_diff("
                " '{xlogpos}'::TEXT,"
                " pg_last_xlog_replay_location()"
                ") AS lsn_diff, NOW() AS datetime".format(xlogpos=xlogpos))
        r = list(conn.get_rows())
        conn.close()
        if len(r) == 0:
            return []
        return [{
            'lag': int(r[0]['lsn_diff']),
            'datetime': r[0]['datetime']
        }]
    except Exception as e:
        logger.exception(str(e))
        # Fix: close BOTH connections best-effort; previously the primary
        # connection (p_conn) was leaked when an exception occurred after
        # it had been opened.
        for c in (conn, p_conn):
            if c is None:
                continue
            try:
                c.close()
            except Exception:
                pass
        return []
def run(self, conninfo):
    """Check whether the standby's WAL receiver is streaming from its
    primary, via a pg_stat_wal_receiver lookup.

    Returns a one-row list with 'upstream', 'datetime' and 'connected'
    (1/0) keys, or [] when not a standby or on error.
    """
    if not conninfo['standby']:
        return []
    conn = connector(conninfo['host'], conninfo['port'],
                     conninfo['user'], conninfo['password'], 'postgres')
    try:
        conn.connect()
        # Resolve the upstream host from primary_conninfo.
        p_host, p_port, p_user, p_password = get_primary_conninfo(conn)
        conn.execute(
            "SELECT '{p_host}' AS upstream, NOW() AS datetime, "
            "CASE WHEN COUNT(*) > 0 THEN 1 ELSE 0 END AS connected "
            "FROM pg_stat_wal_receiver "
            "WHERE status='streaming' AND "
            " conninfo LIKE '%host={p_host}%'".format(p_host=p_host))
        rows = list(conn.get_rows())
        conn.close()
        if not rows:
            return []
        return rows
    except Exception as e:
        logger.exception(str(e))
        try:
            conn.close()
        except Exception:
            pass
        return []
def run(self, conninfo):
    """Check whether the standby's WAL receiver is streaming from its
    primary, via a pg_stat_wal_receiver lookup (context-manager variant).

    Returns the query rows, or [] when not a standby or on error.
    """
    if not conninfo['standby']:
        return []
    # Fix: dropped the stray `conn = connector(...)` instance that was
    # created here and immediately shadowed by the context manager below
    # (it was never connected nor closed — dead code).
    try:
        with Postgres(**conninfo).connect() as conn:
            # Get primary parameters from primary_conninfo
            p_host, p_port, p_user, p_password = get_primary_conninfo(conn)
            # pg_stat_wal_receiver lookup
            rows = conn.query("""\
SELECT '{p_host}' AS upstream, NOW() AS datetime,
       CASE WHEN COUNT(*) > 0 THEN 1 ELSE 0 END AS connected
FROM pg_stat_wal_receiver
WHERE status='streaming' AND
      conninfo LIKE '%host={p_host}%'
""".format(p_host=p_host))
            r = list(rows)
            if len(r) == 0:
                return []
            return r
    except Exception as e:
        logger.exception(str(e))
        return []
def pg_add_super_user(pg_bin, pg_user, pg_host, pg_port, pg_password=''):
    """
    Create a new PostgreSQL super-user, optionally setting its password.

    Raises Exception when createuser exits non-zero or when the
    ALTER USER query fails.
    """
    (ret_code, stdout, stderr) = exec_command([
        pg_bin + "/createuser",
        "-h", pg_host,
        "-p", pg_port,
        "-ls", pg_user])
    if ret_code != 0:
        raise Exception(str(stderr))

    conn = connector(host=pg_host, port=pg_port, user=pg_user,
                     database='postgres')
    if len(pg_password) > 0:
        try:
            conn.connect()
            # Fix: escape single quotes by doubling them so the literal
            # stays valid; previously any quote in the password broke
            # (or injected into) the statement.
            query = "ALTER USER %s PASSWORD '%s'" % (
                pg_user, pg_password.replace("'", "''"))
            conn.execute(query)
            conn.close()
        except error as e:
            # Fix: use str(e) — exception objects have no .message
            # attribute on Python 3. Also close the connection
            # best-effort before re-raising.
            try:
                conn.close()
            except Exception:
                pass
            raise Exception(str(e))
def worker_vacuum(commands, command, config):
    """Background worker running a VACUUM on one table.

    Unpickles the command parameters (table, mode, database), runs the
    VACUUM and records command state/result via `commands.update()`.
    """
    start_time = time.time() * 1000
    set_logger_name("vacuum_worker")
    logger = get_logger(config)
    logger.info("Starting with pid=%s" % (os.getpid()))
    logger.debug("commandid=%s" % (command.commandid, ))
    # Fix: bind conn before the try block; previously a failure occurring
    # before the connector was created (e.g. in commands.update or
    # pickle.loads) made the except path raise NameError on conn.close().
    conn = None
    try:
        command.state = COMMAND_START
        command.time = time.time()
        commands.update(command)
        parameters = pickle.loads(base64.b64decode(command.parameters))
        logger.debug("table=%s, mode=%s, database=%s" % (
            parameters['table'],
            parameters['mode'],
            parameters['database'],
        ))
        conn = connector(host=config.postgresql['host'],
                         port=config.postgresql['port'],
                         user=config.postgresql['user'],
                         password=config.postgresql['password'],
                         database=parameters['database'])
        conn.connect()
        if parameters['mode'] == 'standard':
            query = "VACUUM %s" % (parameters['table'], )
        else:
            query = "VACUUM %s %s" % (
                parameters['mode'],
                parameters['table'],
            )
        conn.execute(query)
        conn.close()
    except Exception as e:
        # NOTE: Exception already covers `error` and SharedItem_not_found;
        # the original redundant tuple is collapsed.
        command.state = COMMAND_ERROR
        command.result = str(e)
        command.time = time.time()
        logger.traceback(get_tb())
        logger.error(str(e))
        try:
            commands.update(command)
            if conn is not None:
                conn.close()
        except Exception:
            pass
        logger.info("Failed.")
        return
    try:
        command.state = COMMAND_DONE
        command.time = time.time()
        commands.update(command)
    except Exception as e:
        logger.traceback(get_tb())
        logger.error(str(e))
    logger.info("Done.")
    logger.debug(" in %s s." % (str(
        (time.time() * 1000 - start_time) / 1000), ))
def run_sql(self, conninfo, sql, database=None):
    """Get the result of the SQL query.

    Runs *sql* on the given database (defaulting to the configured one),
    tags each row with the instance port, and optionally computes deltas
    for the probe's delta columns. Returns a list of row dicts; [] when
    sql is None or on error.
    """
    if sql is None:
        return []
    # Default the connection database to the one configured,
    # useful for instance level sql probes
    if database is None:
        database = conninfo['database']
    conn = connector(conninfo['host'], conninfo['port'], conninfo['user'],
                     conninfo['password'], database)
    output = []
    try:
        conn.connect()
        conn.execute(sql)
        for r in conn.get_rows():
            # Add the info of the instance (port) to the
            # result to output one big list for all instances and
            # all databases
            r['port'] = conninfo['port']
            # Compute delta if the probe needs that
            if self.delta_columns is not None:
                to_delta = {}
                # XXX. Convert results to float(), spc retrieves
                # everything as string. So far psycopg2 on the
                # server side handles to rest
                for k in self.delta_columns:
                    if k in r.keys():
                        to_delta[k] = float(r[k])
                # Create the store key for the delta
                if self.delta_key is not None:
                    key = conninfo['instance'] + database + \
                        r[self.delta_key]
                else:
                    key = conninfo['instance'] + database
                # Calculate delta
                (interval, deltas) = self.delta(key, to_delta)
                # The first time, no delta is returned
                if interval is None:
                    continue
                # Merge result and add the interval column
                r.update(deltas)
                r[self.delta_interval_column] = interval
            output.append(r)
        conn.close()
    except error:
        logger.error(
            "Unable to run probe \"%s\" on \"%s\" on database \"%s\"",
            self.get_name(), conninfo['instance'], database)
        # Fix: the connection used to leak on error — close best-effort.
        try:
            conn.close()
        except Exception:
            pass
    return output
def _exec_query(self, dbname, query):
    """Run a single query against *dbname* and return its rows as a list."""
    cnx = connector(host=ENV['pg']['socket_dir'],
                    port=ENV['pg']['port'],
                    user=ENV['pg']['user'],
                    password=ENV['pg']['password'],
                    database=dbname)
    cnx.connect()
    cnx.execute(query)
    cnx.close()
    # NOTE(review): rows are read after close() — presumably the connector
    # buffers the result set client-side; confirm against its implementation.
    return list(cnx.get_rows())
def _drop_dummy_db(self, dbname):
    """Drop the throwaway test database *dbname*."""
    cnx = connector(host=ENV['pg']['socket_dir'],
                    port=ENV['pg']['port'],
                    user=ENV['pg']['user'],
                    password=ENV['pg']['password'],
                    database='postgres')
    cnx.connect()
    cnx.execute("DROP DATABASE %s" % (dbname))
    cnx.close()
def get_version(self, conninfo):
    """Return the PostgreSQL server version number, or None on error."""
    conn = connector(conninfo['host'], conninfo['port'], conninfo['user'],
                     conninfo['password'], conninfo['database'])
    try:
        conn.connect()
        version = conn.get_pg_version()
        conn.close()
        return version
    except error:
        logger.error("Unable to get server version")
        # Fix: close the connection on the error path too — it used to
        # leak when get_pg_version() failed after a successful connect().
        try:
            conn.close()
        except Exception:
            pass
def create_database(dbname):
    """ Create a database. """
    cnx = connector(host=ENV['pg']['socket_dir'],
                    port=ENV['pg']['port'],
                    user=ENV['pg']['user'],
                    password=ENV['pg']['password'],
                    database='postgres')
    try:
        cnx.connect()
        cnx.execute("CREATE DATABASE %s" % (dbname))
        cnx.close()
    except error:
        # Best-effort test helper: ignore DB errors (e.g. already exists).
        pass
def update_rows(dbname, tablename):
    """ Update all rows of a table. """
    cnx = connector(host=ENV['pg']['socket_dir'],
                    port=ENV['pg']['port'],
                    user=ENV['pg']['user'],
                    password=ENV['pg']['password'],
                    database=dbname)
    try:
        cnx.connect()
        cnx.execute("UPDATE %s SET id = id + 1" % (tablename))
        cnx.close()
    except error:
        # Best-effort test helper: swallow DB errors.
        pass
def pg_sleep(duration=1):
    """ Start a new PG connection and run pg_sleep() """
    cnx = connector(host=ENV['pg']['socket_dir'],
                    port=ENV['pg']['port'],
                    user=ENV['pg']['user'],
                    password=ENV['pg']['password'],
                    database='postgres')
    try:
        cnx.connect()
        cnx.execute("SELECT pg_sleep(%s)" % (duration))
        cnx.close()
    except error:
        # Best-effort test helper: swallow DB errors.
        pass
def conn():
    """Yield a connected PG connection; always close it afterwards."""
    connection = connector(
        host=ENV["pg"]["socket_dir"],
        port=ENV["pg"]["port"],
        user=ENV["pg"]["user"],
        password=ENV["pg"]["password"],
        database="postgres",
    )
    connection.connect()
    try:
        yield connection
    finally:
        # Guaranteed cleanup even if the consuming test fails.
        connection.close()
def _create_dummy_table(self, dbname, tablename):
    """Create *tablename* in *dbname* and fill it with 500k integer rows."""
    cnx = connector(host=ENV['pg']['socket_dir'],
                    port=ENV['pg']['port'],
                    user=ENV['pg']['user'],
                    password=ENV['pg']['password'],
                    database=dbname)
    cnx.connect()
    cnx.execute("CREATE TABLE %s (id INTEGER)" % (tablename))
    cnx.execute(
        "INSERT INTO %s SELECT generate_series(1, 500000)" % (tablename))
    cnx.close()
def create_table(dbname, tablename):
    """ Create a table and insert 5 rows in it. """
    cnx = connector(host=ENV['pg']['socket_dir'],
                    port=ENV['pg']['port'],
                    user=ENV['pg']['user'],
                    password=ENV['pg']['password'],
                    database=dbname)
    try:
        cnx.connect()
        cnx.execute("CREATE TABLE %s (id INTEGER)" % (tablename))
        cnx.execute(
            "INSERT INTO %s SELECT generate_series(1, 5)" % (tablename))
        cnx.close()
    except error:
        # Best-effort test helper: swallow DB errors.
        pass
def test_00_env_pg(self):
    """
    [api] 00: PostgreSQL instance is up & running
    """
    cnx = connector(host=ENV['pg']['socket_dir'],
                    port=ENV['pg']['port'],
                    user=ENV['pg']['user'],
                    password=ENV['pg']['password'],
                    database='postgres')
    try:
        # A successful connect/close proves the instance is reachable.
        cnx.connect()
        cnx.close()
        assert True
    except error:
        assert False
def lock_table_exclusive(dbname, tablename, duration):
    """ Lock a table in exclusive mode for a while (duration in seconds). """
    cnx = connector(host=ENV['pg']['socket_dir'],
                    port=ENV['pg']['port'],
                    user=ENV['pg']['user'],
                    password=ENV['pg']['password'],
                    database=dbname)
    try:
        cnx.connect()
        # Hold the lock inside an explicit transaction, then roll back.
        cnx.execute("BEGIN")
        cnx.execute("LOCK TABLE %s IN EXCLUSIVE MODE" % (tablename))
        time.sleep(float(duration))
        cnx.execute("ROLLBACK")
        cnx.close()
    except error:
        # Best-effort test helper: swallow DB errors.
        pass
def test_00_env_pg(self):
    """
    [activity] 00: PostgreSQL instance is up & running
    """
    global XSESSION
    cnx = connector(host=ENV['pg']['socket_dir'],
                    port=ENV['pg']['port'],
                    user=ENV['pg']['user'],
                    password=ENV['pg']['password'],
                    database='postgres')
    try:
        # A successful connect/close proves the instance is reachable,
        # then cache an agent session for the following tests.
        cnx.connect()
        cnx.close()
        XSESSION = self._temboard_login()
        assert True
    except error:
        assert False
def dashboard_collector_worker(config):
    """Collect dashboard metrics once and push them onto the queue.

    Exits the process with status 1 on any failure.
    """
    try:
        signal.signal(signal.SIGTERM, dashboard_worker_sigterm_handler)
        logger.debug("Collecting data")
        conn = connector(
            host=config['postgresql']['host'],
            port=config['postgresql']['port'],
            user=config['postgresql']['user'],
            password=config['postgresql']['password'],
            database=config['postgresql']['dbname']
        )
        conn.connect()
        # metrics.get_metrics() expects attribute-style config access, so
        # expose the config dict through a namedtuple.
        config_nt = collections.namedtuple(
            '__config',
            ['temboard', 'plugins', 'postgresql', 'logging']
        )(
            temboard=config['temboard'],
            plugins=config['plugins'],
            postgresql=config['postgresql'],
            logging=config['logging']
        )
        payload = metrics.get_metrics(conn, config_nt)
        conn.close()
        # We don't want to store notifications in the history.
        payload.pop('notifications', None)
        queue = Queue(
            '%s/dashboard.q' % (config['temboard']['home']),
            max_length=(config['plugins']['dashboard']['history_length']
                        + 1),
            overflow_mode='slide'
        )
        queue.push(Message(content=json.dumps(payload)))
        logger.debug(payload)
        logger.debug("End")
    except (error, Exception) as e:
        logger.error("Could not collect data")
        logger.exception(e)
        try:
            conn.close()
        except Exception:
            pass
        sys.exit(1)
def worker_vacuum(commands, command, config):
    """Background worker running a VACUUM on one table (administration
    plugin variant). Records command state via `commands.update()`."""
    start_time = time.time() * 1000
    set_logger_name("administration_worker")
    logger = get_logger(config)
    logger.info("[vacuum] Starting: pid=%s commandid=%s" % (
        os.getpid(), command.commandid,))
    command.state = COMMAND_START
    command.time = time.time()
    commands.update(command)
    parameters = pickle.loads(base64.b64decode(command.parameters))
    logger.info("[vacuum] table=%s, mode=%s, database=%s" % (
        parameters['table'], parameters['mode'], parameters['database'],))
    conn = connector(
        host=config.postgresql['host'],
        port=config.postgresql['port'],
        user=config.postgresql['user'],
        password=config.postgresql['password'],
        database=parameters['database']
    )
    try:
        conn.connect()
        if parameters['mode'] == 'standard':
            query = "VACUUM %s" % (parameters['table'],)
        else:
            query = "VACUUM %s %s" % (
                parameters['mode'], parameters['table'],)
        conn.execute(query)
        conn.close()
    except error as e:
        command.state = COMMAND_ERROR
        # Fix: use str(e) — the .message attribute does not exist on
        # Python 3 exceptions and raised AttributeError here.
        command.result = str(e)
        command.time = time.time()
        commands.update(command)
        logger.error("%s" % (str(e)))
        try:
            conn.close()
        except Exception:
            pass
        return
    command.state = COMMAND_DONE
    command.time = time.time()
    commands.update(command)
    logger.info("[vacuum] done in %s s." % (
        str((time.time() * 1000 - start_time) / 1000),))
def api_function_wrapper_pg(config, http_context, sessions, module,
                            function_name):
    """
    API function wrapper in charge of:
        - instanciating a new logger;
        - check the user session id;
        - start a new PostgreSQL connection;
        - call the function 'function_name' from 'module_name' module and
          return its result;
        - close PG connection.
    """
    logger.debug("Calling %s.%s()." % (module.__name__, function_name,))
    logger.debug(http_context)
    try:
        http_context['username'] = check_sessionid(
            http_context['headers'], sessions)
        conn = connector(host=config.postgresql['host'],
                         port=config.postgresql['port'],
                         user=config.postgresql['user'],
                         password=config.postgresql['password'],
                         database=config.postgresql['dbname'])
        conn.connect()
        result = getattr(module, function_name)(conn, config, http_context)
        conn.close()
        logger.debug("Done.")
        return result
    except (error, Exception, HTTPError) as e:
        logger.exception(str(e))
        logger.debug("Failed.")
        try:
            conn.close()
        except Exception:
            pass
        if isinstance(e, HTTPError):
            raise e
        else:
            raise HTTPError(500, "Internal error.")
def _get_duration_since_last_vacuum(self, dbname, tablename):
    """Return seconds elapsed since the last manual VACUUM of *tablename*
    (0 when it was never vacuumed)."""
    cnx = connector(host=ENV['pg']['socket_dir'],
                    port=ENV['pg']['port'],
                    user=ENV['pg']['user'],
                    password=ENV['pg']['password'],
                    database=dbname)
    cnx.connect()
    cnx.execute("""
    SELECT coalesce(EXTRACT(EPOCH FROM (NOW() - last_vacuum)),0) AS duration
    FROM pg_stat_user_tables
    WHERE relname = '%s'""" % (tablename))
    cnx.close()
    # NOTE(review): rows are read after close() — presumably the connector
    # buffers the result set client-side; confirm against its implementation.
    return list(cnx.get_rows())[0]['duration']
def api_function_wrapper_pg(config, http_context, sessions, module,
                            function_name):
    """
    Simple API function wrapper in charge of:
        - instanciate a new logger;
        - check the user session id;
        - start a new PostgreSQL connexion;
        - call a function named 'function_name' from 'module_name' module
          and return its result;
        - close the PG connexion.
    """
    logger = get_logger(config)
    logger.info("%s - %s" % (module.__name__, function_name,))
    username = check_sessionid(http_context['headers'], sessions)
    http_context['username'] = username
    conn = connector(host=config.postgresql['host'],
                     port=config.postgresql['port'],
                     user=config.postgresql['user'],
                     password=config.postgresql['password'],
                     database=config.postgresql['dbname'])
    try:
        conn.connect()
        dm = getattr(module, function_name)(conn, config, http_context)
        conn.close()
        return dm
    except (error, Exception, HTTPError) as e:
        logger.error(format_exc())
        try:
            conn.close()
        except Exception:
            pass
        if isinstance(e, HTTPError):
            # Fix: re-raise the original HTTPError unchanged. Rebuilding
            # it from e.message['error'] broke on Python 3 (exceptions
            # have no .message attribute) and discarded the payload.
            raise e
        else:
            raise HTTPError(500, "Internal error.")
def dashboard_collector_worker(commands, command, config):
    """One-shot dashboard collector: fetch metrics and push them on the
    queue. Exits the process with status 1 on failure."""
    try:
        signal.signal(signal.SIGTERM, dashboard_worker_sigterm_handler)
        start_time = time.time() * 1000
        set_logger_name("dashboard_collector")
        logger = get_logger(config)
        logger.debug("Starting with pid=%s" % (getpid()))
        logger.debug("commandid=%s" % (command.commandid))
        command.state = COMMAND_START
        command.time = time.time()
        command.pid = getpid()
        commands.update(command)
        conn = connector(host=config.postgresql['host'],
                         port=config.postgresql['port'],
                         user=config.postgresql['user'],
                         password=config.postgresql['password'],
                         database=config.postgresql['dbname'])
        conn.connect()
        payload = metrics.get_metrics(conn, config)
        # We don't want to store notifications in the history.
        payload.pop('notifications', None)
        conn.close()
        queue = Queue('%s/dashboard.q' % (config.temboard['home']),
                      max_length=(config.plugins['dashboard']
                                  ['history_length'] + 1),
                      overflow_mode='slide')
        queue.push(Message(content=json.dumps(payload)))
        logger.debug("Duration: %s." % (str(time.time() * 1000 - start_time)))
        logger.debug("Done.")
    except (error, Exception) as e:
        logger.traceback(get_tb())
        logger.error(str(e))
        logger.debug("Failed.")
        try:
            conn.close()
        except Exception:
            pass
        sys.exit(1)
def xsession(env):
    """Log into the agent and return a session id (fixture helper)."""
    ENV.update(env)
    # Sanity-check that PostgreSQL accepts connections first.
    cnx = connector(
        host=ENV["pg"]["socket_dir"],
        port=ENV["pg"]["port"],
        user=ENV["pg"]["user"],
        password=ENV["pg"]["password"],
        database="postgres",
    )
    cnx.connect()
    cnx.close()
    status, res = temboard_request(
        ENV["agent"]["ssl_cert_file"],
        method="POST",
        url="https://{host}:{port}/login".format(**ENV["agent"]),
        headers={"Content-type": "application/json"},
        data={
            "username": ENV["agent"]["user"],
            "password": ENV["agent"]["password"],
        },
    )
    assert status == 200
    return json.loads(res)["session"]
def get_discover(http_context, config=None, sessions=None):
    """Discover host and instance facts: hostname, cpu count, memory,
    PG port/version/data directory and enabled plugins."""
    cnx = connector(host=config.postgresql['host'],
                    port=config.postgresql['port'],
                    user=config.postgresql['user'],
                    password=config.postgresql['password'],
                    database=config.postgresql['dbname'])
    logger.info('Starting discovery.')
    try:
        cnx.connect()
        sysinfo = SysInfo()
        pginfo = PgInfo(cnx)
        discover = {
            'hostname': sysinfo.hostname(config.temboard['hostname']),
            'cpu': sysinfo.n_cpu(),
            'memory_size': sysinfo.memory_size(),
            'pg_port': pginfo.setting('port'),
            'pg_version': pginfo.version()['full'],
            'pg_data': pginfo.setting('data_directory'),
            'plugins': [plugin_name
                        for plugin_name in config.temboard['plugins']],
        }
        cnx.close()
        logger.info('Discovery done.')
        return discover
    except (error, Exception, HTTPError) as e:
        logger.exception(str(e))
        logger.info('Discovery failed.')
        try:
            cnx.close()
        except Exception:
            pass
        if isinstance(e, HTTPError):
            raise e
        else:
            raise HTTPError(500, "Internal error.")
def post_pg_control(http_context, queue_in=None, config=None,
                    sessions=None, commands=None):
    # NOTE: in this case we don't want to use api functions wrapper, it
    # leads to "Broken pipe" error with debian init.d on start/restart.
    # This is probably due to getattr() call.
    set_logger_name("administration")
    # Get a new logger.
    logger = get_logger(config)
    check_sessionid(http_context['headers'], sessions)
    post = http_context['post']
    # Check POST parameters.
    validate_parameters(post, [
        ('action', T_CONTROL, False)
    ])
    try:
        session = sessions.get_by_sessionid(
            http_context['headers']['X-Session'].encode('utf-8'))
        NotificationMgmt.push(config, Notification(
            username=session.username,
            message="PostgreSQL %s" % (post['action'])))
    except NotificationError as e:
        logger.error(e.message)

    cmd_args = oneline_cmd_to_array(
        config.plugins['administration']['pg_ctl'] % (post['action']))
    (rcode, stdout, stderr) = exec_script(cmd_args)
    if rcode != 0:
        raise HTTPError(408, str(stderr))

    # Let's check if postgresql is up & running on 'start' or 'restart'
    # action.
    if post['action'] in ['start', 'restart']:
        conn = connector(
            host=config.postgresql['host'],
            port=config.postgresql['port'],
            user=config.postgresql['user'],
            password=config.postgresql['password'],
            database=config.postgresql['dbname']
        )
        # After the startup/pg_ctl script is executed, poll the server
        # (connect/SELECT 1/disconnect) every 0.5 second for at most 10
        # seconds until it accepts connections.
        t_start = time.time()
        while True:
            try:
                conn.connect()
                conn.execute('SELECT 1')
                conn.close()
                return {'action': post['action'], 'state': 'ok'}
            except error:
                if (time.time() - t_start) > 10:
                    try:
                        conn.close()
                    except (error, Exception):
                        pass
                    return {'action': post['action'], 'state': 'ko'}
            time.sleep(0.5)
    elif post['action'] == 'stop':
        conn = connector(
            host=config.postgresql['host'],
            port=config.postgresql['port'],
            user=config.postgresql['user'],
            password=config.postgresql['password'],
            database=config.postgresql['dbname']
        )
        # Check the PG conn is not working anymore: poll every 0.5 second
        # for at most 10 seconds; a connection failure means the stop
        # succeeded.
        try:
            t_start = time.time()
            while True:
                conn.connect()
                conn.execute('SELECT 1')
                conn.close()
                time.sleep(0.5)
                if (time.time() - t_start) > 10:
                    return {'action': post['action'], 'state': 'ko'}
        except error:
            return {'action': post['action'], 'state': 'ok'}
    return {'action': post['action'], 'state': 'ok'}
def load_plugins_configurations(config):
    """
    Intend to load plugins and run their configuration() function.
    Plugins are defined as a module located in plugins/ directory. The
    list of plugins to load is set into temboard section of the
    configuration file:
        [temboard]
        plugins = [ "plugin1", "plugin2" ]

    Returns a dict mapping plugin module name to its configuration.
    """
    # Get this module's path.
    path = os.path.dirname(__file__)
    ret = dict()
    # Get the logger.
    logger = get_logger(config)
    # PostgreSQL version: retry every 5 seconds until the server answers.
    pg_version = 0
    while pg_version == 0:
        try:
            conn = connector(
                host=config.postgresql['host'],
                port=config.postgresql['port'],
                user=config.postgresql['user'],
                password=config.postgresql['password'],
                database=config.postgresql['dbname']
            )
            # Trying to get PostgreSQL version number.
            conn.connect()
            pg_version = conn.get_pg_version()
            conn.close()
        except Exception as e:
            logger.traceback(get_tb())
            logger.error(str(e))
            logger.error("Not able to get PostgreSQL version number.")
            try:
                conn.close()
            except Exception:
                pass
        # If we reach this point, PostgreSQL is not available, so we
        # wait 5 seconds and try again
        if pg_version == 0:
            time.sleep(5)

    # Loop through each plugin listed in the configuration file.
    for plugin_name in config.temboard['plugins']:
        logger.info("Loading plugin '%s'." % (plugin_name,))
        # Fix: bind the file handles before the try blocks so the except
        # handlers cannot hit NameError when find_module() itself raises.
        fp_s = None
        fp = None
        try:
            # Loading compat.py file
            fp_s, pathname_s, description_s = imp.find_module(
                'compat', [path + '/plugins/' + plugin_name])
            module_compat = imp.load_module(
                'compat', fp_s, pathname_s, description_s)
            # Check modules's PG_MIN_VERSION
            try:
                if (module_compat.PG_MIN_VERSION > pg_version):
                    # Version not supported
                    logger.error(
                        "PostgreSQL version (%s) is not supported (min:%s)."
                        % (pg_version, module_compat.PG_MIN_VERSION))
                    logger.info("Failed.")
                    continue
            except AttributeError:
                # PG_MIN_VERSION not set.
                # Fix: a missing module attribute raises AttributeError,
                # not ValueError — the previous handler was dead code and
                # the error fell through to the outer except.
                pass
        except Exception as e:
            if fp_s:
                fp_s.close()
            logger.info("Not able to load the compatibility file: "
                        "compat.py.")
        logger.info("Done.")
        try:
            # Locate and load the module with imp.
            fp, pathname, description = imp.find_module(
                plugin_name, [path + '/plugins'])
            module = imp.load_module(plugin_name, fp, pathname, description)
            # Try to run module's configuration() function.
            logger.info("Loading plugin '%s' configuration." % (plugin_name))
            plugin_configuration = getattr(module, 'configuration')(config)
            ret.update({module.__name__: plugin_configuration})
            logger.info("Done.")
        except AttributeError:
            logger.info("No configuration.")
        except Exception as e:
            if fp:
                fp.close()
            logger.traceback(get_tb())
            logger.error(str(e))
            logger.info("Failed.")
    return ret
def post_pg_control(http_context, config=None, sessions=None):
    # NOTE: in this case we don't want to use api functions wrapper, it
    # leads to "Broken pipe" error with debian init.d script on
    # start/restart. This is probably due to getattr() call.
    post = http_context['post']
    try:
        check_sessionid(http_context['headers'], sessions)
        # Check POST parameters.
        validate_parameters(post, [('action', T_CONTROL, False)])
        session = sessions.get_by_sessionid(
            http_context['headers']['X-Session'].encode('utf-8'))
    except (Exception, HTTPError) as e:
        logger.exception(str(e))
        logger.debug(http_context)
        if isinstance(e, HTTPError):
            raise e
        raise HTTPError(500, "Internal error.")

    # Push a notification; failures here are logged but non-fatal.
    try:
        NotificationMgmt.push(
            config,
            Notification(username=session.username,
                         message="PostgreSQL %s" % post['action']))
    except (NotificationError, Exception) as e:
        logger.exception(str(e))

    try:
        logger.info("PostgreSQL '%s' requested." % (post['action']))
        cmd_args = oneline_cmd_to_array(
            config.plugins['administration']['pg_ctl'] % (post['action']))
        (rcode, stdout, stderr) = exec_script(cmd_args)
        if rcode != 0:
            raise Exception(str(stderr))

        # Let's check if PostgreSQL is up & running after having executed
        # 'start' or 'restart' action.
        if post['action'] in ['start', 'restart']:
            conn = connector(host=config.postgresql['host'],
                             port=config.postgresql['port'],
                             user=config.postgresql['user'],
                             password=config.postgresql['password'],
                             database=config.postgresql['dbname'])
            # Poll the server (connect/SELECT 1/disconnect) every 0.5
            # second for at most 10 seconds until it accepts connections.
            t_start = time.time()
            while True:
                try:
                    conn.connect()
                    conn.execute('SELECT 1')
                    conn.close()
                    logger.info("Done.")
                    return {'action': post['action'], 'state': 'ok'}
                except error:
                    if (time.time() - t_start) > 10:
                        try:
                            conn.close()
                        except (error, Exception):
                            pass
                        logger.info("Failed.")
                        return {'action': post['action'], 'state': 'ko'}
                time.sleep(0.5)
        elif post['action'] == 'stop':
            conn = connector(host=config.postgresql['host'],
                             port=config.postgresql['port'],
                             user=config.postgresql['user'],
                             password=config.postgresql['password'],
                             database=config.postgresql['dbname'])
            # Check the PG conn is not working anymore: poll every 0.5
            # second for at most 10 seconds; a connection failure means
            # the stop succeeded.
            try:
                t_start = time.time()
                while True:
                    conn.connect()
                    conn.execute('SELECT 1')
                    conn.close()
                    time.sleep(0.5)
                    if (time.time() - t_start) > 10:
                        logger.info("Failed.")
                        return {'action': post['action'], 'state': 'ko'}
            except error:
                logger.info("Done.")
                return {'action': post['action'], 'state': 'ok'}
        logger.info("Done.")
        return {'action': post['action'], 'state': 'ok'}
    except (Exception, error, HTTPError) as e:
        logger.exception(str(e))
        logger.info("Failed")
        if isinstance(e, HTTPError):
            raise e
        raise HTTPError(500, "Internal error.")
def instance_info(conninfo, hostname):
    """Gather PostgreSQL instance information.

    Builds a dict describing the instance (connection parameters,
    version, data directory, standby state, tablespaces, databases) and
    probes the server to fill it. On connection error the dict is
    returned with 'available' set to False.
    """
    # Ensure we have all connection parameters, falling back on the usual
    # libpq environment variables and defaults.
    if 'host' not in conninfo.keys():
        host = os.environ.get('PGHOST')
        if host is None:
            # Debian has a non-default unix_socket_directory
            if os.path.exists('/etc/debian_version'):
                host = '/var/run/postgresql'
            else:
                host = '/tmp'
    else:
        host = conninfo['host']
    if 'port' not in conninfo.keys():
        port = os.environ.get('PGPORT')
        if port is None:
            port = 5432
    else:
        port = conninfo['port']
    if 'user' not in conninfo.keys():
        user = os.environ.get('PGUSER')
        if user is None:
            user = pwd.getpwuid(os.geteuid()).pw_name
    else:
        user = conninfo['user']
    if 'database' not in conninfo.keys():
        database = os.environ.get('PGDATABASE')
        if database is None:
            database = user
    else:
        database = conninfo['database']
    if 'password' not in conninfo.keys():
        password = os.environ.get('PGPASSWORD')
        if password is None:
            # pgpass file handling (best-effort: any parse error leaves
            # password as None).
            # NOTE(review): the entry fields and *port* may be strings
            # while a conninfo port can be an int — the equality test
            # would then never match; confirm get_pgpass()'s types.
            try:
                pgpa_file = os.environ.get('PGPASSFILE')
                for (pgpa_h, pgpa_p, pgpa_d, pgpa_u, pgpa_pwd) in \
                        get_pgpass(pgpa_file):
                    if (pgpa_h == host or pgpa_h == '*') and \
                       (pgpa_p == port or pgpa_p == '*') and \
                       (pgpa_u == user or pgpa_u == '*') and \
                       (pgpa_d == database or pgpa_d == '*'):
                        password = pgpa_pwd
                        # Fix: .pgpass semantics are first-match-wins;
                        # the previous 'continue' kept scanning and let a
                        # later entry override the first match.
                        break
            except Exception:
                pass
    else:
        password = conninfo['password']

    instance_info = {
        'hostname': hostname,
        'instance': conninfo['instance'],
        'local_name': conninfo.get('local_name', conninfo['instance']),
        'available': True,
        'host': host,
        'port': port,
        'user': user,
        'password': password,
        'database': database,
    }

    # Try the connection
    conn = connector(host, port, user, password, database)
    try:
        conn.connect()
        # Gather the info while we are connected
        instance_info['version_num'] = conn.get_pg_version()
        conn.execute(
            "SELECT setting FROM pg_settings WHERE name = 'server_version';")
        instance_info['version'] = list(conn.get_rows())[0]['setting']
        conn.execute(
            "SELECT setting FROM pg_settings WHERE name = 'data_directory';")
        instance_info['data_directory'] = list(conn.get_rows())[0]['setting']

        # hot standby is available from 9.0
        instance_info['standby'] = False
        if instance_info['version_num'] >= 90000:
            conn.execute("SELECT pg_is_in_recovery() AS standby;")
            if list(conn.get_rows())[0]['standby'] == 't':
                instance_info['standby'] = True

        # Grab the list of tablespaces; pg_tablespace lost the
        # spclocation column in 9.2.
        if instance_info['version_num'] < 90200:
            conn.execute("SELECT spcname, spclocation FROM pg_tablespace;")
        else:
            conn.execute(
                "SELECT spcname, pg_tablespace_location(oid) AS spclocation"
                " FROM pg_tablespace;")
        instance_info['tablespaces'] = []
        fs = get_mount_points()
        for row in conn.get_rows():
            # when spclocation is empty, replace with data_directory
            if row['spclocation'] is None:
                path = instance_info['data_directory']
            else:
                path = row['spclocation']
            # Include hostname and port to ease processing by the server
            instance_info['tablespaces'].append({
                'hostname': hostname,
                'port': instance_info['port'],
                'spcname': row['spcname'],
                'path': path,
                'mount_point': find_mount_point(path, fs)
            })

        # When the user has not given a dbnames list or '*' in the
        # configuration file, we must get the list of databases. Since
        # we have a working connection, let's do it now.
        conn.execute("""
        SELECT datname AS dbname, pg_encoding_to_char(encoding) AS encoding
        FROM pg_database
        WHERE datallowconn;
        """)
        dbs = {}
        for r in conn.get_rows():
            r['hostname'] = hostname
            r['port'] = instance_info['port']
            dbs[r['dbname']] = r
        instance_info['dbnames'] = []
        for db in conninfo['dbnames']:
            if db == '*':
                instance_info['dbnames'] = dbs.values()
                break
            if db in dbs.keys():
                instance_info['dbnames'].append(dbs[db])
        conn.close()

        # Now that we have the data_directory, find the owner
        try:
            statinfo = os.stat(instance_info['data_directory'])
            instance_info['sysuser'] = pwd.getpwuid(statinfo.st_uid).pw_name
        except OSError as e:
            logging.warning("Unable to get the owner of PGDATA: %s", str(e))
            instance_info['sysuser'] = None
    except error as e:
        logging.warning("Unable to gather information for cluster \"%s\"",
                        conninfo['instance'])
        instance_info['available'] = False
        # Fix: close the connection best-effort on error (it used to leak
        # when a query failed after a successful connect()).
        try:
            conn.close()
        except Exception:
            pass
    return instance_info