def main():
    """Entry point: detect or configure PostgreSQL clusters and run the monitor loop.

    Connection sources are tried in priority order: configuration file (-c),
    explicit command-line host, service name, then autodetection of running
    postmasters.  Exits with 254 if psycopg2 is missing, 1 on bad arguments or
    no usable clusters, and 0 otherwise (see NOTE on the finally clause below).
    """
    global options
    # psycopg2 is a hard requirement; bail out early with a distinctive exit code.
    if not psycopg2_available:
        print(
            'Unable to import psycopg2 module, please, install it (python-psycopg2). Can not continue'
        )
        sys.exit(254)
    options, args = parse_args()
    consts.TICK_LENGTH = options.tick
    output_method = options.output_method
    if not output_method_is_valid(output_method):
        print('Unsupported output method: {0}'.format(output_method))
        print('Valid output methods are: {0}'.format(','.join(
            get_valid_output_methods())))
        sys.exit(1)
    # Degrade gracefully to console output when curses cannot be imported.
    if output_method == OUTPUT_METHOD.curses and not curses_available:
        print(
            'Curses output is selected, but curses are unavailable, falling back to console output'
        )
        output_method = OUTPUT_METHOD.console
    # set basic logging
    setup_logger(options)
    clusters = []
    config = read_configuration(
        options.config_file) if options.config_file else None
    dbversion = None
    # configuration file takes priority over the rest of database connection information sources.
    if config:
        for instance in config:
            # -i/--instance filters which configured instances are used.
            if options.instance and instance != options.instance:
                continue
            # pass already acquired connections to make sure we only list unique clusters.
            host = config[instance].get('host')
            port = config[instance].get('port')
            conn = build_connection(host, port,
                                    config[instance].get('user'),
                                    config[instance].get('dbname'))
            if not establish_user_defined_connection(instance, conn, clusters):
                logger.error(
                    'failed to acquire details about ' +
                    'the database cluster {0}, the server will be skipped'.
                    format(instance))
    elif options.host:
        # connect to the database using the connection string supplied from command-line
        conn = build_connection(options.host, options.port, options.username,
                                options.dbname)
        instance = options.instance or "default"
        if not establish_user_defined_connection(instance, conn, clusters):
            logger.error(
                "unable to continue with cluster {0}".format(instance))
    elif options.use_service and options.instance:
        # connect to the database using the service name
        if not establish_user_defined_connection(
                options.instance, {'service': options.instance}, clusters):
            logger.error("unable to continue with cluster {0}".format(
                options.instance))
    else:
        # do autodetection
        postmasters = get_postmasters_directories()
        # get all PostgreSQL instances
        for result_work_dir, data in postmasters.items():
            (ppid, dbversion, dbname) = data
            # if user requested a specific database name and version - don't try to connect to others
            if options.instance:
                if dbname != options.instance or not result_work_dir or not ppid:
                    continue
                if options.version is not None and dbversion != options.version:
                    continue
            try:
                conndata = detect_db_connection_arguments(
                    result_work_dir, ppid, dbversion, options.username,
                    options.dbname)
                if conndata is None:
                    continue
                host = conndata['host']
                port = conndata['port']
                conn = build_connection(host, port, options.username,
                                        options.dbname)
                pgcon = psycopg2.connect(**conn)
            except Exception as e:
                # A failed candidate is logged and skipped, not fatal.
                logger.error('PostgreSQL exception {0}'.format(e))
                pgcon = None
            if pgcon:
                desc = make_cluster_desc(name=dbname,
                                         version=dbversion,
                                         workdir=result_work_dir,
                                         pid=ppid,
                                         pgcon=pgcon,
                                         conn=conn)
                clusters.append(desc)
    collectors = []
    groups = {}
    try:
        if len(clusters) == 0:
            logger.error(
                'No suitable PostgreSQL instances detected, exiting...')
            logger.error(
                'hint: use -v for details, ' +
                'or specify connection parameters manually in the configuration file (-c)'
            )
            # NOTE(review): this SystemExit(1) is raised inside the try block,
            # so the bare `except:` below catches it and the finally clause
            # exits with 0 instead — confirm whether exit code 1 is intended here.
            sys.exit(1)
        # initialize the disks stat collector process and create an exchange queue
        q = JoinableQueue(1)
        work_directories = [cl['wd'] for cl in clusters if 'wd' in cl]
        # dbversion may still hold the last autodetected version; otherwise
        # fall back to the first cluster's version.
        dbversion = dbversion or clusters[0]['ver']
        collector = DetachedDiskStatCollector(q, work_directories, dbversion)
        collector.start()
        consumer = DiskCollectorConsumer(q)
        # Host-wide collectors first, then one partition + pg collector pair
        # per cluster, grouped by the cluster's working directory.
        collectors.append(HostStatCollector())
        collectors.append(SystemStatCollector())
        collectors.append(MemoryStatCollector())
        for cl in clusters:
            part = PartitionStatCollector(cl['name'], cl['ver'], cl['wd'],
                                          consumer)
            pg = PgstatCollector(cl['pgcon'], cl['reconnect'], cl['pid'],
                                 cl['name'], cl['ver'], options.pid)
            groupname = cl['wd']
            groups[groupname] = {'pg': pg, 'partitions': part}
            collectors.append(part)
            collectors.append(pg)
        # we don't want to mix diagnostics messages with useful output, so we log the former into a file.
        disable_logging_to_stderr()
        loop(collectors, consumer, groups, output_method)
        enable_logging_to_stderr()
    except KeyboardInterrupt:
        pass
    except curses.error:
        print(traceback.format_exc())
        # Common failure mode over ssh without a pseudo-tty.
        if 'SSH_CLIENT' in os.environ and 'SSH_TTY' not in os.environ:
            print(
                'Unable to initialize curses. Make sure you supply -t option (force psedo-tty allocation) to ssh'
            )
    except:
        # NOTE(review): bare except also swallows SystemExit — see note above.
        print(traceback.format_exc())
    finally:
        # Always exit 0 here, overriding any earlier sys.exit(1).
        sys.exit(0)
def main():
    """Entry point (DBClient-based revision): gather clusters and run the monitor loop.

    Connection sources in priority order: configuration file (-c), explicit
    command-line host, then autodetection of running postmasters.  Exits with
    243 on non-Linux hosts, 1 on bad arguments or no usable clusters, and 0
    otherwise (see NOTE on the finally clause below).
    """
    global options
    # bail out if we are not running Linux
    if platform.system() != 'Linux':
        print(
            'Non Linux database hosts are not supported at the moment. Can not continue'
        )
        sys.exit(243)
    options, args = parse_args()
    consts.TICK_LENGTH = options.tick
    output_method = options.output_method
    if not output_method_is_valid(output_method):
        print('Unsupported output method: {0}'.format(output_method))
        print('Valid output methods are: {0}'.format(','.join(
            get_valid_output_methods())))
        sys.exit(1)
    # Degrade gracefully to console output when curses cannot be imported.
    if output_method == OUTPUT_METHOD.curses and not curses_available:
        print(
            'Curses output is selected, but curses are unavailable, falling back to console output'
        )
        output_method = OUTPUT_METHOD.console
    # Keep a handle on the stderr log handler so it can be detached while the
    # interactive display is running (see below).
    log_stderr = setup_loggers(options)
    user_dbname = options.instance
    user_dbver = options.version
    clusters = []
    # now try to read the configuration file
    config = (read_configuration(options.config_file)
              if options.config_file else None)
    if config:
        for instance in config:
            # -i/--instance filters which configured instances are used.
            if user_dbname and instance != user_dbname:
                continue
            # pass already acquired connections to make sure we only list unique clusters.
            db_client = DBClient.from_config(config[instance])
            try:
                cluster = db_client.establish_user_defined_connection(
                    instance, clusters)
            except (NotConnectedError, NoPidConnectionError):
                msg = 'failed to acquire details about the database cluster {0}, the server will be skipped'
                loggers.logger.error(msg.format(instance))
            except DuplicatedConnectionError:
                # Already connected to this cluster — silently skip the duplicate.
                pass
            else:
                clusters.append(cluster)
    elif options.host:
        # try to connect to the database specified by command-line options
        instance = options.instance or "default"
        db_client = DBClient.from_options(options)
        try:
            cluster = db_client.establish_user_defined_connection(
                instance, clusters)
        except (NotConnectedError, NoPidConnectionError):
            loggers.logger.error(
                "unable to continue with cluster {0}".format(instance))
        except DuplicatedConnectionError:
            pass
        else:
            clusters.append(cluster)
    else:
        # do autodetection
        postmasters = ProcWorker().get_postmasters_directories()
        # get all PostgreSQL instances
        for result_work_dir, connection_params in postmasters.items():
            # if user requested a specific database name and version - don't try to connect to others
            try:
                validate_autodetected_conn_param(user_dbname, user_dbver,
                                                 result_work_dir,
                                                 connection_params)
            except InvalidConnectionParamError:
                continue
            db_client = DBClient.from_postmasters(result_work_dir,
                                                  connection_params.pid,
                                                  connection_params.version,
                                                  options)
            if db_client is None:
                continue
            conn = db_client.connection_builder.build_connection()
            try:
                pgcon = psycopg2.connect(**conn)
            except Exception as e:
                # A failed candidate is logged and skipped, not fatal.
                loggers.logger.error('PostgreSQL exception {0}'.format(e))
                pgcon = None
            if pgcon:
                desc = make_cluster_desc(name=connection_params.dbname,
                                         version=connection_params.version,
                                         workdir=result_work_dir,
                                         pid=connection_params.pid,
                                         pgcon=pgcon,
                                         conn=conn)
                clusters.append(desc)
    collectors = []
    groups = {}
    try:
        if not clusters:
            loggers.logger.error(
                'No suitable PostgreSQL instances detected, exiting...')
            loggers.logger.error(
                'hint: use -v for details, or specify connection parameters '
                'manually in the configuration file (-c)')
            # NOTE(review): this SystemExit(1) is raised inside the try block,
            # so the bare `except:` below catches it and the finally clause
            # exits with 0 instead — confirm whether exit code 1 is intended here.
            sys.exit(1)
        # initialize the disks stat collector process and create an exchange queue
        q = JoinableQueue(1)
        work_directories = [cl['wd'] for cl in clusters if 'wd' in cl]
        collector = DetachedDiskStatCollector(q, work_directories)
        collector.start()
        consumer = DiskCollectorConsumer(q)
        # Host-wide collectors first, then one partition + pg collector pair
        # per cluster, grouped by the cluster's working directory.
        collectors.append(HostStatCollector())
        collectors.append(SystemStatCollector())
        collectors.append(MemoryStatCollector())
        for cluster in clusters:
            partition_collector = PartitionStatCollector.from_cluster(
                cluster, consumer)
            pg_collector = PgStatCollector.from_cluster(cluster, options.pid)
            groups[cluster['wd']] = {
                'pg': pg_collector,
                'partitions': partition_collector
            }
            collectors.append(partition_collector)
            collectors.append(pg_collector)
        # we don't want to mix diagnostics messages with useful output, so we log the former into a file.
        loggers.logger.removeHandler(log_stderr)
        loop(collectors, consumer, groups, output_method)
        loggers.logger.addHandler(log_stderr)
    except KeyboardInterrupt:
        pass
    except curses.error:
        print(traceback.format_exc())
        # Common failure mode over ssh without a pseudo-tty.
        if 'SSH_CLIENT' in os.environ and 'SSH_TTY' not in os.environ:
            print(
                'Unable to initialize curses. Make sure you supply -t option (force psedo-tty allocation) to ssh'
            )
    except:
        # NOTE(review): bare except also swallows SystemExit — see note above.
        print(traceback.format_exc())
    finally:
        # Always exit 0 here, overriding any earlier sys.exit(1).
        sys.exit(0)
def test_output_method_is_valid_should_return_false_when_invalid(self):
    """Reject output methods that are not among the supported ones."""
    # Unknown strings and a non-string value must all fail validation.
    for candidate in ('test', 'foo', 1):
        self.assertFalse(output_method_is_valid(candidate))
def main():
    """Entry point (legacy revision with OS check): gather clusters and run the monitor loop.

    Connection sources in priority order: configuration file (-c), explicit
    command-line host, service name, then autodetection of running postmasters.
    Exits with 243 on non-Linux hosts, 254 if psycopg2 is missing, 1 on bad
    arguments or no usable clusters, and 0 otherwise (see NOTE on the finally
    clause below).
    """
    global options
    # bail out if we are not running Linux
    if platform.system() != 'Linux':
        print('Non Linux database hosts are not supported at the moment. Can not continue')
        sys.exit(243)
    # psycopg2 is a hard requirement; bail out early with a distinctive exit code.
    if not psycopg2_available:
        print('Unable to import psycopg2 module, please, install it (python-psycopg2). Can not continue')
        sys.exit(254)
    options, args = parse_args()
    consts.TICK_LENGTH = options.tick
    output_method = options.output_method
    if not output_method_is_valid(output_method):
        print('Unsupported output method: {0}'.format(output_method))
        print('Valid output methods are: {0}'.format(','.join(get_valid_output_methods())))
        sys.exit(1)
    # Degrade gracefully to console output when curses cannot be imported.
    if output_method == OUTPUT_METHOD.curses and not curses_available:
        print('Curses output is selected, but curses are unavailable, falling back to console output')
        output_method = OUTPUT_METHOD.console
    # set basic logging
    setup_logger(options)
    clusters = []
    config = read_configuration(options.config_file) if options.config_file else None
    dbversion = None
    # configuration file takes priority over the rest of database connection information sources.
    if config:
        for instance in config:
            # -i/--instance filters which configured instances are used.
            if options.instance and instance != options.instance:
                continue
            # pass already acquired connections to make sure we only list unique clusters.
            host = config[instance].get('host')
            port = config[instance].get('port')
            conn = build_connection(host, port, config[instance].get('user'),
                                    config[instance].get('dbname'))
            if not establish_user_defined_connection(instance, conn, clusters):
                logger.error('failed to acquire details about ' +
                             'the database cluster {0}, the server will be skipped'.format(instance))
    elif options.host:
        # connect to the database using the connection string supplied from command-line
        conn = build_connection(options.host, options.port, options.username, options.dbname)
        instance = options.instance or "default"
        if not establish_user_defined_connection(instance, conn, clusters):
            logger.error("unable to continue with cluster {0}".format(instance))
    elif options.use_service and options.instance:
        # connect to the database using the service name
        if not establish_user_defined_connection(options.instance, {'service': options.instance}, clusters):
            logger.error("unable to continue with cluster {0}".format(options.instance))
    else:
        # do autodetection
        postmasters = get_postmasters_directories()
        # get all PostgreSQL instances
        for result_work_dir, data in postmasters.items():
            (ppid, dbversion, dbname) = data
            # if user requested a specific database name and version - don't try to connect to others
            if options.instance:
                if dbname != options.instance or not result_work_dir or not ppid:
                    continue
                if options.version is not None and dbversion != options.version:
                    continue
            try:
                conndata = detect_db_connection_arguments(
                    result_work_dir, ppid, dbversion, options.username, options.dbname)
                if conndata is None:
                    continue
                host = conndata['host']
                port = conndata['port']
                conn = build_connection(host, port, options.username, options.dbname)
                pgcon = psycopg2.connect(**conn)
            except Exception as e:
                # A failed candidate is logged and skipped, not fatal.
                logger.error('PostgreSQL exception {0}'.format(e))
                pgcon = None
            if pgcon:
                desc = make_cluster_desc(name=dbname, version=dbversion, workdir=result_work_dir,
                                         pid=ppid, pgcon=pgcon, conn=conn)
                clusters.append(desc)
    collectors = []
    groups = {}
    try:
        if len(clusters) == 0:
            logger.error('No suitable PostgreSQL instances detected, exiting...')
            logger.error('hint: use -v for details, ' +
                         'or specify connection parameters manually in the configuration file (-c)')
            # NOTE(review): this SystemExit(1) is raised inside the try block,
            # so the bare `except:` below catches it and the finally clause
            # exits with 0 instead — confirm whether exit code 1 is intended here.
            sys.exit(1)
        # initialize the disks stat collector process and create an exchange queue
        q = JoinableQueue(1)
        work_directories = [cl['wd'] for cl in clusters if 'wd' in cl]
        # dbversion may still hold the last autodetected version; otherwise
        # fall back to the first cluster's version.
        dbversion = dbversion or clusters[0]['ver']
        collector = DetachedDiskStatCollector(q, work_directories, dbversion)
        collector.start()
        consumer = DiskCollectorConsumer(q)
        # Host-wide collectors first, then one partition + pg collector pair
        # per cluster, grouped by the cluster's working directory.
        collectors.append(HostStatCollector())
        collectors.append(SystemStatCollector())
        collectors.append(MemoryStatCollector())
        for cl in clusters:
            part = PartitionStatCollector(cl['name'], cl['ver'], cl['wd'], consumer)
            pg = PgstatCollector(cl['pgcon'], cl['reconnect'], cl['pid'], cl['name'], cl['ver'], options.pid)
            groupname = cl['wd']
            groups[groupname] = {'pg': pg, 'partitions': part}
            collectors.append(part)
            collectors.append(pg)
        # we don't want to mix diagnostics messages with useful output, so we log the former into a file.
        disable_logging_to_stderr()
        loop(collectors, consumer, groups, output_method)
        enable_logging_to_stderr()
    except KeyboardInterrupt:
        pass
    except curses.error:
        print(traceback.format_exc())
        # Common failure mode over ssh without a pseudo-tty.
        if 'SSH_CLIENT' in os.environ and 'SSH_TTY' not in os.environ:
            print('Unable to initialize curses. Make sure you supply -t option (force psedo-tty allocation) to ssh')
    except:
        # NOTE(review): bare except also swallows SystemExit — see note above.
        print(traceback.format_exc())
    finally:
        # Always exit 0 here, overriding any earlier sys.exit(1).
        sys.exit(0)
def test_output_method_is_valid_should_return_true_when_valid(self):
    """Every supported output method must pass validation."""
    valid_methods = ('console', 'json', 'curses')
    for method in valid_methods:
        self.assertTrue(output_method_is_valid(method))