def main():
    """Entry point (legacy variant): discover PostgreSQL clusters and run the
    collector loop until interrupted.

    Cluster discovery, first match wins:
      1. configuration file (-c), optionally filtered by --instance;
      2. explicit command-line --host/--port/--username/--dbname;
      3. service name (--use-service together with --instance);
      4. autodetection of running postmasters on this host.

    Side effects: mutates the module-level ``options``, sets
    ``consts.TICK_LENGTH``, spawns a DetachedDiskStatCollector process, and
    always terminates the interpreter via sys.exit().
    """
    global options
    # psycopg2 is a hard requirement; bail out with a distinctive exit code.
    if not psycopg2_available:
        print(
            'Unable to import psycopg2 module, please, install it (python-psycopg2). Can not continue'
        )
        sys.exit(254)
    options, args = parse_args()
    consts.TICK_LENGTH = options.tick
    output_method = options.output_method
    if not output_method_is_valid(output_method):
        print('Unsupported output method: {0}'.format(output_method))
        print('Valid output methods are: {0}'.format(','.join(
            get_valid_output_methods())))
        sys.exit(1)
    # Degrade gracefully to plain console output when curses is missing.
    if output_method == OUTPUT_METHOD.curses and not curses_available:
        print(
            'Curses output is selected, but curses are unavailable, falling back to console output'
        )
        output_method = OUTPUT_METHOD.console
    # set basic logging
    setup_logger(options)
    clusters = []
    config = read_configuration(
        options.config_file) if options.config_file else None
    dbversion = None
    # configuration file takes priority over the rest of database connection information sources.
    if config:
        for instance in config:
            if options.instance and instance != options.instance:
                continue
            # pass already acquired connections to make sure we only list unique clusters.
            host = config[instance].get('host')
            port = config[instance].get('port')
            conn = build_connection(host, port, config[instance].get('user'),
                                    config[instance].get('dbname'))
            # A failed instance is logged and skipped; the remaining ones are
            # still tried.
            if not establish_user_defined_connection(instance, conn, clusters):
                logger.error(
                    'failed to acquire details about ' +
                    'the database cluster {0}, the server will be skipped'.
                    format(instance))
    elif options.host:
        # connect to the database using the connection string supplied from command-line
        conn = build_connection(options.host, options.port, options.username,
                                options.dbname)
        instance = options.instance or "default"
        if not establish_user_defined_connection(instance, conn, clusters):
            logger.error(
                "unable to continue with cluster {0}".format(instance))
    elif options.use_service and options.instance:
        # connect to the database using the service name
        if not establish_user_defined_connection(
                options.instance, {'service': options.instance}, clusters):
            logger.error("unable to continue with cluster {0}".format(
                options.instance))
    else:
        # do autodetection
        postmasters = get_postmasters_directories()
        # get all PostgreSQL instances
        for result_work_dir, data in postmasters.items():
            # NOTE: this rebinds the outer ``dbversion`` — its last value is
            # later reused as a default for DetachedDiskStatCollector below.
            (ppid, dbversion, dbname) = data
            # if user requested a specific database name and version - don't try to connect to others
            if options.instance:
                if dbname != options.instance or not result_work_dir or not ppid:
                    continue
                if options.version is not None and dbversion != options.version:
                    continue
            try:
                conndata = detect_db_connection_arguments(
                    result_work_dir, ppid, dbversion, options.username,
                    options.dbname)
                if conndata is None:
                    continue
                host = conndata['host']
                port = conndata['port']
                conn = build_connection(host, port, options.username,
                                        options.dbname)
                pgcon = psycopg2.connect(**conn)
            except Exception as e:
                # Best-effort autodetection: log and move on to the next
                # postmaster instead of aborting the whole scan.
                logger.error('PostgreSQL exception {0}'.format(e))
                pgcon = None
            if pgcon:
                desc = make_cluster_desc(name=dbname,
                                         version=dbversion,
                                         workdir=result_work_dir,
                                         pid=ppid,
                                         pgcon=pgcon,
                                         conn=conn)
                clusters.append(desc)
    collectors = []
    groups = {}
    try:
        if len(clusters) == 0:
            logger.error(
                'No suitable PostgreSQL instances detected, exiting...')
            logger.error(
                'hint: use -v for details, ' +
                'or specify connection parameters manually in the configuration file (-c)'
            )
            sys.exit(1)
        # initialize the disks stat collector process and create an exchange queue
        q = JoinableQueue(1)
        work_directories = [cl['wd'] for cl in clusters if 'wd' in cl]
        # Fall back to the first cluster's version if autodetection did not
        # set one (config/-h/service code paths leave dbversion as None).
        dbversion = dbversion or clusters[0]['ver']
        collector = DetachedDiskStatCollector(q, work_directories, dbversion)
        collector.start()
        consumer = DiskCollectorConsumer(q)
        collectors.append(HostStatCollector())
        collectors.append(SystemStatCollector())
        collectors.append(MemoryStatCollector())
        # One partition collector and one pgstat collector per cluster,
        # grouped by the cluster's working directory.
        for cl in clusters:
            part = PartitionStatCollector(cl['name'], cl['ver'], cl['wd'],
                                          consumer)
            pg = PgstatCollector(cl['pgcon'], cl['reconnect'], cl['pid'],
                                 cl['name'], cl['ver'], options.pid)
            groupname = cl['wd']
            groups[groupname] = {'pg': pg, 'partitions': part}
            collectors.append(part)
            collectors.append(pg)
        # we don't want to mix diagnostics messages with useful output, so we log the former into a file.
        disable_logging_to_stderr()
        loop(collectors, consumer, groups, output_method)
        enable_logging_to_stderr()
    except KeyboardInterrupt:
        pass
    except curses.error:
        print(traceback.format_exc())
        # A common cause over ssh is a missing pseudo-tty; hint at -t.
        # (sic: 'psedo-tty' typo is preserved in the runtime message below.)
        if 'SSH_CLIENT' in os.environ and 'SSH_TTY' not in os.environ:
            print(
                'Unable to initialize curses. Make sure you supply -t option (force psedo-tty allocation) to ssh'
            )
    except:
        # NOTE(review): this bare except also catches SystemExit raised by
        # sys.exit(1) above and prints its traceback; the finally clause then
        # forces exit code 0 regardless — confirm this is intended.
        print(traceback.format_exc())
    finally:
        sys.exit(0)
def setUp(self):
    """Create a fresh SystemStatCollector for each test, then run the base
    class setUp (collector first, so the base hook sees it if needed)."""
    self.collector = SystemStatCollector()
    super(SystemStatCollectorTest, self).setUp()
def main():
    """Entry point (refactored variant): discover PostgreSQL clusters via
    DBClient/ProcWorker and run the collector loop until interrupted.

    Cluster discovery, first match wins:
      1. configuration file (-c), optionally filtered by --instance;
      2. explicit command-line --host options;
      3. autodetection of running postmasters on this host.

    Side effects: mutates the module-level ``options``, sets
    ``consts.TICK_LENGTH``, spawns a DetachedDiskStatCollector process,
    temporarily detaches the stderr log handler around the display loop, and
    always terminates the interpreter via sys.exit().
    """
    global options
    # bail out if we are not running Linux
    if platform.system() != 'Linux':
        print(
            'Non Linux database hosts are not supported at the moment. Can not continue'
        )
        sys.exit(243)
    options, args = parse_args()
    consts.TICK_LENGTH = options.tick
    output_method = options.output_method
    if not output_method_is_valid(output_method):
        print('Unsupported output method: {0}'.format(output_method))
        print('Valid output methods are: {0}'.format(','.join(
            get_valid_output_methods())))
        sys.exit(1)
    # Degrade gracefully to plain console output when curses is missing.
    if output_method == OUTPUT_METHOD.curses and not curses_available:
        print(
            'Curses output is selected, but curses are unavailable, falling back to console output'
        )
        output_method = OUTPUT_METHOD.console
    # Keep the stderr handler so it can be detached around the display loop.
    log_stderr = setup_loggers(options)
    user_dbname = options.instance
    user_dbver = options.version
    clusters = []
    # now try to read the configuration file
    config = (read_configuration(options.config_file)
              if options.config_file else None)
    if config:
        for instance in config:
            if user_dbname and instance != user_dbname:
                continue
            # pass already acquired connections to make sure we only list unique clusters.
            db_client = DBClient.from_config(config[instance])
            try:
                cluster = db_client.establish_user_defined_connection(
                    instance, clusters)
            except (NotConnectedError, NoPidConnectionError):
                # Log and skip this instance; keep trying the others.
                msg = 'failed to acquire details about the database cluster {0}, the server will be skipped'
                loggers.logger.error(msg.format(instance))
            except DuplicatedConnectionError:
                # Same cluster already registered — silently ignore.
                pass
            else:
                clusters.append(cluster)
    elif options.host:
        # try to connect to the database specified by command-line options
        instance = options.instance or "default"
        db_client = DBClient.from_options(options)
        try:
            cluster = db_client.establish_user_defined_connection(
                instance, clusters)
        except (NotConnectedError, NoPidConnectionError):
            loggers.logger.error(
                "unable to continue with cluster {0}".format(instance))
        except DuplicatedConnectionError:
            pass
        else:
            clusters.append(cluster)
    else:
        # do autodetection
        postmasters = ProcWorker().get_postmasters_directories()
        # get all PostgreSQL instances
        for result_work_dir, connection_params in postmasters.items():
            # if user requested a specific database name and version - don't try to connect to others
            try:
                validate_autodetected_conn_param(user_dbname, user_dbver,
                                                 result_work_dir,
                                                 connection_params)
            except InvalidConnectionParamError:
                continue
            db_client = DBClient.from_postmasters(result_work_dir,
                                                  connection_params.pid,
                                                  connection_params.version,
                                                  options)
            if db_client is None:
                continue
            conn = db_client.connection_builder.build_connection()
            try:
                pgcon = psycopg2.connect(**conn)
            except Exception as e:
                # Best-effort autodetection: log and move on to the next
                # postmaster instead of aborting the whole scan.
                loggers.logger.error('PostgreSQL exception {0}'.format(e))
                pgcon = None
            if pgcon:
                desc = make_cluster_desc(name=connection_params.dbname,
                                         version=connection_params.version,
                                         workdir=result_work_dir,
                                         pid=connection_params.pid,
                                         pgcon=pgcon,
                                         conn=conn)
                clusters.append(desc)
    collectors = []
    groups = {}
    try:
        if not clusters:
            loggers.logger.error(
                'No suitable PostgreSQL instances detected, exiting...')
            loggers.logger.error(
                'hint: use -v for details, or specify connection parameters '
                'manually in the configuration file (-c)')
            sys.exit(1)
        # initialize the disks stat collector process and create an exchange queue
        q = JoinableQueue(1)
        work_directories = [cl['wd'] for cl in clusters if 'wd' in cl]
        collector = DetachedDiskStatCollector(q, work_directories)
        collector.start()
        consumer = DiskCollectorConsumer(q)
        collectors.append(HostStatCollector())
        collectors.append(SystemStatCollector())
        collectors.append(MemoryStatCollector())
        # One partition collector and one pgstat collector per cluster,
        # grouped by the cluster's working directory.
        for cluster in clusters:
            partition_collector = PartitionStatCollector.from_cluster(
                cluster, consumer)
            pg_collector = PgStatCollector.from_cluster(cluster, options.pid)
            groups[cluster['wd']] = {
                'pg': pg_collector,
                'partitions': partition_collector
            }
            collectors.append(partition_collector)
            collectors.append(pg_collector)
        # we don't want to mix diagnostics messages with useful output, so we log the former into a file.
        loggers.logger.removeHandler(log_stderr)
        loop(collectors, consumer, groups, output_method)
        loggers.logger.addHandler(log_stderr)
    except KeyboardInterrupt:
        pass
    except curses.error:
        print(traceback.format_exc())
        # A common cause over ssh is a missing pseudo-tty; hint at -t.
        # (sic: 'psedo-tty' typo is preserved in the runtime message below.)
        if 'SSH_CLIENT' in os.environ and 'SSH_TTY' not in os.environ:
            print(
                'Unable to initialize curses. Make sure you supply -t option (force psedo-tty allocation) to ssh'
            )
    except:
        # NOTE(review): this bare except also catches SystemExit raised by
        # sys.exit(1) above and prints its traceback; the finally clause then
        # forces exit code 0 regardless — confirm this is intended.
        print(traceback.format_exc())
    finally:
        sys.exit(0)
class SystemStatCollectorTest(TestCase):
    """Unit tests for SystemStatCollector.

    Most psutil-backed tests are currently disabled via
    ``@unittest.skip('psutil')``; only the pure ``_refresh_cpu_time_values``
    and ``_cpu_time_diff`` tests run unconditionally.
    """

    def setUp(self):
        # Fresh collector per test; base setUp runs after the collector
        # exists.
        self.collector = SystemStatCollector()
        super(SystemStatCollectorTest, self).setUp()

    @unittest.skip('psutil')
    def test_refresh_should_contain_proper_data_keys(self):
        """refresh() result should expose all expected CPU stat keys."""
        refreshed_data = self.collector.refresh()
        self.assertIn('stime', refreshed_data)
        self.assertIn('softirq', refreshed_data)
        self.assertIn('iowait', refreshed_data)
        self.assertIn('idle', refreshed_data)
        self.assertIn('ctxt', refreshed_data)
        self.assertIn('running', refreshed_data)
        self.assertIn('blocked', refreshed_data)
        self.assertIn('guest', refreshed_data)
        self.assertIn('irq', refreshed_data)
        self.assertIn('utime', refreshed_data)
        self.assertIn('steal', refreshed_data)

    # mock.patch decorators are applied bottom-up, so the lowest decorator
    # (read_cpu_times) maps to the first mock parameter.
    @unittest.skip('psutil')
    @mock.patch(
        'pg_view.collectors.system_collector.SystemStatCollector._refresh_cpu_time_values'
    )
    @mock.patch(
        'pg_view.collectors.system_collector.SystemStatCollector._do_refresh')
    @mock.patch(
        'pg_view.collectors.system_collector.SystemStatCollector.read_cpu_stats'
    )
    @mock.patch(
        'pg_view.collectors.system_collector.SystemStatCollector.read_cpu_times'
    )
    def test_refresh_should_call_helpers_with_proper_data(
            self, mocked_read_cpu_times, mocked_read_proc_stat,
            mocked__do_refresh, mocked__refresh_cpu_time_values):
        """refresh() should merge stats+times and pass them to the helpers."""
        cpu_stats = {
            'cpu': [
                '46535', '0', '40348', '8412642', '188', '1', '2020', '0',
                '0', '0'
            ],
            'blocked': 0,
            'ctxt': 11530476.0,
            'guest': 0.0,
            'idle': 8412642.0,
            'iowait': 188.0,
            'irq': 1.0,
            'running': 1,
            'softirq': 2020.0,
            'steal': 0.0,
            'stime': 40348.0,
            'utime': 46535.0
        }
        cpu_times = {
            'guest': 0.0,
            'idle': 8412642.0,
            'iowait': 188.0,
            'irq': 1.0,
            'softirq': 2020.0,
            'steal': 0.0,
            'stime': 40348.0,
            'utime': 46535.0
        }
        mocked_read_proc_stat.return_value = cpu_stats
        mocked_read_cpu_times.return_value = cpu_times
        # cpu_stats entries win over cpu_times on key collision.
        merged_data = dict(cpu_times, **cpu_stats)
        self.collector.refresh()
        mocked__refresh_cpu_time_values.assert_called_once_with(cpu_times)
        mocked__do_refresh.assert_called_once_with([merged_data])

    @unittest.skip('psutil')
    @unittest.skipUnless(psutil.LINUX, "Linux only")
    @mock.patch(
        'pg_view.collectors.system_collector.psutil._pslinux.open_binary')
    def test_get_missing_cpu_stat_from_file_should_parse_data_from_proc_stat(
            self, mocked_open):
        """Fixture /proc/stat file should yield procs_running/procs_blocked
        (byte-string keys, since the file is opened in binary mode)."""
        cpu_info_ok = os.path.join(TEST_DIR, 'proc_files', 'cpu_info_ok')
        mocked_open.return_value = open(cpu_info_ok, "rb")
        refreshed_data = self.collector.get_missing_cpu_stat_from_file()
        self.assertEqual({
            b'procs_blocked': 0,
            b'procs_running': 1
        }, refreshed_data)

    @unittest.skip('psutil')
    @mock.patch('pg_view.collectors.system_collector.psutil.cpu_times')
    def test_read_cpu_data_should_transform_input_when_cpu_times_for_linux(
            self, mocked_cpu_times):
        """Linux scputimes (with iowait/irq/... fields) should map onto the
        collector's utime/stime/... keys."""
        linux_scputimes = namedtuple(
            'scputimes', 'user nice system idle iowait irq softirq steal guest')
        mocked_cpu_times.return_value = linux_scputimes(user=848.31,
                                                        nice=0.0,
                                                        system=775.15,
                                                        idle=105690.03,
                                                        iowait=2.05,
                                                        irq=0.01,
                                                        softirq=54.83,
                                                        steal=0.0,
                                                        guest=0.0)
        refreshed_cpu = self.collector.read_cpu_times()
        expected_data = {
            'guest': 0.0,
            'idle': 105690.03,
            'iowait': 2.05,
            'irq': 0.01,
            'softirq': 54.83,
            'steal': 0.0,
            'stime': 775.15,
            'utime': 848.31
        }
        self.assertEqual(expected_data, refreshed_cpu)

    @unittest.skip('psutil')
    @mock.patch('pg_view.collectors.system_collector.psutil.cpu_times')
    def test_read_cpu_data_should_transform_input_when_cpu_times_for_macos(
            self, mocked_cpu_times):
        """macOS scputimes lacks Linux-only fields; they should default to
        0.0 in the result."""
        macos_scputimes = namedtuple('scputimes', 'user system idle')
        mocked_cpu_times.return_value = macos_scputimes(user=49618.61,
                                                        system=28178.55,
                                                        idle=341331.57)
        refreshed_cpu = self.collector.read_cpu_times()
        expected_data = {
            'guest': 0.0,
            'idle': 341331.57,
            'iowait': 0.0,
            'irq': 0.0,
            'softirq': 0.0,
            'steal': 0.0,
            'stime': 28178.55,
            'utime': 49618.61
        }
        self.assertEqual(expected_data, refreshed_cpu)

    # psutil.LINUX patched to False: the /proc fallback must not be used, so
    # running/blocked stay 0.0.
    @unittest.skip('psutil')
    @mock.patch('pg_view.collectors.system_collector.psutil.cpu_stats')
    @mock.patch('pg_view.collectors.system_collector.psutil.LINUX', False)
    def test_read_cpu_data_should_transform_input_when_cpu_stats_for_macos(
            self, mocked_cpu_times):
        macos_scpustats = namedtuple(
            'scpustats', 'ctx_switches interrupts soft_interrupts syscalls')
        mocked_cpu_times.return_value = macos_scpustats(
            ctx_switches=12100,
            interrupts=888823,
            soft_interrupts=211467872,
            syscalls=326368)
        refreshed_cpu = self.collector.read_cpu_stats()
        expected_data = {'running': 0.0, 'ctxt': 12100, 'blocked': 0.0}
        self.assertEqual(expected_data, refreshed_cpu)

    # psutil.LINUX patched to True: running/blocked should come from the
    # mocked get_missing_cpu_stat_from_file helper.
    @unittest.skip('psutil')
    @mock.patch('pg_view.collectors.system_collector.psutil.cpu_stats')
    @mock.patch('pg_view.collectors.system_collector.psutil.LINUX', True)
    @mock.patch(
        'pg_view.collectors.system_collector.SystemStatCollector.get_missing_cpu_stat_from_file'
    )
    def test_read_cpu_data_should_transform_input_when_cpu_stats_for_linux(
            self, mocked_get_missing_cpu_stat_from_file, mocked_cpu_times):
        linux_scpu_stats = namedtuple(
            'scpustats', 'ctx_switches interrupts soft_interrupts syscalls')
        mocked_get_missing_cpu_stat_from_file.return_value = {
            'procs_running': 10,
            'procs_blocked': 20,
        }
        mocked_cpu_times.return_value = linux_scpu_stats(
            ctx_switches=12100,
            interrupts=888823,
            soft_interrupts=211467872,
            syscalls=326368)
        refreshed_cpu = self.collector.read_cpu_stats()
        expected_data = {'running': 10.0, 'ctxt': 12100, 'blocked': 20.0}
        self.assertEqual(expected_data, refreshed_cpu)
        mocked_get_missing_cpu_stat_from_file.assert_called_with()

    def test__refresh_cpu_time_values_should_update_cpu_when_ok(self):
        """_refresh_cpu_time_values should roll current total into previous
        and compute the diff (sum of all cpu_data values = 8501734.0)."""
        cpu_data = {
            'guest': 0.0,
            'idle': 8412642.0,
            'iowait': 188.0,
            'irq': 1.0,
            'softirq': 2020.0,
            'steal': 0.0,
            'stime': 40348.0,
            'utime': 46535.0
        }
        self.collector.current_total_cpu_time = 1.0
        self.collector._refresh_cpu_time_values(cpu_data)
        # (sic) 'previos_total_cpu_time' matches the attribute spelling used
        # by the collector implementation — presumably a typo there; verify.
        self.assertEqual(1.0, self.collector.previos_total_cpu_time)
        self.assertEqual(8501734.0, self.collector.current_total_cpu_time)
        self.assertEqual(8501733.0, self.collector.cpu_time_diff)

    def test__cpu_time_diff_should_return_none_when_cpu_time_diff_zero(self):
        """A zero cpu_time_diff must yield None (avoids divide-by-zero)."""
        current = {
            'guest': 0.0,
            'irq': 0.0,
            'running': 1,
            'idle': 75211.11,
            'stime': 209.64,
            'iowait': 1.71,
            'blocked': 0,
            'utime': 292.11,
            'steal': 0.0,
            'ctxt': 6595374,
            'softirq': 9.0
        }
        previous = {
            'guest': 0.0,
            'irq': 0.0,
            'running': 2,
            'idle': 75210.22,
            'stime': 209.6,
            'iowait': 1.71,
            'blocked': 0,
            'utime': 291.99,
            'steal': 0.0,
            'ctxt': 6594493,
            'softirq': 8.99
        }
        self.collector.cpu_time_diff = 0
        self.assertIsNone(
            self.collector._cpu_time_diff('utime', current, previous))

    def test__cpu_time_diff_should_return_none_when_no_colname_in_data(self):
        """Missing column ('utime' absent from both dicts) must yield None."""
        current = {
            'guest': 0.0,
            'irq': 0.0,
            'running': 1,
            'idle': 75211.11,
            'stime': 209.64,
            'iowait': 1.71,
            'blocked': 0,
            'steal': 0.0,
            'ctxt': 6595374,
            'softirq': 9.0
        }
        previous = {
            'guest': 0.0,
            'irq': 0.0,
            'running': 2,
            'idle': 75210.22,
            'stime': 209.6,
            'iowait': 1.71,
            'blocked': 0,
            'steal': 0.0,
            'ctxt': 6594493,
            'softirq': 8.99
        }
        self.collector.cpu_time_diff = 1
        self.assertIsNone(
            self.collector._cpu_time_diff('utime', current, previous))

    def test__cpu_time_diff_should_return_diff_when_ok(self):
        """With valid inputs the diff is current - previous (293 - 292 = 1)."""
        current = {
            'guest': 0.0,
            'irq': 0.0,
            'running': 1,
            'idle': 75211.11,
            'stime': 209.64,
            'iowait': 1.71,
            'blocked': 0,
            'utime': 293,
            'steal': 0.0,
            'ctxt': 6595374,
            'softirq': 9.0
        }
        previous = {
            'guest': 0.0,
            'irq': 0.0,
            'running': 2,
            'idle': 75210.22,
            'stime': 209.6,
            'iowait': 1.71,
            'blocked': 0,
            'utime': 292,
            'steal': 0.0,
            'ctxt': 6594493,
            'softirq': 8.99
        }
        self.collector.cpu_time_diff = 1
        self.assertEqual(
            1, self.collector._cpu_time_diff('utime', current, previous))