def test_get_mounted_device_should_return_dev_when_device_on_pathname(
         self, mocked_disk_partitions):
     """get_mounted_device should return the device string of the
     partition whose mountpoint equals the requested pathname."""
     # One fake partition mounted at '/', backed by 'sda1'.
     device = mock.Mock(mountpoint='/', device='sda1')
     mocked_disk_partitions.return_value = [device]
     detached_disk = DetachedDiskStatCollector(
         mock.Mock(), ['/var/lib/postgresql/9.3/main'])
     mounted_device = detached_disk.get_mounted_device('/')
     self.assertEqual('sda1', mounted_device)
 def test_get_du_data_should_log_error_when_run_du_raises_exception(
         self, mocked_logger, mocked_run_du):
     """get_du_data should swallow a run_du failure and log an error
     instead of propagating the exception."""
     mocked_run_du.side_effect = Exception
     detached_disk = DetachedDiskStatCollector(
         mock.Mock(), ['/var/lib/postgresql/9.3/main'])
     # Must not raise despite run_du blowing up.
     detached_disk.get_du_data('/var/lib/postgresql/9.3/main')
     expected_msg = 'Unable to read free space information for the pg_xlog and data directories for the directory ' \
                    '/var/lib/postgresql/9.3/main: '
     mocked_logger.error.assert_called_with(expected_msg)
 def test_get_df_data_should_return_proper_data_when_data_dev_and_xlog_dev_are_equal(
         self, mocked_os_statvfs, mocked_get_mounted_device,
         mocked_get_mount_point):
     """When the data directory and pg_xlog share one device, both df
     entries should carry the same (device, total, available) tuple."""
     # statvfs field order: f_bsize, f_frsize, f_blocks, f_bfree,
     # f_bavail, f_files, f_ffree, f_favail, f_flag, f_namemax.
     seq = (4096, 4096, 10312784, 9823692, 9389714, 2621440, 2537942,
            2537942, 4096, 255)
     mocked_os_statvfs.return_value = posix.statvfs_result(sequence=seq)
     detached_disk = DetachedDiskStatCollector(
         mock.Mock(), ['/var/lib/postgresql/9.3/main'])
     df_data = detached_disk.get_df_data('/var/lib/postgresql/9.3/main')
     # 41251136 == f_blocks * f_frsize / 1024; 37558856 == f_bavail * 4,
     # i.e. sizes reported in kilobytes.
     expected_df_data = {
         'data': ('/dev/sda1', 41251136, 37558856),
         'xlog': ('/dev/sda1', 41251136, 37558856)
     }
     self.assertEqual(expected_df_data, df_data)
 def test_get_du_data_should_run_du_when_work_directory(
         self, mocked_run_du):
     """get_du_data should call run_du for the data directory and for
     its pg_xlog subdirectory, returning both sizes as strings."""
     # First side_effect value -> data dir, second -> pg_xlog dir.
     mocked_run_du.side_effect = [35628, 35620]
     detached_disk = DetachedDiskStatCollector(
         mock.Mock(), ['/var/lib/postgresql/9.3/main'])
     result = detached_disk.get_du_data('/var/lib/postgresql/9.3/main')
     expected_result = {
         'xlog': ('35620', '/var/lib/postgresql/9.3/main/pg_xlog'),
         'data': ('35628', '/var/lib/postgresql/9.3/main')
     }
     self.assertEqual(expected_result, result)
     # Call order matters: data directory first, then pg_xlog.
     mocked_run_du.assert_has_calls([
         mock.call('/var/lib/postgresql/9.3/main'),
         mock.call('/var/lib/postgresql/9.3/main/pg_xlog/')
     ])
 def test__get_or_update_df_cache_should_call_os_statvfs_when_empty_cache(
         self, mocked_os_statvfs):
     """With an empty cache, _get_or_update_df_cache should consult
     os.statvfs and store the result under the device key."""
     detached_disk = DetachedDiskStatCollector(
         mock.Mock(), ['/var/lib/postgresql/9.3/main'])
     df_data = detached_disk._get_or_update_df_cache(
         '/var/lib/postgresql/9.3/main', '/sda/dev1')
     self.assertEqual((
         4096,
         4096,
     ), df_data)
     # The freshly computed tuple must also be cached by device name.
     self.assertEqual((
         4096,
         4096,
     ), detached_disk.df_cache['/sda/dev1'])
     mocked_os_statvfs.assert_called_once_with(
         '/var/lib/postgresql/9.3/main')
 def test__get_or_update_df_cache_should_get_from_cache_when_entry_exists(
         self, mocked_os_statvfs):
     """When the device already has a cached entry, it should be
     returned without calling os.statvfs again."""
     detached_disk = DetachedDiskStatCollector(
         mock.Mock(), ['/var/lib/postgresql/9.3/main'])
     # Pre-populate the cache for this device.
     detached_disk.df_cache = {
         '/sda/dev1': (
             4096,
             4096,
         )
     }
     df_data = detached_disk._get_or_update_df_cache(
         '/var/lib/postgresql/9.3/main', '/sda/dev1')
     self.assertEqual((
         4096,
         4096,
     ), df_data)
     mocked_os_statvfs.assert_not_called()
    def test_run_should_loop_forever_processing_both_collectors(
            self, mocked_get_df_data, mocked_get_du_data):
        """run() should keep collecting du and df data for every work
        directory and publish both results on the queue each cycle."""
        # ErrorAfter(1) lets each mocked collector succeed once, then
        # raises CallableExhaustedError to break the infinite loop.
        mocked_get_du_data.side_effect = ErrorAfter(1)
        mocked_get_df_data.side_effect = ErrorAfter(1)
        queue = mock.Mock()
        detached_disk = DetachedDiskStatCollector(
            queue, ['/var/lib/postgresql/9.3/main'])
        with self.assertRaises(CallableExhaustedError):
            detached_disk.run()

        mocked_get_du_data.assert_called_with('/var/lib/postgresql/9.3/main')
        mocked_get_df_data.assert_called_with('/var/lib/postgresql/9.3/main')
        # Exactly one completed cycle -> one (du, df) pair per work dir.
        queue.put.assert_called_once_with({
            '/var/lib/postgresql/9.3/main':
            [('/var/lib/postgresql/9.3/main', ),
             ('/var/lib/postgresql/9.3/main', )]
        })
Example #8
0
def main():
    """Entry point: parse options, discover PostgreSQL clusters (from a
    config file, command-line options, a service name, or autodetection)
    and run the monitoring loop until interrupted.

    Exit codes: 254 when psycopg2 is missing, 1 on an unsupported output
    method or when no clusters are found, 0 on normal termination.
    """
    global options

    if not psycopg2_available:
        print(
            'Unable to import psycopg2 module, please, install it (python-psycopg2). Can not continue'
        )
        sys.exit(254)

    options, args = parse_args()
    consts.TICK_LENGTH = options.tick

    output_method = options.output_method

    if not output_method_is_valid(output_method):
        print('Unsupported output method: {0}'.format(output_method))
        print('Valid output methods are: {0}'.format(','.join(
            get_valid_output_methods())))
        sys.exit(1)

    if output_method == OUTPUT_METHOD.curses and not curses_available:
        print(
            'Curses output is selected, but curses are unavailable, falling back to console output'
        )
        output_method = OUTPUT_METHOD.console

    # set basic logging
    setup_logger(options)

    clusters = []

    config = read_configuration(
        options.config_file) if options.config_file else None
    dbversion = None
    # configuration file takes priority over the rest of database connection information sources.
    if config:
        for instance in config:
            if options.instance and instance != options.instance:
                continue
            # pass already aquired connections to make sure we only list unique clusters.
            host = config[instance].get('host')
            port = config[instance].get('port')
            conn = build_connection(host, port, config[instance].get('user'),
                                    config[instance].get('dbname'))

            if not establish_user_defined_connection(instance, conn, clusters):
                logger.error(
                    'failed to acquire details about ' +
                    'the database cluster {0}, the server will be skipped'.
                    format(instance))
    elif options.host:
        # connect to the database using the connection string supplied from command-line
        conn = build_connection(options.host, options.port, options.username,
                                options.dbname)
        instance = options.instance or "default"
        if not establish_user_defined_connection(instance, conn, clusters):
            logger.error(
                "unable to continue with cluster {0}".format(instance))
    elif options.use_service and options.instance:
        # connect to the database using the service name
        if not establish_user_defined_connection(
                options.instance, {'service': options.instance}, clusters):
            logger.error("unable to continue with cluster {0}".format(
                options.instance))
    else:
        # do autodetection
        postmasters = get_postmasters_directories()

        # get all PostgreSQL instances
        for result_work_dir, data in postmasters.items():
            (ppid, dbversion, dbname) = data
            # if user requested a specific database name and version - don't try to connect to others
            if options.instance:
                if dbname != options.instance or not result_work_dir or not ppid:
                    continue
                if options.version is not None and dbversion != options.version:
                    continue
            try:
                conndata = detect_db_connection_arguments(
                    result_work_dir, ppid, dbversion, options.username,
                    options.dbname)
                if conndata is None:
                    continue
                host = conndata['host']
                port = conndata['port']
                conn = build_connection(host, port, options.username,
                                        options.dbname)
                pgcon = psycopg2.connect(**conn)
            except Exception as e:
                logger.error('PostgreSQL exception {0}'.format(e))
                pgcon = None
            if pgcon:
                desc = make_cluster_desc(name=dbname,
                                         version=dbversion,
                                         workdir=result_work_dir,
                                         pid=ppid,
                                         pgcon=pgcon,
                                         conn=conn)
                clusters.append(desc)
    collectors = []
    groups = {}
    try:
        if len(clusters) == 0:
            logger.error(
                'No suitable PostgreSQL instances detected, exiting...')
            logger.error(
                'hint: use -v for details, ' +
                'or specify connection parameters manually in the configuration file (-c)'
            )
            sys.exit(1)

        # initialize the disks stat collector process and create an exchange queue
        q = JoinableQueue(1)
        work_directories = [cl['wd'] for cl in clusters if 'wd' in cl]
        dbversion = dbversion or clusters[0]['ver']

        collector = DetachedDiskStatCollector(q, work_directories, dbversion)
        collector.start()
        consumer = DiskCollectorConsumer(q)

        collectors.append(HostStatCollector())
        collectors.append(SystemStatCollector())
        collectors.append(MemoryStatCollector())
        for cl in clusters:
            part = PartitionStatCollector(cl['name'], cl['ver'], cl['wd'],
                                          consumer)
            pg = PgstatCollector(cl['pgcon'], cl['reconnect'], cl['pid'],
                                 cl['name'], cl['ver'], options.pid)
            groupname = cl['wd']
            groups[groupname] = {'pg': pg, 'partitions': part}
            collectors.append(part)
            collectors.append(pg)

        # we don't want to mix diagnostics messages with useful output, so we log the former into a file.
        disable_logging_to_stderr()
        loop(collectors, consumer, groups, output_method)
        enable_logging_to_stderr()
    except KeyboardInterrupt:
        pass
    except curses.error:
        print(traceback.format_exc())
        if 'SSH_CLIENT' in os.environ and 'SSH_TTY' not in os.environ:
            print(
                'Unable to initialize curses. Make sure you supply -t option (force pseudo-tty allocation) to ssh'
            )
    except Exception:
        # FIX: was a bare "except:", which also caught the SystemExit raised
        # by sys.exit(1) above and reported the intended error exit as a
        # crash traceback.
        print(traceback.format_exc())
    # FIX: the old "finally: sys.exit(0)" overwrote every exit status
    # (including the sys.exit(1) error path) with 0. Exit 0 only when the
    # try/except completed; SystemExit now propagates with its real code.
    sys.exit(0)
Example #9
0
File: view.py  Project: rsiera/pg_view
def main():
    """Entry point: parse options, discover PostgreSQL clusters (from a
    config file, command-line options, or autodetection) and run the
    monitoring loop until interrupted.

    Exit codes: 243 on a non-Linux host, 1 on an unsupported output method
    or when no clusters are found, 0 on normal termination.
    """
    global options

    # bail out if we are not running Linux
    if platform.system() != 'Linux':
        print(
            'Non Linux database hosts are not supported at the moment. Can not continue'
        )
        sys.exit(243)

    options, args = parse_args()
    consts.TICK_LENGTH = options.tick
    output_method = options.output_method

    if not output_method_is_valid(output_method):
        print('Unsupported output method: {0}'.format(output_method))
        print('Valid output methods are: {0}'.format(','.join(
            get_valid_output_methods())))
        sys.exit(1)

    if output_method == OUTPUT_METHOD.curses and not curses_available:
        print(
            'Curses output is selected, but curses are unavailable, falling back to console output'
        )
        output_method = OUTPUT_METHOD.console

    log_stderr = setup_loggers(options)
    user_dbname = options.instance
    user_dbver = options.version
    clusters = []

    # now try to read the configuration file
    config = (read_configuration(options.config_file)
              if options.config_file else None)
    if config:
        for instance in config:
            if user_dbname and instance != user_dbname:
                continue
            # pass already aquired connections to make sure we only list unique clusters.
            db_client = DBClient.from_config(config[instance])
            try:
                cluster = db_client.establish_user_defined_connection(
                    instance, clusters)
            except (NotConnectedError, NoPidConnectionError):
                msg = 'failed to acquire details about the database cluster {0}, the server will be skipped'
                loggers.logger.error(msg.format(instance))
            except DuplicatedConnectionError:
                pass
            else:
                clusters.append(cluster)

    elif options.host:
        # try to connect to the database specified by command-line options
        instance = options.instance or "default"
        db_client = DBClient.from_options(options)
        try:
            cluster = db_client.establish_user_defined_connection(
                instance, clusters)
        except (NotConnectedError, NoPidConnectionError):
            loggers.logger.error(
                "unable to continue with cluster {0}".format(instance))
        except DuplicatedConnectionError:
            pass
        else:
            clusters.append(cluster)
    else:
        # do autodetection
        postmasters = ProcWorker().get_postmasters_directories()
        # get all PostgreSQL instances
        for result_work_dir, connection_params in postmasters.items():
            # if user requested a specific database name and version - don't try to connect to others
            try:
                validate_autodetected_conn_param(user_dbname, user_dbver,
                                                 result_work_dir,
                                                 connection_params)
            except InvalidConnectionParamError:
                continue
            db_client = DBClient.from_postmasters(result_work_dir,
                                                  connection_params.pid,
                                                  connection_params.version,
                                                  options)
            if db_client is None:
                continue
            conn = db_client.connection_builder.build_connection()
            try:
                pgcon = psycopg2.connect(**conn)
            except Exception as e:
                loggers.logger.error('PostgreSQL exception {0}'.format(e))
                pgcon = None
            if pgcon:
                desc = make_cluster_desc(name=connection_params.dbname,
                                         version=connection_params.version,
                                         workdir=result_work_dir,
                                         pid=connection_params.pid,
                                         pgcon=pgcon,
                                         conn=conn)
                clusters.append(desc)

    collectors = []
    groups = {}
    try:
        if not clusters:
            loggers.logger.error(
                'No suitable PostgreSQL instances detected, exiting...')
            loggers.logger.error(
                'hint: use -v for details, or specify connection parameters '
                'manually in the configuration file (-c)')
            sys.exit(1)

        # initialize the disks stat collector process and create an exchange queue
        q = JoinableQueue(1)
        work_directories = [cl['wd'] for cl in clusters if 'wd' in cl]

        collector = DetachedDiskStatCollector(q, work_directories)
        collector.start()
        consumer = DiskCollectorConsumer(q)

        collectors.append(HostStatCollector())
        collectors.append(SystemStatCollector())
        collectors.append(MemoryStatCollector())

        for cluster in clusters:
            partition_collector = PartitionStatCollector.from_cluster(
                cluster, consumer)
            pg_collector = PgStatCollector.from_cluster(cluster, options.pid)

            groups[cluster['wd']] = {
                'pg': pg_collector,
                'partitions': partition_collector
            }
            collectors.append(partition_collector)
            collectors.append(pg_collector)

        # we don't want to mix diagnostics messages with useful output, so we log the former into a file.
        loggers.logger.removeHandler(log_stderr)
        loop(collectors, consumer, groups, output_method)
        loggers.logger.addHandler(log_stderr)
    except KeyboardInterrupt:
        pass
    except curses.error:
        print(traceback.format_exc())
        if 'SSH_CLIENT' in os.environ and 'SSH_TTY' not in os.environ:
            print(
                'Unable to initialize curses. Make sure you supply -t option (force pseudo-tty allocation) to ssh'
            )
    except Exception:
        # FIX: was a bare "except:", which also caught the SystemExit raised
        # by sys.exit(1) above and reported the intended error exit as a
        # crash traceback.
        print(traceback.format_exc())
    # FIX: the old "finally: sys.exit(0)" overwrote every exit status
    # (including the sys.exit(1) error path) with 0. Exit 0 only when the
    # try/except completed; SystemExit now propagates with its real code.
    sys.exit(0)
Example #10
0
 def test_get_mounted_device_should_return_none_when_no_device_on_pathname(
         self, mocked_disk_partitions):
     """get_mounted_device should return None when no partition is
     mounted at the requested pathname."""
     detached_disk = DetachedDiskStatCollector(
         mock.Mock(), ['/var/lib/postgresql/9.3/main'])
     mounted_device = detached_disk.get_mounted_device('/test')
     self.assertIsNone(mounted_device)
Example #11
0
File: __init__.py  Project: kmatt/pg_view
def main():
    """Entry point: parse options, discover PostgreSQL clusters (from a
    config file, command-line options, a service name, or autodetection)
    and run the monitoring loop until interrupted.

    Exit codes: 243 on a non-Linux host, 254 when psycopg2 is missing,
    1 on an unsupported output method or when no clusters are found,
    0 on normal termination.
    """
    global options

    # bail out if we are not running Linux
    if platform.system() != 'Linux':
        print('Non Linux database hosts are not supported at the moment. Can not continue')
        sys.exit(243)

    if not psycopg2_available:
        print('Unable to import psycopg2 module, please, install it (python-psycopg2). Can not continue')
        sys.exit(254)

    options, args = parse_args()
    consts.TICK_LENGTH = options.tick

    output_method = options.output_method

    if not output_method_is_valid(output_method):
        print('Unsupported output method: {0}'.format(output_method))
        print('Valid output methods are: {0}'.format(','.join(get_valid_output_methods())))
        sys.exit(1)

    if output_method == OUTPUT_METHOD.curses and not curses_available:
        print('Curses output is selected, but curses are unavailable, falling back to console output')
        output_method = OUTPUT_METHOD.console

    # set basic logging
    setup_logger(options)

    clusters = []

    config = read_configuration(options.config_file) if options.config_file else None
    dbversion = None
    # configuration file takes priority over the rest of database connection information sources.
    if config:
        for instance in config:
            if options.instance and instance != options.instance:
                continue
            # pass already aquired connections to make sure we only list unique clusters.
            host = config[instance].get('host')
            port = config[instance].get('port')
            conn = build_connection(host, port,
                                    config[instance].get('user'), config[instance].get('dbname'))

            if not establish_user_defined_connection(instance, conn, clusters):
                logger.error('failed to acquire details about ' +
                             'the database cluster {0}, the server will be skipped'.format(instance))
    elif options.host:
        # connect to the database using the connection string supplied from command-line
        conn = build_connection(options.host, options.port, options.username, options.dbname)
        instance = options.instance or "default"
        if not establish_user_defined_connection(instance, conn, clusters):
            logger.error("unable to continue with cluster {0}".format(instance))
    elif options.use_service and options.instance:
        # connect to the database using the service name
        if not establish_user_defined_connection(options.instance, {'service': options.instance}, clusters):
            logger.error("unable to continue with cluster {0}".format(options.instance))
    else:
        # do autodetection
        postmasters = get_postmasters_directories()

        # get all PostgreSQL instances
        for result_work_dir, data in postmasters.items():
            (ppid, dbversion, dbname) = data
            # if user requested a specific database name and version - don't try to connect to others
            if options.instance:
                if dbname != options.instance or not result_work_dir or not ppid:
                    continue
                if options.version is not None and dbversion != options.version:
                    continue
            try:
                conndata = detect_db_connection_arguments(
                    result_work_dir, ppid, dbversion, options.username, options.dbname)
                if conndata is None:
                    continue
                host = conndata['host']
                port = conndata['port']
                conn = build_connection(host, port, options.username, options.dbname)
                pgcon = psycopg2.connect(**conn)
            except Exception as e:
                logger.error('PostgreSQL exception {0}'.format(e))
                pgcon = None
            if pgcon:
                desc = make_cluster_desc(name=dbname, version=dbversion, workdir=result_work_dir,
                                         pid=ppid, pgcon=pgcon, conn=conn)
                clusters.append(desc)
    collectors = []
    groups = {}
    try:
        if len(clusters) == 0:
            logger.error('No suitable PostgreSQL instances detected, exiting...')
            logger.error('hint: use -v for details, ' +
                         'or specify connection parameters manually in the configuration file (-c)')
            sys.exit(1)

        # initialize the disks stat collector process and create an exchange queue
        q = JoinableQueue(1)
        work_directories = [cl['wd'] for cl in clusters if 'wd' in cl]
        dbversion = dbversion or clusters[0]['ver']

        collector = DetachedDiskStatCollector(q, work_directories, dbversion)
        collector.start()
        consumer = DiskCollectorConsumer(q)

        collectors.append(HostStatCollector())
        collectors.append(SystemStatCollector())
        collectors.append(MemoryStatCollector())
        for cl in clusters:
            part = PartitionStatCollector(cl['name'], cl['ver'], cl['wd'], consumer)
            pg = PgstatCollector(cl['pgcon'], cl['reconnect'], cl['pid'], cl['name'], cl['ver'], options.pid)
            groupname = cl['wd']
            groups[groupname] = {'pg': pg, 'partitions': part}
            collectors.append(part)
            collectors.append(pg)

        # we don't want to mix diagnostics messages with useful output, so we log the former into a file.
        disable_logging_to_stderr()
        loop(collectors, consumer, groups, output_method)
        enable_logging_to_stderr()
    except KeyboardInterrupt:
        pass
    except curses.error:
        print(traceback.format_exc())
        if 'SSH_CLIENT' in os.environ and 'SSH_TTY' not in os.environ:
            print('Unable to initialize curses. Make sure you supply -t option (force pseudo-tty allocation) to ssh')
    except Exception:
        # FIX: was a bare "except:", which also caught the SystemExit raised
        # by sys.exit(1) above and reported the intended error exit as a
        # crash traceback.
        print(traceback.format_exc())
    # FIX: the old "finally: sys.exit(0)" overwrote every exit status
    # (including the sys.exit(1) error path) with 0. Exit 0 only when the
    # try/except completed; SystemExit now propagates with its real code.
    sys.exit(0)