Example 1
0
def setup_logger(options):
    """Configure the module logger from command-line options.

    Verbose mode lowers the threshold to INFO; otherwise only errors are
    reported.  When a log file is requested it is truncated first and all
    records are routed there; otherwise ``basicConfig`` installs its
    default stderr handler.  Finally stderr logging is (re)enabled.
    """
    level = logging.INFO if options.verbose else logging.ERROR
    logger.setLevel(level)
    log_format = '%(levelname)s: %(asctime)-15s %(message)s'
    if options.log_file:
        # Start every run with an empty log file.
        open(options.log_file, 'w').close()
        logging.basicConfig(format=log_format, filename=options.log_file)
    else:
        logging.basicConfig(format=log_format)
    enable_logging_to_stderr()
Example 2
0
def main():
    """Entry point: discover PostgreSQL clusters and run the monitoring loop.

    Cluster connection information is taken, in priority order, from the
    configuration file (-c), the host/port command-line options, a libpq
    service name, or autodetection of running postmaster processes.

    Exit codes: 254 when psycopg2 is missing, 1 on an invalid output
    method or when no cluster could be contacted, 0 otherwise.
    """
    global options

    if not psycopg2_available:
        print(
            'Unable to import psycopg2 module, please, install it (python-psycopg2). Can not continue'
        )
        sys.exit(254)

    options, args = parse_args()
    consts.TICK_LENGTH = options.tick

    output_method = options.output_method

    if not output_method_is_valid(output_method):
        print('Unsupported output method: {0}'.format(output_method))
        print('Valid output methods are: {0}'.format(','.join(
            get_valid_output_methods())))
        sys.exit(1)

    if output_method == OUTPUT_METHOD.curses and not curses_available:
        print(
            'Curses output is selected, but curses are unavailable, falling back to console output'
        )
        output_method = OUTPUT_METHOD.console

    # set basic logging
    setup_logger(options)

    clusters = []

    config = read_configuration(
        options.config_file) if options.config_file else None
    dbversion = None
    # configuration file takes priority over the rest of database connection information sources.
    if config:
        for instance in config:
            if options.instance and instance != options.instance:
                continue
            # pass already acquired connections to make sure we only list unique clusters.
            host = config[instance].get('host')
            port = config[instance].get('port')
            conn = build_connection(host, port, config[instance].get('user'),
                                    config[instance].get('dbname'))

            if not establish_user_defined_connection(instance, conn, clusters):
                logger.error(
                    'failed to acquire details about ' +
                    'the database cluster {0}, the server will be skipped'.
                    format(instance))
    elif options.host:
        # connect to the database using the connection string supplied from command-line
        conn = build_connection(options.host, options.port, options.username,
                                options.dbname)
        instance = options.instance or "default"
        if not establish_user_defined_connection(instance, conn, clusters):
            logger.error(
                "unable to continue with cluster {0}".format(instance))
    elif options.use_service and options.instance:
        # connect to the database using the service name
        if not establish_user_defined_connection(
                options.instance, {'service': options.instance}, clusters):
            logger.error("unable to continue with cluster {0}".format(
                options.instance))
    else:
        # do autodetection
        postmasters = get_postmasters_directories()

        # get all PostgreSQL instances
        for result_work_dir, data in postmasters.items():
            (ppid, dbversion, dbname) = data
            # if user requested a specific database name and version - don't try to connect to others
            if options.instance:
                if dbname != options.instance or not result_work_dir or not ppid:
                    continue
                if options.version is not None and dbversion != options.version:
                    continue
            try:
                conndata = detect_db_connection_arguments(
                    result_work_dir, ppid, dbversion, options.username,
                    options.dbname)
                if conndata is None:
                    continue
                host = conndata['host']
                port = conndata['port']
                conn = build_connection(host, port, options.username,
                                        options.dbname)
                pgcon = psycopg2.connect(**conn)
            except Exception as e:
                logger.error('PostgreSQL exception {0}'.format(e))
                pgcon = None
            if pgcon:
                desc = make_cluster_desc(name=dbname,
                                         version=dbversion,
                                         workdir=result_work_dir,
                                         pid=ppid,
                                         pgcon=pgcon,
                                         conn=conn)
                clusters.append(desc)
    collectors = []
    groups = {}
    try:
        if len(clusters) == 0:
            logger.error(
                'No suitable PostgreSQL instances detected, exiting...')
            logger.error(
                'hint: use -v for details, ' +
                'or specify connection parameters manually in the configuration file (-c)'
            )
            sys.exit(1)

        # initialize the disks stat collector process and create an exchange queue
        q = JoinableQueue(1)
        work_directories = [cl['wd'] for cl in clusters if 'wd' in cl]
        dbversion = dbversion or clusters[0]['ver']

        collector = DetachedDiskStatCollector(q, work_directories, dbversion)
        collector.start()
        consumer = DiskCollectorConsumer(q)

        collectors.append(HostStatCollector())
        collectors.append(SystemStatCollector())
        collectors.append(MemoryStatCollector())
        for cl in clusters:
            part = PartitionStatCollector(cl['name'], cl['ver'], cl['wd'],
                                          consumer)
            pg = PgstatCollector(cl['pgcon'], cl['reconnect'], cl['pid'],
                                 cl['name'], cl['ver'], options.pid)
            groupname = cl['wd']
            groups[groupname] = {'pg': pg, 'partitions': part}
            collectors.append(part)
            collectors.append(pg)

        # we don't want to mix diagnostics messages with useful output, so we log the former into a file.
        disable_logging_to_stderr()
        loop(collectors, consumer, groups, output_method)
        enable_logging_to_stderr()
    except KeyboardInterrupt:
        pass
    except curses.error:
        print(traceback.format_exc())
        if 'SSH_CLIENT' in os.environ and 'SSH_TTY' not in os.environ:
            print(
                'Unable to initialize curses. Make sure you supply -t option (force pseudo-tty allocation) to ssh'
            )
    except Exception:
        # FIX: this used to be a bare "except:", which also caught the
        # SystemExit raised by sys.exit(1) above (SystemExit derives from
        # BaseException), printed a spurious traceback, and - together with
        # the former "finally: sys.exit(0)" - forced exit code 0 on every path.
        print(traceback.format_exc())
    # Normal termination (including handled exceptions) exits with 0; a
    # SystemExit raised inside the try block now propagates its own code.
    sys.exit(0)
Example 3
0
def main():
    """Entry point: discover PostgreSQL clusters and run the monitoring loop.

    Bails out on non-Linux hosts and when psycopg2 is unavailable.
    Cluster connection information is taken, in priority order, from the
    configuration file (-c), the host/port command-line options, a libpq
    service name, or autodetection of running postmaster processes.

    Exit codes: 243 on a non-Linux host, 254 when psycopg2 is missing,
    1 on an invalid output method or when no cluster could be contacted,
    0 otherwise.
    """
    global options

    # bail out if we are not running Linux
    if platform.system() != 'Linux':
        print('Non Linux database hosts are not supported at the moment. Can not continue')
        sys.exit(243)

    if not psycopg2_available:
        print('Unable to import psycopg2 module, please, install it (python-psycopg2). Can not continue')
        sys.exit(254)

    options, args = parse_args()
    consts.TICK_LENGTH = options.tick

    output_method = options.output_method

    if not output_method_is_valid(output_method):
        print('Unsupported output method: {0}'.format(output_method))
        print('Valid output methods are: {0}'.format(','.join(get_valid_output_methods())))
        sys.exit(1)

    if output_method == OUTPUT_METHOD.curses and not curses_available:
        print('Curses output is selected, but curses are unavailable, falling back to console output')
        output_method = OUTPUT_METHOD.console

    # set basic logging
    setup_logger(options)

    clusters = []

    config = read_configuration(options.config_file) if options.config_file else None
    dbversion = None
    # configuration file takes priority over the rest of database connection information sources.
    if config:
        for instance in config:
            if options.instance and instance != options.instance:
                continue
            # pass already acquired connections to make sure we only list unique clusters.
            host = config[instance].get('host')
            port = config[instance].get('port')
            conn = build_connection(host, port,
                                    config[instance].get('user'), config[instance].get('dbname'))

            if not establish_user_defined_connection(instance, conn, clusters):
                logger.error('failed to acquire details about ' +
                             'the database cluster {0}, the server will be skipped'.format(instance))
    elif options.host:
        # connect to the database using the connection string supplied from command-line
        conn = build_connection(options.host, options.port, options.username, options.dbname)
        instance = options.instance or "default"
        if not establish_user_defined_connection(instance, conn, clusters):
            logger.error("unable to continue with cluster {0}".format(instance))
    elif options.use_service and options.instance:
        # connect to the database using the service name
        if not establish_user_defined_connection(options.instance, {'service': options.instance}, clusters):
            logger.error("unable to continue with cluster {0}".format(options.instance))
    else:
        # do autodetection
        postmasters = get_postmasters_directories()

        # get all PostgreSQL instances
        for result_work_dir, data in postmasters.items():
            (ppid, dbversion, dbname) = data
            # if user requested a specific database name and version - don't try to connect to others
            if options.instance:
                if dbname != options.instance or not result_work_dir or not ppid:
                    continue
                if options.version is not None and dbversion != options.version:
                    continue
            try:
                conndata = detect_db_connection_arguments(
                    result_work_dir, ppid, dbversion, options.username, options.dbname)
                if conndata is None:
                    continue
                host = conndata['host']
                port = conndata['port']
                conn = build_connection(host, port, options.username, options.dbname)
                pgcon = psycopg2.connect(**conn)
            except Exception as e:
                logger.error('PostgreSQL exception {0}'.format(e))
                pgcon = None
            if pgcon:
                desc = make_cluster_desc(name=dbname, version=dbversion, workdir=result_work_dir,
                                         pid=ppid, pgcon=pgcon, conn=conn)
                clusters.append(desc)
    collectors = []
    groups = {}
    try:
        if len(clusters) == 0:
            logger.error('No suitable PostgreSQL instances detected, exiting...')
            logger.error('hint: use -v for details, ' +
                         'or specify connection parameters manually in the configuration file (-c)')
            sys.exit(1)

        # initialize the disks stat collector process and create an exchange queue
        q = JoinableQueue(1)
        work_directories = [cl['wd'] for cl in clusters if 'wd' in cl]
        dbversion = dbversion or clusters[0]['ver']

        collector = DetachedDiskStatCollector(q, work_directories, dbversion)
        collector.start()
        consumer = DiskCollectorConsumer(q)

        collectors.append(HostStatCollector())
        collectors.append(SystemStatCollector())
        collectors.append(MemoryStatCollector())
        for cl in clusters:
            part = PartitionStatCollector(cl['name'], cl['ver'], cl['wd'], consumer)
            pg = PgstatCollector(cl['pgcon'], cl['reconnect'], cl['pid'], cl['name'], cl['ver'], options.pid)
            groupname = cl['wd']
            groups[groupname] = {'pg': pg, 'partitions': part}
            collectors.append(part)
            collectors.append(pg)

        # we don't want to mix diagnostics messages with useful output, so we log the former into a file.
        disable_logging_to_stderr()
        loop(collectors, consumer, groups, output_method)
        enable_logging_to_stderr()
    except KeyboardInterrupt:
        pass
    except curses.error:
        print(traceback.format_exc())
        if 'SSH_CLIENT' in os.environ and 'SSH_TTY' not in os.environ:
            print('Unable to initialize curses. Make sure you supply -t option (force pseudo-tty allocation) to ssh')
    except Exception:
        # FIX: this used to be a bare "except:", which also caught the
        # SystemExit raised by sys.exit(1) above (SystemExit derives from
        # BaseException), printed a spurious traceback, and - together with
        # the former "finally: sys.exit(0)" - forced exit code 0 on every path.
        print(traceback.format_exc())
    # Normal termination (including handled exceptions) exits with 0; a
    # SystemExit raised inside the try block now propagates its own code.
    sys.exit(0)