Code Example #1
File: web.py Project: evakbe/PGObserver
def main():
    parser = ArgumentParser(description='PGObserver Frontend')
    parser.add_argument('-c', '--config', help='Path to yaml config file with datastore connect details. Default location - {} \
        If not found then ENV vars PGOBS_HOST, PGOBS_DBNAME, PGOBS_USER, PGOBS_PASSWORD [, PGOBS_PORT]  will be used'.format(DEFAULT_CONF_FILE),
                        default=DEFAULT_CONF_FILE)
    parser.add_argument('-p', '--port', help='Web server port. Overrides value from config file', dest='port', type=int)

    args = parser.parse_args()

    settings = collections.defaultdict(dict)

    if args.config:
        args.config = os.path.expanduser(args.config)

        if not os.path.exists(args.config):
            print 'WARNING. Config file {} not found! exiting...'.format(args.config)
            return
        print "trying to read config file from {}".format(args.config)
        with open(args.config, 'rb') as fd:
            settings = yaml.load(fd)

    # Make env vars overwrite yaml file, to run via docker without changing config file
    settings['database']['host'] = (os.getenv('PGOBS_HOST') or settings['database'].get('host'))
    settings['database']['port'] = (os.getenv('PGOBS_PORT') or settings['database'].get('port') or 5432)
    settings['database']['name'] = (os.getenv('PGOBS_DATABASE') or settings['database'].get('name'))
    settings['database']['frontend_user'] = (os.getenv('PGOBS_USER') or settings['database'].get('frontend_user'))
    settings['database']['frontend_password'] = (os.getenv('PGOBS_PASSWORD') or settings['database'].get('frontend_password'))

    if not (settings['database'].get('host') and settings['database'].get('name') and settings['database'].get('frontend_user')):
        print 'Mandatory datastore connect details missing!'
        print 'Check --config input or environment variables: PGOBS_HOST, PGOBS_DATABASE, PGOBS_USER, PGOBS_PASSWORD [, PGOBS_PORT]'
        print ''
        parser.print_help()
        return

    conn_string = ' '.join((
        'dbname=' + settings['database']['name'],
        'host=' + settings['database']['host'],
        'user=' + settings['database']['frontend_user'],
        'port=' + str(settings['database']['port']),
    ))
    print 'Setting connection string to ... ' + conn_string
    # conn_string without the password has been printed; now append the password
    conn_string = conn_string + ' password=' + settings['database']['frontend_password']

    datadb.setConnectionString(conn_string)

    current_dir = os.path.dirname(os.path.abspath(__file__))

    conf = {
        'global': {'server.socket_host': '0.0.0.0', 'server.socket_port': args.port or settings.get('frontend',
                   {}).get('port') or 8080},
        '/': {'tools.staticdir.root': current_dir},
        '/static': {'tools.staticdir.dir': 'static', 'tools.staticdir.on': True, 'tools.sessions.on': False},
        '/manifest.info': {'tools.staticfile.on': True, 'tools.staticfile.filename': os.path.join(current_dir, '..',
                           'MANIFEST.MF'), 'tools.auth_basic.on': False, 'tools.sessions.on': False},
    }

    tplE.setup(settings)  # setup of global variables and host data for usage in views

    root = welcomefrontend.WelcomeFrontend()

    for h in hosts.getHostData().values():
        mf = monitorfrontend.MonitorFrontend(h['host_id'])

        setattr(root, h['uishortname'], mf)
        setattr(root, str(h['host_id']), mf)  # allowing host_id's for backwards compatibility

    root.report = report.Report()
    root.export = export.Export()
    root.perftables = performance.PerfTables()
    root.perfapi = performance.PerfApi()
    root.perfindexes = performance.PerfIndexes()
    root.perfschemas = performance.PerfUnusedSchemas()
    root.perflocks = performance.PerfLocksReport()
    root.perfstatstatements = performance.PerfStatStatementsReport()
    root.perfbloat = performance.PerfBloat()
    root.sprocs = sprocsfrontend.SprocFrontend()
    root.tables = tablesfrontend.TableFrontend()
    root.indexes = indexesfrontend.IndexesFrontend()
    root.hosts = hostsfrontend.HostsFrontend()
    root.api = api.Root(root)  # JSON api exposure, enabling integration with other monitoring tools

    if settings.get('oauth', {}).get('enable_oauth', False):
        print 'switching on oauth ...'
        import oauth
        root.oauth = oauth.Oauth(settings['oauth'])
        cherrypy.config.update({'tools.oauthtool.on': True, 'tools.sessions.on': True,
                                      'tools.sessions.timeout': settings['oauth'].get('session_timeout', 43200)})

    cherrypy.quickstart(root, config=conf)
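
The env-override block above repeats the same "env var, else yaml value" pattern once per key. A minimal sketch of that pattern factored into a helper; the ENV_TO_SETTING mapping and the helper name are illustrative, not part of PGObserver:

import os

# Hypothetical mapping from env vars to the settings keys used above.
ENV_TO_SETTING = {
    'PGOBS_HOST': 'host',
    'PGOBS_PORT': 'port',
    'PGOBS_DATABASE': 'name',
    'PGOBS_USER': 'frontend_user',
    'PGOBS_PASSWORD': 'frontend_password',
}

def apply_env_overrides(db_settings):
    """Let environment variables win over values loaded from the yaml file."""
    for env_var, key in ENV_TO_SETTING.items():
        value = os.getenv(env_var)
        if value:  # empty string counts as unset, matching the `or` idiom above
            db_settings[key] = value
    db_settings.setdefault('port', 5432)
    return db_settings
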
Code Example #2
File: web.py Project: grofers/PGObserver
def main():
    parser = ArgumentParser(description='PGObserver Frontend')
    parser.add_argument('-c', '--config', help='Path to yaml config file with datastore connect details. See pgobserver_frontend.example.yaml for a sample file. \
        Certain values can be overridden by ENV vars PGOBS_HOST, PGOBS_DBNAME, PGOBS_USER, PGOBS_PASSWORD [, PGOBS_PORT]')
    parser.add_argument('--s3-config-path', help='Path style S3 URL to a key that holds the config file. Or PGOBS_CONFIG_S3_BUCKET env. var',
                              metavar='https://s3-region.amazonaws.com/x/y/file.yaml',
                              default=os.getenv('PGOBS_CONFIG_S3_BUCKET'))
    parser.add_argument('-p', '--port', help='Web server port. Overrides value from config file', type=int)

    args = parser.parse_args()

    settings = collections.defaultdict(dict)

    if args.s3_config_path:         # S3 has precedence if specified
        import aws_s3_configreader
        settings = aws_s3_configreader.get_config_as_dict_from_s3_file(args.s3_config_path)
    elif args.config:
        args.config = os.path.expanduser(args.config)

        if not os.path.exists(args.config):
            print 'WARNING. Config file {} not found! exiting...'.format(args.config)
            return
        print "trying to read config file from {}".format(args.config)
        with open(args.config, 'rb') as fd:
            settings = yaml.load(fd)

    # Make env vars overwrite yaml file, to run via docker without changing config file
    settings['database']['host'] = (os.getenv('PGOBS_HOST') or settings['database'].get('host'))
    settings['database']['port'] = (os.getenv('PGOBS_PORT') or settings['database'].get('port') or 5432)
    settings['database']['name'] = (os.getenv('PGOBS_DATABASE') or settings['database'].get('name'))
    settings['database']['frontend_user'] = (os.getenv('PGOBS_USER') or settings['database'].get('frontend_user'))
    settings['database']['frontend_password'] = (os.getenv('PGOBS_PASSWORD') or settings['database'].get('frontend_password'))

    if not (settings['database'].get('host') and settings['database'].get('name') and settings['database'].get('frontend_user')):
        print 'Mandatory datastore connect details missing!'
        print 'Check --config input or environment variables: PGOBS_HOST, PGOBS_DATABASE, PGOBS_USER, PGOBS_PASSWORD [, PGOBS_PORT]'
        print ''
        parser.print_help()
        return

    conn_string = ' '.join((
        'dbname=' + settings['database']['name'],
        'host=' + settings['database']['host'],
        'user=' + settings['database']['frontend_user'],
        'port=' + str(settings['database']['port']),
    ))
    print 'Setting connection string to ... ' + conn_string
    # conn_string without the password has been printed; now append the password
    conn_string = conn_string + ' password=' + settings['database']['frontend_password']

    datadb.setConnectionString(conn_string)

    current_dir = os.path.dirname(os.path.abspath(__file__))

    conf = {
        'global': {'server.socket_host': '0.0.0.0', 'server.socket_port': args.port or settings.get('frontend',
                   {}).get('port') or 8080},
        '/': {'tools.staticdir.root': current_dir, 'request.dispatch': HostIdAndShortnameDispatcher()},
        '/healthcheck': {'tools.sessions.on': False},
        '/static': {'tools.staticdir.dir': 'static', 'tools.staticdir.on': True, 'tools.sessions.on': False},
        '/manifest.info': {'tools.staticfile.on': True, 'tools.staticfile.filename': os.path.join(current_dir, '..',
                           'MANIFEST.MF'), 'tools.auth_basic.on': False, 'tools.sessions.on': False},
    }

    tplE.setup(settings)  # setup of global variables and host data for usage in views

    root = welcomefrontend.WelcomeFrontend()

    root.host = monitorfrontend.MonitorFrontend()
    root.report = report.Report()
    root.export = export.Export()
    root.perftables = performance.PerfTables()
    root.perfapi = performance.PerfApi()
    root.perfindexes = performance.PerfIndexes()
    root.perfschemas = performance.PerfUnusedSchemas()
    root.perflocks = performance.PerfLocksReport()
    root.perfstatstatements = performance.PerfStatStatementsReport()
    root.perfbloat = performance.PerfBloat()
    root.sprocs = sprocsfrontend.SprocFrontend()
    root.tables = tablesfrontend.TableFrontend()
    root.indexes = indexesfrontend.IndexesFrontend()
    root.hosts = hostsfrontend.HostsFrontend()
    root.api = api.Root(root)  # JSON api exposure, enabling integration with other monitoring tools
    root.healthcheck = Healthcheck()

    if settings.get('oauth', {}).get('enable_oauth', False):
        print 'switching on oauth ...'
        import oauth
        root.oauth = oauth.Oauth(settings['oauth'])
        cherrypy.config.update({'tools.oauthtool.on': True, 'tools.sessions.on': True,
                                      'tools.sessions.timeout': settings['oauth'].get('session_timeout', 43200)})

    cherrypy.quickstart(root, config=conf)
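
aws_s3_configreader is imported above but not shown. A hedged sketch of what get_config_as_dict_from_s3_file could look like with boto3, assuming a path-style URL of the form given in the --s3-config-path metavar; the real module may differ:

import yaml
import boto3
try:
    from urllib.parse import urlparse   # Python 3
except ImportError:
    from urlparse import urlparse       # Python 2

def get_config_as_dict_from_s3_file(s3_url):
    """Fetch a yaml config from https://s3-region.amazonaws.com/bucket/key and parse it."""
    parsed = urlparse(s3_url)
    bucket, _, key = parsed.path.lstrip('/').partition('/')
    body = boto3.client('s3').get_object(Bucket=bucket, Key=key)['Body'].read()
    return yaml.safe_load(body)
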
Code Example #3
                        default=(os.getenv('PW2_ISSL') or False))
    # Grafana
    parser.add_argument(
        '--grafana_baseurl',
        help='For linking to Grafana "Query details" dashboard',
        default='http://0.0.0.0:3000')

    cmd_args = parser.parse_args()

    logging.basicConfig(
        format='%(asctime)s %(levelname)s %(process)d %(message)s',
        level=(logging.DEBUG if int(cmd_args.verbose) >= 2 else (
            logging.INFO if int(cmd_args.verbose) == 1 else logging.ERROR)))
    logging.debug(cmd_args)

    datadb.setConnectionString(cmd_args.host, cmd_args.port, cmd_args.database,
                               cmd_args.user, cmd_args.password)
    pgwatch2_influx.influx_set_connection_params(cmd_args.influx_host,
                                                 cmd_args.influx_port,
                                                 cmd_args.influx_user,
                                                 cmd_args.influx_password,
                                                 cmd_args.influx_database,
                                                 cmd_args.influx_require_ssl)

    current_dir = os.path.dirname(os.path.abspath(__file__))
    config = {
        'global': {
            'server.socket_host': cmd_args.socket_host,
            'server.socket_port': cmd_args.socket_port
        },
        '/static': {
            'tools.staticdir.root': current_dir,
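
The nested conditional that maps cmd_args.verbose to a log level is easy to misread. An equivalent table lookup, shown for clarity (an illustrative helper, not part of pgwatch2):

import logging

def level_from_verbosity(verbose):
    """0 -> ERROR, 1 -> INFO, 2 or more -> DEBUG, as in the expression above."""
    return {0: logging.ERROR, 1: logging.INFO}.get(min(int(verbose), 2), logging.DEBUG)
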
Code Example #4
    # Grafana
    parser.add_argument(
        '--grafana_baseurl',
        help='For linking to Grafana "Query details" dashboard',
        default=(os.getenv('PW2_GRAFANA_BASEURL') or 'http://0.0.0.0:3000'))

    cmd_args = parser.parse_args()

    logging.basicConfig(
        format='%(asctime)s %(levelname)s %(process)d %(message)s',
        level=(logging.DEBUG if int(cmd_args.verbose) >= 2 else (
            logging.INFO if int(cmd_args.verbose) == 1 else logging.ERROR)))
    logging.debug(cmd_args)

    datadb.setConnectionString(cmd_args.host, cmd_args.port, cmd_args.database,
                               cmd_args.user, cmd_args.password,
                               cmd_args.pg_require_ssl)
    err = datadb.isDataStoreConnectionOK()
    if err:
        logging.warning("config DB connection test failed: %s", err)

    pgwatch2_influx.influx_set_connection_params(cmd_args.influx_host,
                                                 cmd_args.influx_port,
                                                 cmd_args.influx_user,
                                                 cmd_args.influx_password,
                                                 cmd_args.influx_database,
                                                 cmd_args.influx_require_ssl)

    current_dir = os.path.dirname(os.path.abspath(__file__))
    config = {
        'global': {
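
datadb.isDataStoreConnectionOK is called above but not shown; a connection test of that shape usually boils down to a trivial query. A sketch with psycopg2, assuming the same "error message or None" contract (the real datadb module may differ):

import psycopg2

def is_datastore_connection_ok(conn_string):
    """Return None if a test query succeeds, else the error message."""
    try:
        conn = psycopg2.connect(conn_string)
        try:
            cur = conn.cursor()
            cur.execute('select 1')
            cur.fetchone()
        finally:
            conn.close()
        return None
    except Exception as e:
        return str(e)
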
Code Example #5
File: web.py Project: jziggas/PGObserver
def main():
    parser = ArgumentParser(description="PGObserver Frontend")
    parser.add_argument(
        "-c",
        "--config",
        help="Path to yaml config file with datastore connect details. See pgobserver_frontend.example.yaml for a sample file. \
        Certain values can be overridden by ENV vars PGOBS_HOST, PGOBS_DBNAME, PGOBS_USER, PGOBS_PASSWORD [, PGOBS_PORT]",
    )
    parser.add_argument(
        "--s3-config-path",
        help="Path style S3 URL to a key that holds the config file. Or PGOBS_CONFIG_S3_BUCKET env. var",
        metavar="https://s3-region.amazonaws.com/x/y/file.yaml",
        default=os.getenv("PGOBS_CONFIG_S3_BUCKET"),
    )
    parser.add_argument("-p", "--port", help="Web server port. Overrides value from config file", type=int)

    args = parser.parse_args()

    settings = collections.defaultdict(dict)

    if args.s3_config_path:  # S3 has precedence if specified
        import aws_s3_configreader

        settings = aws_s3_configreader.get_config_as_dict_from_s3_file(args.s3_config_path)
    elif args.config:
        args.config = os.path.expanduser(args.config)

        if not os.path.exists(args.config):
            print "WARNING. Config file {} not found! exiting...".format(args.config)
            return
        print "trying to read config file from {}".format(args.config)
        with open(args.config, "rb") as fd:
            settings = yaml.load(fd)

    # Make env vars overwrite yaml file, to run via docker without changing config file
    settings["database"]["host"] = os.getenv("PGOBS_HOST") or settings["database"].get("host")
    settings["database"]["port"] = os.getenv("PGOBS_PORT") or settings["database"].get("port") or 5432
    settings["database"]["name"] = os.getenv("PGOBS_DATABASE") or settings["database"].get("name")
    settings["database"]["frontend_user"] = os.getenv("PGOBS_USER") or settings["database"].get("frontend_user")
    settings["database"]["password"] = os.getenv("PGOBS_PASSWORD") or settings["database"].get("frontend_password")

    if not (
        settings["database"].get("host")
        and settings["database"].get("name")
        and settings["database"].get("frontend_user")
    ):
        print "Mandatory datastore connect details missing!"
        print "Check --config input or environment variables: PGOBS_HOST, PGOBS_DATABASE, PGOBS_USER, PGOBS_PASSWORD [, PGOBS_PORT]"
        print ""
        parser.print_help()
        return

    conn_string = " ".join(
        (
            "dbname=" + settings["database"]["name"],
            "host=" + settings["database"]["host"],
            "user="******"database"]["frontend_user"],
            "port=" + str(settings["database"]["port"]),
        )
    )
    print "Setting connection string to ... " + conn_string
    # conn_string without the password has been printed; now append the password
    conn_string = conn_string + " password=" + settings["database"]["frontend_password"]

    datadb.setConnectionString(conn_string)

    current_dir = os.path.dirname(os.path.abspath(__file__))

    conf = {
        "global": {
            "server.socket_host": "0.0.0.0",
            "server.socket_port": args.port or settings.get("frontend", {}).get("port") or 8080,
        },
        "/": {"tools.staticdir.root": current_dir},
        "/healthcheck": {"tools.sessions.on": False},
        "/static": {"tools.staticdir.dir": "static", "tools.staticdir.on": True, "tools.sessions.on": False},
        "/manifest.info": {
            "tools.staticfile.on": True,
            "tools.staticfile.filename": os.path.join(current_dir, "..", "MANIFEST.MF"),
            "tools.auth_basic.on": False,
            "tools.sessions.on": False,
        },
    }

    tplE.setup(settings)  # setup of global variables and host data for usage in views

    root = welcomefrontend.WelcomeFrontend()

    for h in hosts.getHostData().values():
        mf = monitorfrontend.MonitorFrontend(h["host_id"])

        setattr(root, h["uishortname"], mf)
        setattr(root, str(h["host_id"]), mf)  # allowing host_id's for backwards comp

    root.report = report.Report()
    root.export = export.Export()
    root.perftables = performance.PerfTables()
    root.perfapi = performance.PerfApi()
    root.perfindexes = performance.PerfIndexes()
    root.perfschemas = performance.PerfUnusedSchemas()
    root.perflocks = performance.PerfLocksReport()
    root.perfstatstatements = performance.PerfStatStatementsReport()
    root.perfbloat = performance.PerfBloat()
    root.sprocs = sprocsfrontend.SprocFrontend()
    root.tables = tablesfrontend.TableFrontend()
    root.indexes = indexesfrontend.IndexesFrontend()
    root.hosts = hostsfrontend.HostsFrontend()
    root.api = api.Root(root)  # JSON api exposure, enabling integration with other monitoring tools
    root.healthcheck = Healthcheck()

    if settings.get("oauth", {}).get("enable_oauth", False):
        print "switching on oauth ..."
        import oauth

        root.oauth = oauth.Oauth(settings["oauth"])
        cherrypy.config.update(
            {
                "tools.oauthtool.on": True,
                "tools.sessions.on": True,
                "tools.sessions.timeout": settings["oauth"].get("session_timeout", 43200),
            }
        )

    cherrypy.quickstart(root, config=conf)
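
Several of the web.py examples build the connection string twice so the password never reaches stdout. The same idea condensed into one helper, for reference (illustrative, not PGObserver API):

def build_conn_string(db, with_password=False):
    """Assemble a libpq key/value connection string from the settings['database'] dict."""
    parts = [
        'dbname=' + db['name'],
        'host=' + db['host'],
        'user=' + db['frontend_user'],
        'port=' + str(db['port']),
    ]
    if with_password:
        parts.append('password=' + db['frontend_password'])
    return ' '.join(parts)

# Usage: log the password-free variant, connect with the full one.
#   print build_conn_string(settings['database'])
#   datadb.setConnectionString(build_conn_string(settings['database'], with_password=True))
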
Code Example #6
File: export_to_influxdb.py Project: etel/PGObserver
def main():
    parser = ArgumentParser(description='PGObserver InfluxDB Exporter Daemon')
    parser.add_argument('-c', '--config', help='Path to config file. (default: {})'.format(DEFAULT_CONF_FILE),
                        default=DEFAULT_CONF_FILE)
    parser.add_argument('--hosts-to-sync', help='only given host_ids (comma separated) will be pushed to Influx')
    parser.add_argument('--drop-db', help='start with a fresh InfluxDB', action='store_true')
    parser.add_argument('--drop-series', help='drop single series', action='store_true')
    parser.add_argument('--daemon', help='keep scanning for new data in an endless loop', action='store_true')
    parser.add_argument('--check-interval', help='min. seconds between checking for fresh data on PgO for host/view',
                        default=30, type=int)
    group1 = parser.add_mutually_exclusive_group()
    group1.add_argument('-v', '--verbose', help='more chat', action='store_true')
    group1.add_argument('-d', '--debug', help='even more chat', action='store_true')

    args = parser.parse_args()

    logging.basicConfig(format='%(message)s', level=(logging.DEBUG if args.debug
                                                     else (logging.INFO if args.verbose else logging.ERROR)))
    args.config = os.path.expanduser(args.config)

    settings = None
    if os.path.exists(args.config):
        logging.info("Trying to read config file from %s", args.config)
        with open(args.config, 'rb') as fd:
            settings = yaml.load(fd)

    if settings is None:
        logging.error('Config file missing - Yaml file could not be found')
        parser.print_help()
        exit(1)

    conn_string = ' '.join((
        'dbname=' + settings['database']['name'],
        'host=' + settings['database']['host'],
        'user=' + settings['database']['frontend_user'],
        'port=' + str(settings['database']['port']),
    ))

    logging.info('Setting connection string to: %s', conn_string)

    conn_string = ' '.join((
        'dbname=' + settings['database']['name'],
        'host=' + settings['database']['host'],
        'user=' + settings['database']['frontend_user'],
        'password=' + settings['database']['frontend_password'],
        'port=' + str(settings['database']['port']),
    ))

    datadb.setConnectionString(conn_string)

    idb = influxdb.InfluxDBClient(settings['influxdb']['host'],
                                  settings['influxdb']['port'],
                                  settings['influxdb']['username'],
                                  settings['influxdb']['password'])

    idb_ensure_database(idb, settings['influxdb']['database'], args.drop_db)
    idb.switch_database(settings['influxdb']['database'])

    logging.debug('DBs found from InfluxDB: %s', idb.get_list_database())
    logging.info('Following views will be synced: %s', [x[0] for x in DATA_COLLECTION_QUERIES_TO_SERIES_MAPPING])

    last_check_time_per_host_and_view = collections.defaultdict(dict)
    loop_counter = 0
    while True:

        loop_counter += 1
        sql_active_hosts = 'select host_id as id, lower(host_ui_shortname) as ui_shortname from hosts where host_enabled order by 2'
        active_hosts, cols = datadb.executeAsDict(sql_active_hosts)
        logging.debug('Nr of active hosts found: %s', len(active_hosts))

        for ah in active_hosts:
            if args.hosts_to_sync:
                if str(ah['id']) not in args.hosts_to_sync.split(','):
                    # logging.debug('Skipping host %s (host_id=%s)', ah['ui_shortname'], ah['id'])
                    continue

            logging.info('Doing host: %s', ah['ui_shortname'])
            host_processing_start_time = time.time()

            for view_name, series_mapping_info in DATA_COLLECTION_QUERIES_TO_SERIES_MAPPING:

                base_name = series_mapping_info['base_name'].format(ui_shortname=ah['ui_shortname'], id=ah['id'])
                is_fan_out = series_mapping_info.get('cols_to_expand', False)
                if args.drop_series and loop_counter == 1:
                    logging.info('Dropping base series: %s ...', base_name)
                    if is_fan_out:
                        data = idb.query("list series /{}.*/".format(base_name))
                        if data[0]['points']:
                            series = [x['points'][0][1] for x in data]
                            for s in series:
                                logging.debug('Dropping series: %s ...', s)
                                idb.delete_series(s)
                        else:
                            logging.info('No existing series found to delete')
                    else:
                        idb.delete_series(base_name)

                last_data_pull_time_for_view = (last_check_time_per_host_and_view[ah['id']]).get(base_name)
                if last_data_pull_time_for_view > time.time() - args.check_interval:
                    logging.debug('Not pulling data as args.check_interval not passed yet [%s]', base_name)
                    continue
                logging.info('Fetching data from view "%s" into base series "%s"', view_name, base_name)

                latest_timestamp_for_series = None
                if not (args.drop_series and loop_counter == 1):  # no point to check if series was re-created
                    latest_timestamp_for_series = idb_get_last_timestamp_for_series_as_local_datetime(idb,
                                                                                                      base_name,
                                                                                                      is_fan_out)
                    logging.debug('Latest_timestamp_for_series: %s', latest_timestamp_for_series)
                data, columns = pgo_get_data_and_columns_from_view(ah['id'],
                                                                   view_name,
                                                                   settings['influxdb']['max_days_to_fetch'],
                                                                   latest_timestamp_for_series)
                logging.info('%s rows fetched [ latest prev. timestamp in InfluxDB : %s]', len(data), latest_timestamp_for_series)
                last_check_time_per_host_and_view[ah['id']][base_name] = time.time()

                try:
                    if len(data) > 0:
                        series_name = base_name
                        if is_fan_out:          # could leave it to continuous queries also but it would mean data duplication
                            prev_row_series_name = None
                            expanded_column_indexes = []
                            start_index = 0
                            current_index = 0
                            # logging.debug("Columns to expand: %s", series_mapping_info['cols_to_expand'])
                            for col in series_mapping_info['cols_to_expand']:
                                expanded_column_indexes.append(columns.index(col))
                            for row in data:
                                series_name = base_name
                                for ci in expanded_column_indexes:
                                    series_name += '.' + str(row[ci])
                                if series_name != prev_row_series_name and prev_row_series_name:
                                    idb_push_data(idb, prev_row_series_name, columns, data[start_index:current_index],
                                                  expanded_column_indexes)  # expanded columns will be removed from the dataset
                                    start_index = current_index
                                current_index += 1
                                prev_row_series_name = series_name

                            idb_push_data(idb, series_name, columns, data[start_index:current_index],
                                                  expanded_column_indexes)
                        else:
                            idb_push_data(idb, series_name, columns, data)

                        # insert "last update" marker into special series "hosts". useful for listing all different hosts for templated queries
                        idb_push_data(idb, HOST_UPDATE_STATUS_SERIES_NAME,
                                      ['host', 'view', 'pgo_timestamp'],
                                      [(ah['ui_shortname'], view_name, str(datetime.fromtimestamp(data[-1][0])))])
                    else:
                        logging.debug('no fresh data found on PgO')

                except Exception as e:
                    logging.error('ERROR - Could not process %s: %s', view_name, e.message)

            logging.info('Finished processing %s in %ss', ah['ui_shortname'], round(time.time() - host_processing_start_time))

        if not args.daemon:
            break

        time.sleep(1)
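
The fan-out branch above pushes a batch every time the derived series name changes between consecutive rows, relying on the data arriving ordered. The same grouping can be written with itertools.groupby; a sketch that assumes idb_push_data and the column layout from the example:

from itertools import groupby

def push_fanned_out(idb, base_name, columns, data, cols_to_expand):
    """Group consecutive rows by expanded series name and push one batch per group."""
    expanded_column_indexes = [columns.index(c) for c in cols_to_expand]

    def series_name(row):
        return base_name + ''.join('.' + str(row[i]) for i in expanded_column_indexes)

    for name, rows in groupby(data, key=series_name):
        idb_push_data(idb, name, columns, list(rows), expanded_column_indexes)
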
Code Example #7
def main():
    parser = ArgumentParser(description='PGObserver Frontend')
    parser.add_argument(
        '-c',
        '--config',
        help=
        'Path to yaml config file with datastore connect details. See pgobserver_frontend.example.yaml for a sample file. \
        Certain values can be overridden by ENV vars PGOBS_HOST, PGOBS_DBNAME, PGOBS_USER, PGOBS_PASSWORD [, PGOBS_PORT]'
    )
    parser.add_argument(
        '--s3-config-path',
        help=
        'Path style S3 URL to a key that holds the config file. Or PGOBS_CONFIG_S3_BUCKET env. var',
        metavar='https://s3-region.amazonaws.com/x/y/file.yaml',
        default=os.getenv('PGOBS_CONFIG_S3_BUCKET'))
    parser.add_argument(
        '-p',
        '--port',
        help='Web server port. Overrides value from config file',
        type=int)

    args = parser.parse_args()

    settings = collections.defaultdict(dict)

    if args.s3_config_path:  # S3 has precedence if specified
        import aws_s3_configreader
        settings = aws_s3_configreader.get_config_as_dict_from_s3_file(
            args.s3_config_path)
    elif args.config:
        args.config = os.path.expanduser(args.config)

        if not os.path.exists(args.config):
            print 'WARNING. Config file {} not found! exiting...'.format(
                args.config)
            return
        print "trying to read config file from {}".format(args.config)
        with open(args.config, 'rb') as fd:
            settings = yaml.load(fd)

    # Make env vars overwrite yaml file, to run via docker without changing config file
    settings['database']['host'] = (os.getenv('PGOBS_HOST')
                                    or settings['database'].get('host'))
    settings['database']['port'] = (os.getenv('PGOBS_PORT')
                                    or settings['database'].get('port')
                                    or 5432)
    settings['database']['name'] = (os.getenv('PGOBS_DATABASE')
                                    or settings['database'].get('name'))
    settings['database']['frontend_user'] = (
        os.getenv('PGOBS_USER') or settings['database'].get('frontend_user'))
    settings['database']['frontend_password'] = (
        os.getenv('PGOBS_PASSWORD')
        or settings['database'].get('frontend_password'))

    if not (settings['database'].get('host')
            and settings['database'].get('name')
            and settings['database'].get('frontend_user')):
        print 'Mandatory datastore connect details missing!'
        print 'Check --config input or environment variables: PGOBS_HOST, PGOBS_DATABASE, PGOBS_USER, PGOBS_PASSWORD [, PGOBS_PORT]'
        print ''
        parser.print_help()
        return

    conn_string = ' '.join((
        'dbname=' + settings['database']['name'],
        'host=' + settings['database']['host'],
        'user=' + settings['database']['frontend_user'],
        'port=' + str(settings['database']['port']),
    ))
    print 'Setting connection string to ... ' + conn_string
    # conn_string without the password has been printed; now append the password
    conn_string = conn_string + ' password=' + settings['database'][
        'frontend_password']

    datadb.setConnectionString(conn_string)

    current_dir = os.path.dirname(os.path.abspath(__file__))

    conf = {
        'global': {
            'server.socket_host':
            '0.0.0.0',
            'server.socket_port':
            args.port or settings.get('frontend', {}).get('port') or 8080
        },
        '/': {
            'tools.staticdir.root': current_dir,
            'request.dispatch': HostIdAndShortnameDispatcher()
        },
        '/healthcheck': {
            'tools.sessions.on': False
        },
        '/static': {
            'tools.staticdir.dir': 'static',
            'tools.staticdir.on': True,
            'tools.sessions.on': False
        },
        '/manifest.info': {
            'tools.staticfile.on':
            True,
            'tools.staticfile.filename':
            os.path.join(current_dir, '..', 'MANIFEST.MF'),
            'tools.auth_basic.on':
            False,
            'tools.sessions.on':
            False
        },
    }

    tplE.setup(
        settings)  # setup of global variables and host data for usage in views

    root = welcomefrontend.WelcomeFrontend()

    root.host = monitorfrontend.MonitorFrontend()
    root.report = report.Report()
    root.export = export.Export()
    root.perftables = performance.PerfTables()
    root.perfapi = performance.PerfApi()
    root.perfindexes = performance.PerfIndexes()
    root.perfschemas = performance.PerfUnusedSchemas()
    root.perflocks = performance.PerfLocksReport()
    root.perfstatstatements = performance.PerfStatStatementsReport()
    root.perfbloat = performance.PerfBloat()
    root.sprocs = sprocsfrontend.SprocFrontend()
    root.tables = tablesfrontend.TableFrontend()
    root.indexes = indexesfrontend.IndexesFrontend()
    root.hosts = hostsfrontend.HostsFrontend()
    root.api = api.Root(
        root
    )  # JSON api exposure, enabling integration with other monitoring tools
    root.healthcheck = Healthcheck()

    if settings.get('oauth', {}).get('enable_oauth', False):
        print 'switching on oauth ...'
        import oauth
        root.oauth = oauth.Oauth(settings['oauth'])
        cherrypy.config.update({
            'tools.oauthtool.on':
            True,
            'tools.sessions.on':
            True,
            'tools.sessions.timeout':
            settings['oauth'].get('session_timeout', 43200)
        })

    cherrypy.quickstart(root, config=conf)
Code Example #8
File: sprocdata.py Project: etel/PGObserver
            data['calls'].append( ( r['xaxis'] , r['d_calls'] ) )
            data['total_time'].append ( ( r['xaxis'] , r['d_total_time'] ) )
            data['self_time'].append ( ( r['xaxis'] , r['d_self_time'] ) )
            data['avg_time'].append ( ( r['xaxis'] , r['d_avg_time'] ) )
            data['avg_self_time'].append ( ( r['xaxis'] , r['d_avg_self_time'] ) )

    return data


def getAllActiveSprocNames(hostId):
    sql = """
    select
      distinct regexp_replace(sproc_name,'(\(.*\))','') as sproc_name
    from
      sprocs
      join sproc_performance_data on sp_sproc_id = sproc_id
    where sproc_host_id = %s
      and sp_host_id = %s
      and sp_timestamp > now() - '1 day'::interval
    """
    ret = datadb.execute(sql, (hostId, hostId))
    ret = [ x['sproc_name'] for x in ret ]
    ret.sort()
    return ret


if __name__ == '__main__':
    datadb.setConnectionString('dbname=prod_pgobserver_db host=pgobserver.db port=5433 user=kmoppel')
    print (getAllActiveSprocNames())
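
The regexp_replace in getAllActiveSprocNames strips the parenthesised argument list from a stored-procedure signature, so 'get_data(int, text)' collapses to 'get_data'. The same normalisation in plain Python, for reference:

import re

def strip_arg_list(sproc_name):
    r"""Mirror regexp_replace(sproc_name, '(\(.*\))', '') from the SQL above."""
    return re.sub(r'\(.*\)', '', sproc_name)

assert strip_arg_list('get_data(int, text)') == 'get_data'
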

Code Example #9
File: web.py Project: a1exsh/PGObserver
def main():
    parser = ArgumentParser(description='PGObserver Frontend')
    parser.add_argument('-c', '--config', help='Path to config file. (default: {})'.format(DEFAULT_CONF_FILE), dest='config',
                        default=DEFAULT_CONF_FILE)
    parser.add_argument('-p', '--port', help='server port', dest='port', type=int)

    args = parser.parse_args()

    args.config = os.path.expanduser(args.config)

    settings = None
    if os.path.exists(args.config):
        print "trying to read config file from {}".format(args.config)
        with open(args.config, 'rb') as fd:
            settings = yaml.load(fd)

    if settings is None:
        print 'Config file missing - Yaml file could not be found'
        parser.print_help()
        return

    # Make env vars overwrite yaml file, to run via docker without changing config file
    settings['database']['host'] = os.getenv('PGOBS_HOST', settings['database']['host'])
    settings['database']['port'] = os.getenv('PGOBS_PORT', settings['database']['port'])
    settings['database']['name'] = os.getenv('PGOBS_DATABASE', settings['database']['name'])
    settings['database']['frontend_user'] = os.getenv('PGOBS_USER', settings['database']['frontend_user'])
    settings['database']['frontend_password'] = os.getenv('PGOBS_PASSWORD', settings['database']['frontend_password'])

    conn_string = ' '.join((
        'dbname=' + settings['database']['name'],
        'host=' + settings['database']['host'],
        'user=' + settings['database']['frontend_user'],
        'port=' + str(settings['database']['port']),
    ))

    print 'Setting connection string to ... ' + conn_string

    conn_string = ' '.join((
        'dbname=' + settings['database']['name'],
        'host=' + settings['database']['host'],
        'user=' + settings['database']['frontend_user'],
        'password=' + settings['database']['frontend_password'],
        'port=' + str(settings['database']['port']),
    ))

    datadb.setConnectionString(conn_string)

    current_dir = os.path.dirname(os.path.abspath(__file__))

    conf = {'global':
                {
                    'server.socket_host': '0.0.0.0',
                    'server.socket_port': args.port or settings.get('frontend', {}).get('port') or 8080
                },
            '/':
                {
                    'tools.staticdir.root': current_dir
                },
            '/static':
                {
                    'tools.staticdir.dir': 'static',
                    'tools.staticdir.on': True
                },
            '/manifest.info':
                {
                    'tools.staticfile.on': True,
                    'tools.staticfile.filename': os.path.join(current_dir, '..', 'MANIFEST.MF'),
                    'tools.auth_basic.on': False
                }

            }

    tplE.setup(settings)    # setup of global variables and host data for usage in views

    root = welcomefrontend.WelcomeFrontend()

    for h in hosts.getHostData().values():
        mf = monitorfrontend.MonitorFrontend(h['host_id'])

        setattr(root, h['uishortname'], mf)
        setattr(root, str(h['host_id']), mf) # allowing host_id's for backwards compatibility

    root.report = report.Report()
    root.export = export.Export()
    root.perftables = performance.PerfTables()
    root.perfapi = performance.PerfApi()
    root.perfindexes = performance.PerfIndexes()
    root.perfschemas = performance.PerfUnusedSchemas()
    root.perflocks = performance.PerfLocksReport()
    root.perfstatstatements = performance.PerfStatStatementsReport()
    root.perfbloat = performance.PerfBloat()
    root.sprocs = sprocsfrontend.SprocFrontend()
    root.tables = tablesfrontend.TableFrontend()
    root.indexes = indexesfrontend.IndexesFrontend()
    root.hosts = hostsfrontend.HostsFrontend()
    root.api = api.Root(root)   # JSON api exposure, enabling integration with other monitoring tools

    cherrypy.quickstart(root, config=conf)
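
Note a subtle difference from the earlier examples: os.getenv('PGOBS_HOST', default) falls back only when the variable is unset, whereas the os.getenv('PGOBS_HOST') or default idiom used elsewhere also falls back when the variable is set but empty. A tiny demonstration:

import os

os.environ['PGOBS_HOST'] = ''                      # set, but empty

host_a = os.getenv('PGOBS_HOST', 'db.local')       # '' -- default not used
host_b = os.getenv('PGOBS_HOST') or 'db.local'     # 'db.local'

assert host_a == '' and host_b == 'db.local'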