Example #1
def pgo_get_data_and_columns_from_view(host_id, ui_shortname, view_name, max_days_to_fetch, idb_latest_timestamp=None):
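    """Fetch rows and column names for one PGObserver view, limited to the given host.

    The selection window starts at idb_latest_timestamp (or max_days_to_fetch days back when None)
    and is capped to settings['max_days_to_select_at_a_time'] days; a small safety margin is kept
    from "now" so that still-changing data is not read. Returns (rows, columns), ([], None) when
    the window is empty, or ([], []) if the query fails.
    """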
    dt_now = datetime.now()
    from_timestamp = idb_latest_timestamp
    to_timestamp = dt_now

    if from_timestamp is None:
        from_timestamp = dt_now - timedelta(days=max_days_to_fetch)

    if from_timestamp < dt_now - timedelta(days=settings.get('max_days_to_select_at_a_time', 3)):
        to_timestamp = from_timestamp + timedelta(days=settings.get('max_days_to_select_at_a_time', 3))
    else:
        to_timestamp = to_timestamp - timedelta(seconds=settings.get('safety_seconds_for_latest_data', 10))

    if from_timestamp >= to_timestamp:
        return [], None

    sql = open(os.path.join(TEMPLATES_FOLDER, view_name + '.sql')).read()
    sql_params = {'host_id': host_id, 'from_timestamp': from_timestamp, 'to_timestamp': to_timestamp,
                  'lag_interval': settings.get('lag_interval', '4 hours')}

    logging.debug("Executing:")
    logging.debug("%s", datadb.mogrify(sql, sql_params))

    try:
        view_data, columns = datadb.executeAsDict(sql, sql_params)
        return view_data, columns
    except Exception as e:
        logging.error('[%s] could not get data from PGO view %s: %s', ui_shortname, view_name, e)

    return [], []
Example #2
def pgo_get_data_and_columns_from_view(host_id, view_name, max_days_to_fetch, idb_latest_timestamp=None):
    dt_now = datetime.now()
    from_timestamp = idb_latest_timestamp
    to_timestamp = dt_now

    if from_timestamp is None:
        from_timestamp = dt_now - timedelta(days=max_days_to_fetch)

    if from_timestamp < dt_now - timedelta(days=MAX_DAYS_TO_SELECT_AT_A_TIME):
        to_timestamp = from_timestamp + timedelta(days=MAX_DAYS_TO_SELECT_AT_A_TIME)
    else:
        to_timestamp = to_timestamp - timedelta(seconds=SAFETY_SECONDS_FOR_LATEST_DATA)

    if from_timestamp >= to_timestamp:
        return [], None

    sql = open(os.path.join(TEMPLATES_FOLDER, view_name + ".sql")).read()
    sql_params = {"host_id": host_id, "from_timestamp": from_timestamp, "to_timestamp": to_timestamp}

    logging.debug("Executing:")
    logging.debug("%s", datadb.mogrify(sql, sql_params))

    view_data, columns = datadb.executeAsDict(sql, sql_params)

    return view_data, columns
Example #3
def pgo_get_data_and_columns_from_view(host_id,
                                       view_name,
                                       max_days_to_fetch,
                                       idb_latest_timestamp=None):
    dt_now = datetime.now()
    from_timestamp = idb_latest_timestamp
    to_timestamp = dt_now

    if from_timestamp is None:
        from_timestamp = dt_now - timedelta(days=max_days_to_fetch)

    if from_timestamp < dt_now - timedelta(days=MAX_DAYS_TO_SELECT_AT_A_TIME):
        to_timestamp = from_timestamp + timedelta(
            days=MAX_DAYS_TO_SELECT_AT_A_TIME)
    else:
        to_timestamp = to_timestamp - timedelta(
            seconds=SAFETY_SECONDS_FOR_LATEST_DATA)

    if from_timestamp >= to_timestamp:
        return [], None

    sql = open(os.path.join(TEMPLATES_FOLDER, view_name + '.sql')).read()
    sql_params = {
        'host_id': host_id,
        'from_timestamp': from_timestamp,
        'to_timestamp': to_timestamp
    }

    logging.debug("Executing:")
    logging.debug("%s", datadb.mogrify(sql, sql_params))

    view_data, columns = datadb.executeAsDict(sql, sql_params)

    return view_data, columns
Example #4
def pgo_get_data_and_columns_from_view(host_id,
                                       ui_shortname,
                                       view_name,
                                       max_days_to_fetch,
                                       idb_latest_timestamp=None):
    dt_now = datetime.now()
    from_timestamp = idb_latest_timestamp
    to_timestamp = dt_now

    if from_timestamp is None:
        from_timestamp = dt_now - timedelta(days=max_days_to_fetch)

    if from_timestamp < dt_now - timedelta(
            days=settings.get('max_days_to_select_at_a_time', 3)):
        to_timestamp = from_timestamp + timedelta(
            days=settings.get('max_days_to_select_at_a_time', 3))
    else:
        to_timestamp = to_timestamp - timedelta(
            seconds=settings.get('safety_seconds_for_latest_data', 10))

    if from_timestamp >= to_timestamp:
        return [], None

    sql = open(os.path.join(TEMPLATES_FOLDER, view_name + '.sql')).read()
    sql_params = {
        'host_id': host_id,
        'from_timestamp': from_timestamp,
        'to_timestamp': to_timestamp,
        'lag_interval': settings.get('lag_interval', '4 hours')
    }

    logging.debug("Executing:")
    logging.debug("%s", datadb.mogrify(sql, sql_params))

    try:
        view_data, columns = datadb.executeAsDict(sql, sql_params)
        return view_data, columns
    except Exception as e:
        logging.error('[%s] could not get data from PGO view %s: %s',
                      ui_shortname, view_name, e)

    return [], []
Example #5
def main():
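    """Parse the command line, read the YAML config, connect to the PGObserver database and to
    InfluxDB, then push fresh data for every enabled host and every configured view - once, or in
    an endless loop when --daemon is given.
    """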
    parser = ArgumentParser(description='PGObserver InfluxDB Exporter Daemon')
    parser.add_argument('-c', '--config', help='Path to config file. (default: {})'.format(DEFAULT_CONF_FILE),
                        default=DEFAULT_CONF_FILE)
    parser.add_argument('--hosts-to-sync', help='only given host_ids (comma separated) will be pushed to Influx')
    parser.add_argument('--drop-db', help='start with a fresh InfluxDB', action='store_true')
    parser.add_argument('--drop-series', help='drop single series', action='store_true')
    parser.add_argument('--daemon', help='keep scanning for new data in an endless loop', action='store_true')
    parser.add_argument('--check-interval', help='min. seconds between checking for fresh data on PgO for host/view',
                        default=30, type=int)
    group1 = parser.add_mutually_exclusive_group()
    group1.add_argument('-v', '--verbose', help='more chat', action='store_true')
    group1.add_argument('-d', '--debug', help='even more chat', action='store_true')

    args = parser.parse_args()

    logging.basicConfig(format='%(message)s', level=(logging.DEBUG if args.debug
                                                     else (logging.INFO if args.verbose else logging.ERROR)))
    args.config = os.path.expanduser(args.config)

    settings = None
    if os.path.exists(args.config):
        logging.info("Trying to read config file from %s", args.config)
        with open(args.config, 'rb') as fd:
            settings = yaml.safe_load(fd)

    if settings is None:
        logging.error('Config file missing - Yaml file could not be found')
        parser.print_help()
        exit(1)

    conn_string = ' '.join((
        'dbname=' + settings['database']['name'],
        'host=' + settings['database']['host'],
        'user=' + settings['database']['frontend_user'],
        'port=' + str(settings['database']['port']),
    ))

    logging.info('Setting connection string to: %s', conn_string)

    conn_string = ' '.join((
        'dbname=' + settings['database']['name'],
        'host=' + settings['database']['host'],
        'user=' + settings['database']['frontend_user'],
        'password=' + settings['database']['frontend_password'],
        'port=' + str(settings['database']['port']),
    ))

    datadb.setConnectionString(conn_string)

    idb = influxdb.InfluxDBClient(settings['influxdb']['host'],
                                 settings['influxdb']['port'],
                                 settings['influxdb']['username'],
                                 settings['influxdb']['password'])

    idb_ensure_database(idb, settings['influxdb']['database'], args.drop_db)
    idb.switch_database(settings['influxdb']['database'])

    logging.debug('DBs found from InfluxDB: %s', idb.get_list_database())
    logging.info('Following views will be synced: %s', [x[0] for x in DATA_COLLECTION_QUERIES_TO_SERIES_MAPPING])

    last_check_time_per_host_and_view = collections.defaultdict(dict)
    loop_counter = 0
    while True:

        loop_counter += 1
        sql_active_hosts = 'select host_id as id, lower(host_ui_shortname) as ui_shortname from hosts where host_enabled order by 2'
        active_hosts, cols = datadb.executeAsDict(sql_active_hosts)
        logging.debug('Nr of active hosts found: %s', len(active_hosts))

        for ah in active_hosts:
            if args.hosts_to_sync:
                if str(ah['id']) not in args.hosts_to_sync.split(','):
                    # logging.debug('Skipping host %s (host_id=%s)', ah['ui_shortname'], ah['id'])
                    continue

            logging.info('Doing host: %s', ah['ui_shortname'])
            host_processing_start_time = time.time()

            for view_name, series_mapping_info in DATA_COLLECTION_QUERIES_TO_SERIES_MAPPING:

                base_name = series_mapping_info['base_name'].format(ui_shortname=ah['ui_shortname'], id=ah['id'])
                is_fan_out = series_mapping_info.get('cols_to_expand', False)
                if args.drop_series and loop_counter == 1:
                    logging.info('Dropping base series: %s ...', base_name)
                    if is_fan_out:
                        data = idb.query("list series /{}.*/".format(base_name))
                        if data[0]['points']:
                            series = [x['points'][0][1] for x in data]
                            for s in series:
                                logging.debug('Dropping series: %s ...', s)
                                idb.delete_series(s)
                        else:
                            logging.info('No existing series found to delete')
                    else:
                        idb.delete_series(base_name)

                last_data_pull_time_for_view = (last_check_time_per_host_and_view[ah['id']]).get(base_name, 0)
                if last_data_pull_time_for_view > time.time() - args.check_interval:
                    logging.debug('Not pulling data as args.check_interval not passed yet [%s]', base_name)
                    continue
                logging.info('Fetching data from view "%s" into base series "%s"', view_name, base_name)

                latest_timestamp_for_series = None
                if not (args.drop_series and loop_counter == 1):  # no point to check if series was re-created
                    latest_timestamp_for_series = idb_get_last_timestamp_for_series_as_local_datetime(idb,
                                                                                                      base_name,
                                                                                                      is_fan_out)
                    logging.debug('Latest_timestamp_for_series: %s', latest_timestamp_for_series)
                data, columns = pgo_get_data_and_columns_from_view(ah['id'],
                                                                   view_name,
                                                                   settings['influxdb']['max_days_to_fetch'],
                                                                   latest_timestamp_for_series)
                logging.info('%s rows fetched [ latest prev. timestamp in InfluxDB : %s]', len(data), latest_timestamp_for_series)
                last_check_time_per_host_and_view[ah['id']][base_name] = time.time()

                try:
                    if len(data) > 0:
                        series_name = base_name
                        if is_fan_out:          # could leave it to continuous queries also but it would mean data duplication
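                            # fan-out mode: values of the 'cols_to_expand' columns become part of the series
                            # name; consecutive rows that map to the same derived name are batched into a
                            # single idb_push_data() call (the expanded columns are stripped from the pushed data)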
                            prev_row_series_name = None
                            expanded_column_indexes = []
                            start_index = 0
                            current_index = 0
                            # logging.debug("Columns to expand: %s", series_mapping_info['cols_to_expand'])
                            for col in series_mapping_info['cols_to_expand']:
                                expanded_column_indexes.append(columns.index(col))
                            for row in data:
                                series_name = base_name
                                for ci in expanded_column_indexes:
                                    series_name += '.' + str(row[ci])
                                if series_name != prev_row_series_name and prev_row_series_name:
                                    idb_push_data(idb, prev_row_series_name, columns, data[start_index:current_index],
                                                  expanded_column_indexes)  # expanded_columns_will be removed from the dataset
                                    start_index = current_index
                                current_index += 1
                                prev_row_series_name = series_name

                            idb_push_data(idb, series_name, columns, data[start_index:current_index],
                                                  expanded_column_indexes)
                        else:
                            idb_push_data(idb, series_name, columns, data)

                        # insert "last update" marker into special series "hosts". useful for listing all different hosts for templated queries
                        idb_push_data(idb, HOST_UPDATE_STATUS_SERIES_NAME,
                                      ['host', 'view', 'pgo_timestamp'],
                                      [(ah['ui_shortname'], view_name, str(datetime.fromtimestamp(data[-1][0])))])
                    else:
                        logging.debug('no fresh data found on PgO')

                except Exception as e:
                    logging.error('ERROR - Could not process %s: %s', view_name, e)

            logging.info('Finished processing %s in %ss', ah['ui_shortname'], round(time.time() - host_processing_start_time))

        if not args.daemon:
            break

        time.sleep(1)
Example #6
def main():
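    """Parse the command line, load the YAML config into the module-level settings, connect to the
    PGObserver database and to InfluxDB, spawn the worker threads and keep feeding them active
    hosts through the shared work queue.
    """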
    parser = ArgumentParser(description="PGObserver InfluxDB Exporter Daemon")
    parser.add_argument(
        "-c", "--config", help="Path to config file. (default: {})".format(DEFAULT_CONF_FILE), default=DEFAULT_CONF_FILE
    )
    parser.add_argument("--hosts-to-sync", help="only given host_ids (comma separated) will be pushed to Influx")
    parser.add_argument(
        "--drop-db",
        action="store_true",
        help="start with a fresh InfluxDB. Needs root login i.e. meant for testing purposes",
    )
    parser.add_argument(
        "--check-interval",
        help="min. seconds between checking for fresh data on PgO for host/view",
        default=30,
        type=int,
    )
    group1 = parser.add_mutually_exclusive_group()
    group1.add_argument("-v", "--verbose", action="store_true", help="more chat")
    group1.add_argument("-d", "--debug", action="store_true", help="even more chat")

    args = parser.parse_args()

    logging.basicConfig(
        format="%(asctime)s %(threadName)s %(message)s",
        level=(logging.DEBUG if args.debug else (logging.INFO if args.verbose else logging.ERROR)),
    )
    args.config = os.path.expanduser(args.config)

    global settings
    if os.path.exists(args.config):
        logging.info("Trying to read config file from %s", args.config)
        with open(args.config, "rb") as fd:
            settings = yaml.safe_load(fd)

    if settings is None:
        logging.error("Config file missing - Yaml file could not be found")
        parser.print_help()
        exit(1)

    conn_string = " ".join(
        (
            "dbname=" + settings["database"]["name"],
            "host=" + settings["database"]["host"],
            "user=" + settings["database"]["frontend_user"],
            "port=" + str(settings["database"]["port"]),
        )
    )

    logging.info("Setting connection string to: %s", conn_string)

    conn_string = " ".join(
        (
            "dbname=" + settings["database"]["name"],
            "host=" + settings["database"]["host"],
            "user=" + settings["database"]["frontend_user"],
            "password=" + settings["database"]["frontend_password"],
            "port=" + str(settings["database"]["port"]),
        )
    )

    datadb.set_connection_string_and_pool_size(conn_string, int(settings["max_worker_threads"]) + 1)

    idb = influxdb.InfluxDBClient(
        settings["influxdb"]["host"],
        settings["influxdb"]["port"],
        settings["influxdb"]["username"],
        settings["influxdb"]["password"],
    )

    if args.drop_db:
        logging.debug("DBs found from InfluxDB: %s", idb.get_list_database())
        idb_ensure_database(idb, settings["influxdb"]["database"], args.drop_db)

    idb.switch_database(settings["influxdb"]["database"])

    logging.info("Following views will be synced: %s", settings["data_collection_queries_to_process"])

    hosts_to_sync = []
    if args.hosts_to_sync:
        hosts_to_sync = args.hosts_to_sync.split(",")
        hosts_to_sync = [int(x) for x in hosts_to_sync]
        logging.debug("Syncing only hosts: %s", hosts_to_sync)

    last_queued_time_for_host = collections.defaultdict(dict)
    is_first_loop = True
    workers = []
    active_hosts = []
    active_hosts_refresh_time = 0
    sql_active_hosts = (
        "select host_id as id, replace(lower(host_ui_shortname), '-','') as ui_shortname from monitor_data.hosts "
        "where host_enabled and (%s = '{}' or host_id = any(%s)) order by 2"
    )

    while True:

        if time.time() - active_hosts_refresh_time > 180:  # checking for hosts changes every 3 minutes
            try:
                active_hosts, cols = datadb.executeAsDict(sql_active_hosts, (hosts_to_sync, hosts_to_sync))
                active_hosts_refresh_time = time.time()
            except Exception as e:
                if is_first_loop:  # ignore otherwise, db could be down for maintenance
                    raise e
                logging.error("Could not refresh active host info: %s", e)

        if is_first_loop:  # setup
            workers_to_spawn = min(
                min(
                    len(hosts_to_sync) if hosts_to_sync else settings["max_worker_threads"],
                    settings["max_worker_threads"],
                ),
                len(active_hosts),
            )
            logging.debug("Nr of monitored hosts: %s", len(active_hosts))
            logging.info("Creating %s worker threads...", workers_to_spawn)
            for i in range(0, workers_to_spawn):
                wt = WorkerThread(args)
                wt.start()
                workers.append(wt)

        if queue.qsize() <= len(active_hosts) * 2:
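            # top up the work queue only while the backlog stays below roughly two queued items per active host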

            for ah in active_hosts:

                last_data_pull_time_for_view = last_queued_time_for_host.get(ah["id"], 0)
                if time.time() - last_data_pull_time_for_view < args.check_interval:
                    # logging.debug('Not pulling data as args.check_interval not passed yet [host_id %s]', ah['id'])
                    continue

                logging.info("Putting %s to queue...", ah["ui_shortname"])
                queue.put(
                    {
                        "id": ah["id"],
                        "ui_shortname": ah["ui_shortname"],
                        "is_first_loop": is_first_loop,
                        "queued_on": time.time(),
                    }
                )
                last_queued_time_for_host[ah["id"]] = time.time()

            if is_first_loop:
                is_first_loop = False

        logging.debug("Main thread sleeps...")
        time.sleep(5)
Example #7
def main():
    parser = ArgumentParser(description='PGObserver InfluxDB Exporter Daemon')
    parser.add_argument('-c', '--config', help='Path to config file. (default: {})'.format(DEFAULT_CONF_FILE),
                        default=DEFAULT_CONF_FILE)
    parser.add_argument('--hosts-to-sync', help='only given host_ids (comma separated) will be pushed to Influx')
    parser.add_argument('--drop-db', action='store_true', help='start with a fresh InfluxDB. Needs root login i.e. meant for testing purposes')
    parser.add_argument('--drop-series', action='store_true', help='drop single series')
    parser.add_argument('--check-interval', help='min. seconds between checking for fresh data on PgO for host/view',
                        default=30, type=int)
    group1 = parser.add_mutually_exclusive_group()
    group1.add_argument('-v', '--verbose', action='store_true', help='more chat')
    group1.add_argument('-d', '--debug', action='store_true', help='even more chat')

    args = parser.parse_args()

    logging.basicConfig(format='%(asctime)s %(threadName)s %(message)s', level=(logging.DEBUG if args.debug
                                                     else (logging.INFO if args.verbose else logging.ERROR)))
    args.config = os.path.expanduser(args.config)

    global settings
    if os.path.exists(args.config):
        logging.info("Trying to read config file from %s", args.config)
        with open(args.config, 'rb') as fd:
            settings = yaml.safe_load(fd)

    if settings is None:
        logging.error('Config file missing - Yaml file could not be found')
        parser.print_help()
        exit(1)

    conn_string = ' '.join((
        'dbname=' + settings['database']['name'],
        'host=' + settings['database']['host'],
        'user=' + settings['database']['frontend_user'],
        'port=' + str(settings['database']['port']),
    ))

    logging.info('Setting connection string to: %s', conn_string)

    conn_string = ' '.join((
        'dbname=' + settings['database']['name'],
        'host=' + settings['database']['host'],
        'user=' + settings['database']['frontend_user'],
        'password=' + settings['database']['frontend_password'],
        'port=' + str(settings['database']['port']),
    ))

    datadb.set_connection_string_and_pool_size(conn_string, int(settings['max_worker_threads']) + 1)

    idb = influxdb.InfluxDBClient(settings['influxdb']['host'],
                                 settings['influxdb']['port'],
                                 settings['influxdb']['username'],
                                 settings['influxdb']['password'])

    if args.drop_db:
        logging.debug('DBs found from InfluxDB: %s', idb.get_list_database())
        idb_ensure_database(idb, settings['influxdb']['database'], args.drop_db)

    idb.switch_database(settings['influxdb']['database'])

    logging.info('Following views will be synced: %s', settings['data_collection_queries_to_process'])

    hosts_to_sync = []
    if args.hosts_to_sync:
        hosts_to_sync = args.hosts_to_sync.split(',')
        hosts_to_sync = [int(x) for x in hosts_to_sync]
        logging.debug('Syncing only hosts: %s', hosts_to_sync)

    last_queued_time_for_host = collections.defaultdict(dict)
    is_first_loop = True
    workers = []
    active_hosts = []
    active_hosts_refresh_time = 0
    sql_active_hosts = "select host_id as id, replace(lower(host_ui_shortname), '-','') as ui_shortname from monitor_data.hosts " \
                       "where host_enabled and (%s = '{}' or host_id = any(%s)) order by 2"

    while True:

        if time.time() - active_hosts_refresh_time > 180:  # checking for hosts changes every 3 minutes
            try:
                active_hosts, cols = datadb.executeAsDict(sql_active_hosts, (hosts_to_sync, hosts_to_sync))
                active_hosts_refresh_time = time.time()
            except Exception as e:
                if is_first_loop:   # ignore otherwise, db could be down for maintenance
                    raise e
                logging.error('Could not refresh active host info: %s', e)

        if is_first_loop:   # setup
            workers_to_spawn = min(min(len(hosts_to_sync) if hosts_to_sync else settings['max_worker_threads'], settings['max_worker_threads']), len(active_hosts))
            logging.debug('Nr of monitored hosts: %s', len(active_hosts))
            logging.info('Creating %s worker threads...', workers_to_spawn)
            for i in range(0, workers_to_spawn):
                wt = WorkerThread(args)
                wt.start()
                workers.append(wt)

        if queue.qsize() <= len(active_hosts) * 2:

            for ah in active_hosts:

                last_data_pull_time_for_view = last_queued_time_for_host.get(ah['id'], 0)
                if time.time() - last_data_pull_time_for_view < args.check_interval:
                    # logging.debug('Not pulling data as args.check_interval not passed yet [host_id %s]', ah['id'])
                    continue

                logging.info('Putting %s to queue...', ah['ui_shortname'])
                queue.put({'id': ah['id'], 'ui_shortname': ah['ui_shortname'],
                           'is_first_loop': is_first_loop, 'queued_on': time.time()})
                last_queued_time_for_host[ah['id']] = time.time()

            if is_first_loop:
                is_first_loop = False

        logging.debug('Main thread sleeps...')
        time.sleep(5)
Example #8
def main():
    parser = ArgumentParser(description='PGObserver InfluxDB Exporter Daemon')
    parser.add_argument('-c', '--config', help='Path to local config file (template file: {})'.format(DEFAULT_CONF_FILE))
    parser.add_argument('--s3-region', help='AWS S3 region for the config file', default=os.environ.get('PGOBS_EXPORTER_CONFIG_S3_REGION'))
    parser.add_argument('--s3-bucket', help='AWS S3 bucket for the config file', default=os.environ.get('PGOBS_EXPORTER_CONFIG_S3_BUCKET'))
    parser.add_argument('--s3-key', help='AWS S3 key for the config file', default=os.environ.get('PGOBS_EXPORTER_CONFIG_S3_KEY'))
    parser.add_argument('--hosts-to-sync', help='only given host_ids (comma separated) will be pushed to Influx')
    parser.add_argument('--drop-db', action='store_true', help='start with a fresh InfluxDB. Needs root login i.e. meant for testing purposes')
    group1 = parser.add_mutually_exclusive_group()
    group1.add_argument('-v', '--verbose', action='store_true', help='more chat')
    group1.add_argument('-d', '--debug', action='store_true', help='even more chat')

    args = parser.parse_args()

    logging.basicConfig(format='%(asctime)s %(levelname)s %(threadName)s %(message)s', level=(logging.DEBUG if args.debug
                                                     else (logging.INFO if args.verbose else logging.ERROR)))

    global settings
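
    # config may come either from a local YAML file (--config) or from an S3 object (--s3-region/--s3-bucket/--s3-key)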

    if args.config:
        args.config = os.path.expanduser(args.config)
        if os.path.exists(args.config):
            logging.info("Trying to read config file from %s", args.config)
            with open(args.config, 'rb') as fd:
                settings = yaml.safe_load(fd)
    elif args.s3_region and args.s3_bucket and args.s3_key:
        logging.info("Trying to read config file from S3...")
        config_file_as_string = get_s3_key_as_string(args.s3_region, args.s3_bucket, args.s3_key)
        settings = yaml.safe_load(config_file_as_string)

    if not (settings and settings.get('database') and settings.get('influxdb')):
        logging.error('Config info missing - recheck the --config or --s3-region/--s3-bucket/--s3-key input!')
        parser.print_help()
        exit(1)

    conn_params = {'dbname': settings['database']['name'],
                    'host': settings['database']['host'],
                    'user': settings['database']['frontend_user'],
                    'port': settings['database']['port']}

    logging.info('Setting connection string to: %s', conn_params)
    conn_params['password'] = settings['database']['frontend_password']

    datadb.init_connection_pool(int(settings['max_worker_threads']) + 1, **conn_params)

    idb = influxdb.InfluxDBClient(settings['influxdb']['host'],
                                 settings['influxdb']['port'],
                                 settings['influxdb']['username'],
                                 settings['influxdb']['password'])

    if args.drop_db:
        logging.debug('DBs found from InfluxDB: %s', idb.get_list_database())
        idb_ensure_database(idb, settings['influxdb']['database'], True)
    else:
        idb_ensure_database(idb, settings['influxdb']['database'], False)
    idb.switch_database(settings['influxdb']['database'])

    logging.info('Following views will be synced: %s', settings['data_collection_queries_to_process'])

    hosts_to_sync = []
    if args.hosts_to_sync:
        hosts_to_sync = args.hosts_to_sync.split(',')
        hosts_to_sync = [int(x) for x in hosts_to_sync]
        logging.debug('Syncing only hosts: %s', hosts_to_sync)

    last_queued_time_for_host = collections.defaultdict(dict)
    is_first_loop = True
    workers = []
    active_hosts = []
    active_hosts_refresh_time = 0
    sql_active_hosts = "select host_id as id, replace(lower(host_ui_shortname), '-','') as ui_shortname from monitor_data.hosts " \
                       "where host_enabled and (%s = '{}' or host_id = any(%s)) order by 2"

    while True:

        if time.time() - active_hosts_refresh_time > 180:  # checking for hosts changes every 3 minutes
            try:
                active_hosts, cols = datadb.executeAsDict(sql_active_hosts, (hosts_to_sync, hosts_to_sync))
                active_hosts_refresh_time = time.time()
            except Exception as e:
                if is_first_loop:   # ignore otherwise, db could be down for maintenance
                    raise e
                logging.error('Could not refresh active host info: %s', e)

        if is_first_loop:   # setup
            workers_to_spawn = min(min(len(hosts_to_sync) if hosts_to_sync else settings['max_worker_threads'], settings['max_worker_threads']), len(active_hosts))
            logging.debug('Nr of monitored hosts: %s', len(active_hosts))
            logging.info('Creating %s worker threads...', workers_to_spawn)
            for i in range(0, workers_to_spawn):
                wt = WorkerThread(args)
                wt.start()
                workers.append(wt)

        if queue.qsize() <= len(active_hosts) * 2:

            for ah in active_hosts:

                last_data_pull_time_for_view = last_queued_time_for_host.get(ah['id'], 0)
                if time.time() - last_data_pull_time_for_view < settings.get('min_check_interval_for_host', 30):
                    # logging.debug('Not pulling data as args.check_interval not passed yet [host_id %s]', ah['id'])
                    continue

                logging.debug('Putting %s to queue...', ah['ui_shortname'])
                queue.put({'id': ah['id'], 'ui_shortname': ah['ui_shortname'],
                           'is_first_loop': is_first_loop, 'queued_on': time.time()})
                last_queued_time_for_host[ah['id']] = time.time()

            if is_first_loop:
                is_first_loop = False

        logging.debug('Main thread sleeps...')
        time.sleep(5)
Example #9
def main():
    parser = ArgumentParser(description='PGObserver InfluxDB Exporter Daemon')
    parser.add_argument(
        '-c',
        '--config',
        help='Path to local config file (template file: {})'.format(
            DEFAULT_CONF_FILE))
    parser.add_argument(
        '--s3-region',
        help='AWS S3 region for the config file',
        default=os.environ.get('PGOBS_EXPORTER_CONFIG_S3_REGION'))
    parser.add_argument(
        '--s3-bucket',
        help='AWS S3 bucket for the config file',
        default=os.environ.get('PGOBS_EXPORTER_CONFIG_S3_BUCKET'))
    parser.add_argument('--s3-key',
                        help='AWS S3 key for the config file',
                        default=os.environ.get('PGOBS_EXPORTER_CONFIG_S3_KEY'))
    parser.add_argument(
        '--hosts-to-sync',
        help='only given host_ids (comma separated) will be pushed to Influx')
    parser.add_argument(
        '--drop-db',
        action='store_true',
        help=
        'start with a fresh InfluxDB. Needs root login i.e. meant for testing purposes'
    )
    group1 = parser.add_mutually_exclusive_group()
    group1.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='more chat')
    group1.add_argument('-d',
                        '--debug',
                        action='store_true',
                        help='even more chat')

    args = parser.parse_args()

    logging.basicConfig(
        format='%(asctime)s %(levelname)s %(threadName)s %(message)s',
        level=(logging.DEBUG if args.debug else
               (logging.INFO if args.verbose else logging.ERROR)))

    global settings

    if args.config:
        args.config = os.path.expanduser(args.config)
        if os.path.exists(args.config):
            logging.info("Trying to read config file from %s", args.config)
            with open(args.config, 'rb') as fd:
                settings = yaml.safe_load(fd)
    elif args.s3_region and args.s3_bucket and args.s3_key:
        logging.info("Trying to read config file from S3...")
        config_file_as_string = get_s3_key_as_string(args.s3_region,
                                                     args.s3_bucket,
                                                     args.s3_key)
        settings = yaml.safe_load(config_file_as_string)

    if not (settings and settings.get('database')
            and settings.get('influxdb')):
        logging.error(
            'Config info missing - recheck the --config or --s3-region/--s3-bucket/--s3-key input!'
        )
        parser.print_help()
        exit(1)

    conn_params = {
        'dbname': settings['database']['name'],
        'host': settings['database']['host'],
        'user': settings['database']['frontend_user'],
        'port': settings['database']['port']
    }

    logging.info('Setting connection string to: %s', conn_params)
    conn_params['password'] = settings['database']['frontend_password']

    datadb.init_connection_pool(
        int(settings['max_worker_threads']) + 1, **conn_params)

    idb = influxdb.InfluxDBClient(settings['influxdb']['host'],
                                  settings['influxdb']['port'],
                                  settings['influxdb']['username'],
                                  settings['influxdb']['password'])

    if args.drop_db:
        logging.debug('DBs found from InfluxDB: %s', idb.get_list_database())
        idb_ensure_database(idb, settings['influxdb']['database'], True)
    else:
        idb_ensure_database(idb, settings['influxdb']['database'], False)
    idb.switch_database(settings['influxdb']['database'])

    logging.info('Following views will be synced: %s',
                 settings['data_collection_queries_to_process'])

    hosts_to_sync = []
    if args.hosts_to_sync:
        hosts_to_sync = args.hosts_to_sync.split(',')
        hosts_to_sync = [int(x) for x in hosts_to_sync]
        logging.debug('Syncing only hosts: %s', hosts_to_sync)

    last_queued_time_for_host = collections.defaultdict(dict)
    is_first_loop = True
    workers = []
    active_hosts = []
    active_hosts_refresh_time = 0
    sql_active_hosts = "select host_id as id, replace(lower(host_ui_shortname), '-','') as ui_shortname from monitor_data.hosts " \
                       "where host_enabled and (%s = '{}' or host_id = any(%s)) order by 2"

    while True:

        if time.time() - active_hosts_refresh_time > 180:  # checking for hosts changes every 3 minutes
            try:
                active_hosts, cols = datadb.executeAsDict(
                    sql_active_hosts, (hosts_to_sync, hosts_to_sync))
                active_hosts_refresh_time = time.time()
            except Exception as e:
                if is_first_loop:  # ignore otherwise, db could be down for maintenance
                    raise e
                logging.error('Could not refresh active host info: %s', e)

        if is_first_loop:  # setup
            workers_to_spawn = min(
                min(
                    len(hosts_to_sync)
                    if hosts_to_sync else settings['max_worker_threads'],
                    settings['max_worker_threads']), len(active_hosts))
            logging.debug('Nr of monitored hosts: %s', len(active_hosts))
            logging.info('Creating %s worker threads...', workers_to_spawn)
            for i in range(0, workers_to_spawn):
                wt = WorkerThread(args)
                wt.start()
                workers.append(wt)

        if queue.qsize() <= len(active_hosts) * 2:

            for ah in active_hosts:

                last_data_pull_time_for_view = last_queued_time_for_host.get(
                    ah['id'], 0)
                if time.time() - last_data_pull_time_for_view < settings.get(
                        'min_check_interval_for_host', 30):
                    # logging.debug('Not pulling data as args.check_interval not passed yet [host_id %s]', ah['id'])
                    continue

                logging.debug('Putting %s to queue...', ah['ui_shortname'])
                queue.put({
                    'id': ah['id'],
                    'ui_shortname': ah['ui_shortname'],
                    'is_first_loop': is_first_loop,
                    'queued_on': time.time()
                })
                last_queued_time_for_host[ah['id']] = time.time()

            if is_first_loop:
                is_first_loop = False

        logging.debug('Main thread sleeps...')
        time.sleep(5)
Example #10
def main():
    parser = ArgumentParser(description='PGObserver InfluxDB Exporter Daemon')
    parser.add_argument(
        '-c',
        '--config',
        help='Path to config file. (default: {})'.format(DEFAULT_CONF_FILE),
        default=DEFAULT_CONF_FILE)
    parser.add_argument(
        '--hosts-to-sync',
        help='only given host_ids (comma separated) will be pushed to Influx')
    parser.add_argument(
        '--drop-db',
        action='store_true',
        help=
        'start with a fresh InfluxDB. Needs root login i.e. meant for testing purposes'
    )
    parser.add_argument(
        '--check-interval',
        help=
        'min. seconds between checking for fresh data on PgO for host/view',
        default=30,
        type=int)
    group1 = parser.add_mutually_exclusive_group()
    group1.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='more chat')
    group1.add_argument('-d',
                        '--debug',
                        action='store_true',
                        help='even more chat')

    args = parser.parse_args()

    logging.basicConfig(
        format='%(asctime)s %(threadName)s %(message)s',
        level=(logging.DEBUG if args.debug else
               (logging.INFO if args.verbose else logging.ERROR)))
    args.config = os.path.expanduser(args.config)

    global settings
    if os.path.exists(args.config):
        logging.info("Trying to read config file from %s", args.config)
        with open(args.config, 'rb') as fd:
            settings = yaml.safe_load(fd)

    if settings is None:
        logging.error('Config file missing - Yaml file could not be found')
        parser.print_help()
        exit(1)

    conn_string = ' '.join((
        'dbname=' + settings['database']['name'],
        'host=' + settings['database']['host'],
        'user=' + settings['database']['frontend_user'],
        'port=' + str(settings['database']['port']),
    ))

    logging.info('Setting connection string to: %s', conn_string)

    conn_string = ' '.join((
        'dbname=' + settings['database']['name'],
        'host=' + settings['database']['host'],
        'user=' + settings['database']['frontend_user'],
        'password=' + settings['database']['frontend_password'],
        'port=' + str(settings['database']['port']),
    ))

    datadb.set_connection_string_and_pool_size(
        conn_string,
        int(settings['max_worker_threads']) + 1)

    idb = influxdb.InfluxDBClient(settings['influxdb']['host'],
                                  settings['influxdb']['port'],
                                  settings['influxdb']['username'],
                                  settings['influxdb']['password'])

    if args.drop_db:
        logging.debug('DBs found from InfluxDB: %s', idb.get_list_database())
        idb_ensure_database(idb, settings['influxdb']['database'],
                            args.drop_db)

    idb.switch_database(settings['influxdb']['database'])

    logging.info('Following views will be synced: %s',
                 settings['data_collection_queries_to_process'])

    hosts_to_sync = []
    if args.hosts_to_sync:
        hosts_to_sync = args.hosts_to_sync.split(',')
        hosts_to_sync = [int(x) for x in hosts_to_sync]
        logging.debug('Syncing only hosts: %s', hosts_to_sync)

    last_queued_time_for_host = collections.defaultdict(dict)
    is_first_loop = True
    workers = []
    active_hosts = []
    active_hosts_refresh_time = 0
    sql_active_hosts = "select host_id as id, replace(lower(host_ui_shortname), '-','') as ui_shortname from monitor_data.hosts " \
                       "where host_enabled and (%s = '{}' or host_id = any(%s)) order by 2"

    while True:

        if time.time() - active_hosts_refresh_time > 180:  # checking for hosts changes every 3 minutes
            try:
                active_hosts, cols = datadb.executeAsDict(
                    sql_active_hosts, (hosts_to_sync, hosts_to_sync))
                active_hosts_refresh_time = time.time()
            except Exception as e:
                if is_first_loop:  # ignore otherwise, db could be down for maintenance
                    raise e
                logging.error('Could not refresh active host info: %s', e)

        if is_first_loop:  # setup
            workers_to_spawn = min(
                min(
                    len(hosts_to_sync)
                    if hosts_to_sync else settings['max_worker_threads'],
                    settings['max_worker_threads']), len(active_hosts))
            logging.debug('Nr of monitored hosts: %s', len(active_hosts))
            logging.info('Creating %s worker threads...', workers_to_spawn)
            for i in range(0, workers_to_spawn):
                wt = WorkerThread(args)
                wt.start()
                workers.append(wt)

        if queue.qsize() <= len(active_hosts) * 2:

            for ah in active_hosts:

                last_data_pull_time_for_view = last_queued_time_for_host.get(
                    ah['id'], 0)
                if time.time() - last_data_pull_time_for_view < args.check_interval:
                    # logging.debug('Not pulling data as args.check_interval not passed yet [host_id %s]', ah['id'])
                    continue

                logging.info('Putting %s to queue...', ah['ui_shortname'])
                queue.put({
                    'id': ah['id'],
                    'ui_shortname': ah['ui_shortname'],
                    'is_first_loop': is_first_loop,
                    'queued_on': time.time()
                })
                last_queued_time_for_host[ah['id']] = time.time()

            if is_first_loop:
                is_first_loop = False

        logging.debug('Main thread sleeps...')
        time.sleep(5)