Example #1
from psycopg2.extensions import connection

# `tjc` is the module under test and `pg_db_mod` is a pytest fixture providing
# a disposable PostgreSQL instance; both come from the surrounding test suite.

def test_get_conn(monkeypatch, pg_db_mod):
    """Test obtaining a DB connection"""
    class CustomCFG:
        """Custom config class to override the DB config with"""

        def __init__(self, pg_db_mod):
            dsn = pg_db_mod.dsn()
            self.db_name = dsn['database']
            self.db_user = dsn['user']
            self.db_pass = None
            self.db_host = dsn['host']
            self.db_port = dsn['port']
            self.db_ssl_mode = 'disable'
            self.db_ssl_root_cert_path = '/dev/null'

    custom_cfg = CustomCFG(pg_db_mod)
    monkeypatch.setattr(tjc, 'CFG', custom_cfg)

    conn = tjc.get_conn()
    assert isinstance(conn, connection)
    assert conn.closed == 0

    cur = conn.cursor()
    cur.execute("""SELECT COUNT(*) FROM system_platform""")
    res = cur.fetchone()[0]
    assert res == 20

    cur.close()
    conn.close()

    assert conn.closed == 1
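
The function under test lives in the module imported as tjc, and the isinstance check implies psycopg2. A minimal get_conn() consistent with the CFG fields the test overrides (a sketch, not the project's implementation):

import psycopg2

def get_conn():
    """Open a PostgreSQL connection from the global CFG (sketch)."""
    return psycopg2.connect(dbname=CFG.db_name,
                            user=CFG.db_user,
                            password=CFG.db_pass,
                            host=CFG.db_host,
                            port=CFG.db_port,
                            sslmode=CFG.db_ssl_mode,
                            sslrootcert=CFG.db_ssl_root_cert_path)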
Example #2
from datetime import datetime, timedelta

import pytz

# get_conn, LOGGER and CFG come from the surrounding project.

def run():
    """Application entrypoint"""
    LOGGER.info('Started delete_systems job.')

    conn = get_conn()
    cur = conn.cursor()
    deleted = 0

    while True:
        curr_time = datetime.now(tz=pytz.utc)
        cur.execute(
            """SELECT inventory_id FROM system_platform sp
               WHERE when_deleted IS NOT NULL
                 AND when_deleted < %s
               LIMIT 1 FOR UPDATE OF sp""",
            (curr_time - timedelta(hours=CFG.system_deletion_threshold),))
        inventory_id = cur.fetchone()
        if not inventory_id:
            break
        cur.execute("""SELECT deleted_inventory_id FROM delete_system(%s)""",
                    (inventory_id[0], ))
        success = cur.fetchone()
        if success:
            deleted += 1
        else:
            LOGGER.error("Unable to delete inventory_id: %s", inventory_id)
        conn.commit()
    cur.close()
    conn.close()

    LOGGER.info('Cleared %s deleted systems.', deleted)

    LOGGER.info('Finished delete_systems job.')
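
The loop above claims one row at a time with SELECT ... LIMIT 1 FOR UPDATE and commits after each deletion, which keeps row locks short-lived and lets several job replicas drain the backlog concurrently. The same shape in generic form (a sketch; the names here are illustrative, not project code):

def process_one_by_one(conn, claim_sql, params, handle_row):
    """Claim rows one at a time with FOR UPDATE and commit per row (sketch)."""
    cur = conn.cursor()
    while True:
        cur.execute(claim_sql, params)  # e.g. SELECT ... LIMIT 1 FOR UPDATE
        row = cur.fetchone()
        if not row:
            break
        handle_row(cur, row)
        conn.commit()  # releases the row lock before the next claim
    cur.close()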
Example #3
def run():
    """Application entrypoint"""
    LOGGER.info('Started stale_systems job.')

    conn = get_conn()
    cur = conn.cursor()
    updated = 0

    while True:
        cur.execute("""SELECT id from system_platform sp
                    WHERE when_deleted IS NULL
                      AND stale_warning_timestamp IS NOT NULL
                      AND stale = 'F'
                      AND now() > stale_warning_timestamp LIMIT 1 FOR UPDATE OF sp"""
                    )
        system_id = cur.fetchone()
        if not system_id:
            break
        cur.execute("""UPDATE system_platform SET stale = 'T' where id = %s""",
                    system_id)
        conn.commit()
        updated += 1
    cur.close()
    conn.close()

    LOGGER.info('Marked %s systems as stale.', updated)

    LOGGER.info('Finished stale_systems job.')
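
For contrast, the whole batch could be marked in one statement; the loop above instead commits per row so locks stay short. Single-statement variant (illustrative, not the project's code):

def run_single_statement():
    """Mark all overdue systems stale in one UPDATE (sketch; one long transaction)."""
    conn = get_conn()
    cur = conn.cursor()
    cur.execute("""UPDATE system_platform SET stale = 'T'
                   WHERE when_deleted IS NULL
                     AND stale_warning_timestamp IS NOT NULL
                     AND stale = 'F'
                     AND now() > stale_warning_timestamp""")
    conn.commit()
    cur.close()
    conn.close()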
Example #4
def run():
    """Application entrypoint"""
    LOGGER.info("Started usage_metrics job")

    conn = get_conn()
    cur = conn.cursor()

    metrics_gatherer = MetricsGatherer(cur, accounts_blacklist_text=CFG.accounts_blacklist)
    metrics_gatherer.run()

    cur.close()
    conn.close()
    LOGGER.info("Finished usage_metrics job")
Example #5
def run():
    """Application entrypoint"""
    LOGGER.info("Started cacheman job.")

    conn = get_conn()
    cur = conn.cursor()

    current_cache = {}
    cur.execute("""SELECT rh_account_id, cve_id, systems_affected, systems_status_divergent
                   FROM cve_account_cache""")
    for rh_account_id, cve_id, systems_affected, systems_status_divergent in cur.fetchall():
        current_cache.setdefault(rh_account_id, {})[cve_id] = (systems_affected, systems_status_divergent)

    cur.execute("""SELECT sp.rh_account_id, a.name, a.cve_cache_from,
                          GREATEST(MAX(sp.last_evaluation), MAX(sp.advisor_evaluated), MAX(sp.when_deleted),
                                   MAX(a.last_status_change)) AS last_system_change,
                          a.cve_cache_keepalive,
                          COUNT(*) AS total_systems
                   FROM system_platform sp INNER JOIN
                        rh_account a ON sp.rh_account_id = a.id
                   GROUP BY sp.rh_account_id, a.name, a.cve_cache_from, a.cve_cache_keepalive
                   HAVING COUNT(*) >= %s""", (CFG.cache_minimal_account_systems,))
    accounts = [(account_id, account_name, cve_cache_from, last_system_change, cve_cache_keepalive)
                for account_id, account_name, cve_cache_from, last_system_change, cve_cache_keepalive, _ in cur.fetchall()
                if validate_cve_cache_keepalive(cve_cache_keepalive, 2)]
    LOGGER.info("Accounts with enabled cache: %s", len(accounts))
    # account[2] = cve_cache_from, account[3] = last_system_change
    accounts_to_refresh = [account for account in accounts
                           if account[3] and (not account[2] or account[3] > account[2])]
    LOGGER.info("Accounts requiring cache refresh: %s", len(accounts_to_refresh))

    # Process accounts in parallel
    with DatabasePool(CACHE_WORKERS):
        executor = BoundedExecutor(CACHE_WORKERS, max_workers=CACHE_WORKERS)
        futures = []
        for account_id, account_name, _, _, _ in accounts_to_refresh:
            futures.append(executor.submit(_materialize_account_cache, account_id, account_name, current_cache))
        for future in futures:
            future.result()
        executor.shutdown()
    # Pop out cached accounts after all workers are done
    for account_id, _, _, _, _ in accounts:
        current_cache.pop(account_id, None)

    LOGGER.info("Accounts to disable cache: %s", len(current_cache))
    for account_id in current_cache:
        cur.execute("""DELETE FROM cve_account_cache WHERE rh_account_id = %s""", (account_id,))
        cur.execute("""UPDATE rh_account SET cve_cache_from = NULL WHERE id = %s""", (account_id,))
        conn.commit()

    cur.close()
    conn.close()
    LOGGER.info("Finished cacheman job.")
Example #6
def run():
    """Application entrypoint"""
    LOGGER.info("Started db_metrics job")

    conn = get_conn()
    cur = conn.cursor()

    cur.execute("""SELECT COUNT(*) FROM system_platform""")
    METRIC_SYSTEMS.set(int(cur.fetchone()[0]))

    cur.execute("""SELECT COUNT(*) FROM inventory.hosts""")
    METRIC_CYNDI_SYSTEMS.set(int(cur.fetchone()[0]))

    cur.execute("""SELECT COUNT(*)
                   FROM system_platform sp LEFT JOIN
                        inventory.hosts ih ON sp.inventory_id = ih.id
                   WHERE ih.id IS NULL
                   AND sp.when_deleted IS NULL""")
    METRIC_SYSTEMS_MISSING_IN_CYNDI.set(int(cur.fetchone()[0]))

    cur.execute("""SELECT tablename AS key, pg_total_relation_size(quote_ident(tablename)) AS value
                   FROM (SELECT * FROM pg_catalog.pg_tables WHERE schemaname = 'public') t""")
    for key, value in cur.fetchall():
        METRIC_TABLE_SIZE.labels(table=key).set(int(value))

    cur.execute("""SELECT a.name AS account, COUNT(*) AS total_systems
                   FROM system_platform sp JOIN rh_account a ON a.id = sp.rh_account_id
                   GROUP BY a.name ORDER BY 2 DESC LIMIT 10""")
    # Reset first, otherwise more than 10 labels would be exported when the ordering changes
    METRIC_TOP_10_ACCOUNTS_SYSTEMS.clear()
    for account, total_systems in cur.fetchall():
        METRIC_TOP_10_ACCOUNTS_SYSTEMS.labels(account=account).set(int(total_systems))

    cur.execute(
        """SELECT COUNT(*) FILTER (WHERE t.total_systems >= 1) AS at_least_1_sys,
                  COUNT(*) FILTER (WHERE t.total_systems >= 10) AS at_least_10_sys,
                  COUNT(*) FILTER (WHERE t.total_systems >= 100) AS at_least_100_sys,
                  COUNT(*) FILTER (WHERE t.total_systems >= 1000) AS at_least_1000_sys,
                  COUNT(*) FILTER (WHERE t.total_systems >= 10000) AS at_least_10000_sys,
                  COUNT(*) FILTER (WHERE t.total_systems >= 100000) AS at_least_100000_sys
           FROM (SELECT a.name, COUNT(*) AS total_systems
                 FROM system_platform sp JOIN rh_account a ON a.id = sp.rh_account_id
                 GROUP BY a.name ORDER BY 2 DESC) t""")
    buckets = (">= 1 system", ">= 10 systems", ">= 100 systems",
               ">= 1,000 systems", ">= 10,000 systems", ">= 100,000 systems")
    for bucket, count in zip(buckets, cur.fetchone()):
        METRIC_ACCOUNTS_COUNT.labels(bucket=bucket).set(int(count))

    cur.close()
    conn.close()
    LOGGER.info("Finished db_metrics job")
Example #7
import os
import traceback
from datetime import datetime

import yaml

# LOGGER, get_conn and the remaining helpers come from the surrounding module.

def sync(tmpdirname, content_version, playbooks_version):  # pylint: disable=too-many-branches, too-many-statements
    """Sync rules and playbooks from checked-out git directories"""

    content_git = f"{tmpdirname}/{CONTENT_GIT_NAME}"
    playbooks_git = f"{tmpdirname}/{PLAYBOOKS_GIT_NAME}"
    security_rules_dir = f"{content_git}/content"
    playbooks_dir = f"{playbooks_git}/playbooks/security"
    for git_dir in (security_rules_dir, playbooks_dir):
        if not os.path.isdir(git_dir):
            LOGGER.error("%s directory does not exist", git_dir)
            return

    config_path = os.path.join(content_git, 'content/config.yaml')
    risk_map = resolution_risk_map(config_path)
    impact_map = impact_string_map(config_path)

    conn = get_conn()

    cves_names = {}

    # pylint: disable=too-many-nested-blocks
    for file_name in os.listdir(security_rules_dir):
        dir_name = os.path.join(security_rules_dir, file_name)
        if os.path.isdir(dir_name):
            try:
                with open(os.path.join(dir_name, 'plugin.yaml'), 'r') as stream:
                    plugin_info = yaml.safe_load(stream)

                to_import = {}
                all_keys_cves = set()
                for nested_file in os.listdir(dir_name):
                    full_nested_path = os.path.join(dir_name, nested_file)
                    if os.path.isdir(full_nested_path):
                        with open(os.path.join(full_nested_path, 'metadata.yaml'), 'r') as stream:
                            metadata = yaml.safe_load(stream)
                        rule_playbook_dir = os.path.join(playbooks_dir, file_name,
                                                         nested_file, 'rhel_host')
                        playbooks = []
                        if os.path.isdir(rule_playbook_dir):
                            for play in os.listdir(rule_playbook_dir):
                                if play.endswith('_fixit.yml'):
                                    with open(os.path.join(rule_playbook_dir, play)) as play_stream:
                                        playbooks.append({'play': play_stream.read(),
                                                          'version': playbooks_version})
                        rule = {
                            'active': metadata.get('status') == 'active',
                            'change_risk': risk_map.get(metadata.get('resolution_risk'), 1),
                            'cves': metadata.get('cves'),
                            'description': plugin_info.get('name') + ': ' + metadata.get('description'),
                            'id': f'{file_name}|{nested_file}',
                            'kbase_node_id': plugin_info.get('node_id') or None,
                            'playbook_count': len(playbooks),
                            'playbooks': playbooks,
                            'publish_date': datetime.strptime(metadata['publish_date'], '%Y-%m-%d %H:%M:%S')
                                            if 'publish_date' in metadata else None,
                            'reboot_required': plugin_info.get('reboot_required'),
                            'rule_impact': impact_map.get(metadata.get('severity'), 1),
                        }
                        all_keys_cves.update(metadata.get('cves'))

                        # Prefer the error-key-level text, fall back to the rule-level file
                        for info_file, attribute in [('summary.md', 'summary'),
                                                     ('reason.md', 'reason'),
                                                     ('resolution.md', 'resolution'),
                                                     ('more_info.md', 'more_info')]:
                            if os.path.exists(os.path.join(full_nested_path, info_file)):
                                with open(os.path.join(full_nested_path, info_file)) as file_desc:
                                    rule[attribute] = file_desc.read()
                            else:
                                with open(os.path.join(dir_name, info_file)) as file_desc:
                                    rule[attribute] = file_desc.read()
                        to_import[rule['id']] = rule

                # Create one overall rule without an error key to store "pass" data
                rule_only = {
                    'active': any(rule['active'] for rule in to_import.values()),
                    'cves': list(all_keys_cves),
                    'id': file_name,
                }
                import_into_db(conn, to_import, {rule_only['id']: rule_only})
                if plugin_info.get('name') is not None:
                    for cve in all_keys_cves:
                        cves_names[cve] = plugin_info.get('name')

            except Exception:  # pylint: disable=broad-except
                LOGGER.exception('Error during rule parsing')
                send_slack_notification(f'Error in rules_git_sync: {traceback.format_exc()}')

    store_versions(conn, content_version, playbooks_version)
    import_cves_names(conn, cves_names)
    conn.close()
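
resolution_risk_map and impact_string_map are loaded from the content repo's config.yaml. A minimal sketch, assuming the file maps resolution-risk and severity names to integer scores under top-level keys (the exact layout is an assumption):

import yaml

def resolution_risk_map(config_path):
    """Map resolution-risk names to integer risk scores (sketch)."""
    with open(config_path) as stream:
        return yaml.safe_load(stream).get('resolution_risk', {})

def impact_string_map(config_path):
    """Map severity/impact names to integer impact scores (sketch)."""
    with open(config_path) as stream:
        return yaml.safe_load(stream).get('impact', {})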