def run(dry_run):
    jira_boards = [j for j in queries.get_jira_boards()
                   if j.get('slack')]
    accounts = queries.get_state_aws_accounts()
    settings = queries.get_app_interface_settings()
    state = State(
        integration=QONTRACT_INTEGRATION,
        accounts=accounts,
        settings=settings
    )
    for index, jira_board in enumerate(jira_boards):
        if not is_in_shard_round_robin(jira_board['name'], index):
            continue
        jira, current_state = fetch_current_state(jira_board, settings)
        if not current_state:
            logging.warning(
                'not acting on empty Jira boards. ' +
                'please create a ticket to get started.'
            )
            continue
        previous_state = fetch_previous_state(state, jira.project)
        if previous_state:
            diffs = calculate_diff(jira.server, current_state, previous_state)
            act(dry_run, jira_board, diffs)
        if not dry_run:
            write_state(state, jira.project, current_state)
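All of these integrations share the same run-once idiom: derive a stable key, skip if the key is already recorded in remote state, otherwise perform the side effect and record it. A minimal sketch of that guard, assuming only the State methods shown in these examples (exists/add); notify_once and its arguments are hypothetical:

def notify_once(state, key, notify, dry_run):
    # skip side effects that were already recorded in remote state
    if state.exists(key):
        return False
    if not dry_run:
        notify()        # perform the side effect first ...
        state.add(key)  # ... then record it so reruns are no-ops
    return True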
Example #2
def run(dry_run,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        defer=None):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    clusters = [c for c in queries.get_clusters(minimal=True) if c.get("ocm")]
    oc_map = OC_Map(
        clusters=clusters,
        integration=QONTRACT_INTEGRATION,
        settings=settings,
        internal=internal,
        use_jump_host=use_jump_host,
        thread_pool_size=thread_pool_size,
    )
    defer(oc_map.cleanup)
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)

    if not dry_run:
        slack = slackapi_from_queries(QONTRACT_INTEGRATION)

    now = datetime.utcnow()
    for cluster in oc_map.clusters(include_errors=True):
        oc = oc_map.get(cluster)
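        # on error, oc_map.get returns a falsy log-message object
        # that still carries log_level and message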
        if not oc:
            logging.log(level=oc.log_level, msg=oc.message)
            continue
        upgrade_config = oc.get(
            namespace="openshift-managed-upgrade-operator",
            kind="UpgradeConfig",
            allow_not_found=True,
        )["items"]
        if not upgrade_config:
            logging.debug(f"[{cluster}] UpgradeConfig not found.")
            continue
        [upgrade_config] = upgrade_config

        upgrade_spec = upgrade_config["spec"]
        upgrade_at = upgrade_spec["upgradeAt"]
        version = upgrade_spec["desired"]["version"]
        upgrade_at_obj = datetime.strptime(upgrade_at, "%Y-%m-%dT%H:%M:%SZ")
        state_key = f"{cluster}-{upgrade_at}"
        # if this is the first iteration in which 'now' has passed
        # the upgradeAt date-time, we send a notification
        if upgrade_at_obj < now:
            if state.exists(state_key):
                # already notified
                continue
            logging.info(["cluster_upgrade", cluster])
            if not dry_run:
                state.add(state_key)
                usergroup = f"{cluster}-cluster"
                usergroup_id = slack.get_usergroup_id(usergroup)
                slack.chat_post_message(
                    f"Heads up <!subteam^{usergroup_id}>! " +
                    f"cluster `{cluster}` is currently " +
                    f"being upgraded to version `{version}`")
def run(dry_run):
    accounts = queries.get_state_aws_accounts(reset_passwords=True)
    settings = queries.get_app_interface_settings()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)

    for a in accounts:
        aws_api = None
        account_name = a['name']
        reset_passwords = a.get('resetPasswords')
        if not reset_passwords:
            continue
        for r in reset_passwords:
            user_name = r['user']['org_username']
            request_id = r['requestId']
            state_key = f"{account_name}/{user_name}/{request_id}"
            if state.exists(state_key):
                continue

            logging.info(['reset_password', account_name, user_name])
            if dry_run:
                continue

            if aws_api is None:
                aws_api = AWSApi(1, [a], settings=settings)

            aws_api.reset_password(account_name, user_name)
            aws_api.reset_mfa(account_name, user_name)
            state.add(state_key)
def run(dry_run, io_dir='throughput/', print_only=False,
        config_name=None, job_name=None, instance_name=None, defer=None):
    if not print_only and config_name is not None:
        raise Exception("--config-name only works with --print-only mode")
    jjb, additional_repo_urls = \
        init_jjb(instance_name, config_name, print_only)
    defer(jjb.cleanup)

    if print_only:
        jjb.print_jobs(job_name=job_name)
        if config_name is not None:
            jjb.generate(io_dir, 'printout')
        sys.exit(0)

    accounts = queries.get_state_aws_accounts()
    state = State(
        integration=QONTRACT_INTEGRATION,
        accounts=accounts,
        settings=jjb.settings
    )

    if dry_run:
        validate_repos_and_admins(jjb, additional_repo_urls)
        jjb.generate(io_dir, 'desired')
        jjb.overwrite_configs(state)
        jjb.generate(io_dir, 'current')
        jjb.print_diffs(io_dir, instance_name)
    else:
        jjb.update()
        configs = jjb.get_configs()
        for name, desired_config in configs.items():
            state.add(name, value=desired_config, force=True)
Example #5
def get_version_history(dry_run, upgrade_policies, ocm_map):
    """Get a summary of versions history per OCM instance

    Args:
        dry_run (bool): if False, save updated history to remote state
        upgrade_policies (list): query results of clusters upgrade policies
        ocm_map (OCMMap): OCM clients per OCM instance

    Returns:
        dict: version history per OCM instance
    """
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    state = State(
        integration=QONTRACT_INTEGRATION,
        accounts=accounts,
        settings=settings
    )
    results = {}
    # we keep a remote state per OCM instance
    for ocm_name in ocm_map.instances():
        history = state.get(ocm_name, {})
        update_history(history, upgrade_policies)
        results[ocm_name] = history
        if not dry_run:
            state.add(ocm_name, history, force=True)

    return results
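A consumer of the result just indexes by OCM instance name; a minimal, hedged usage sketch (variable names hypothetical):

results = get_version_history(dry_run=True,
                              upgrade_policies=upgrade_policies,
                              ocm_map=ocm_map)
for ocm_name, history in results.items():
    logging.info(["version_history", ocm_name])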
Example #6
def run(dry_run):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    users = queries.get_users()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)
    smtp_client = SmtpClient(settings=settings)
    mails = smtp_client.get_mails(criteria='SUBJECT "Sentry Access Request"',
                                  folder="[Gmail]/Sent Mail")
    user_names = get_sentry_users_from_mails(mails)
    if not dry_run:
        slack = slackapi_from_queries(QONTRACT_INTEGRATION,
                                      init_usergroups=False)
    for user_name in user_names:
        guesses = guess_user(user_name, users)
        if not guesses:
            logging.debug(f"no users guessed for {user_name}")
            continue
        slack_username = guesses[0].get(
            "slack_username") or guesses[0]["org_username"]
        if state.exists(slack_username):
            continue
        logging.info(["help_user", slack_username])
        if not dry_run:
            state.add(slack_username)
            slack.chat_post_message(
                f"yo <@{slack_username}>! it appears that you have " +
                "requested access to a project in Sentry. " +
                "access is managed automatically via app-interface. "
                "checkout https://url.corp.redhat.com/sentry-help")
def run(dry_run,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        defer=None):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    clusters = [c for c in queries.get_clusters(minimal=True) if c.get('ocm')]
    oc_map = OC_Map(clusters=clusters,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)
    defer(oc_map.cleanup)
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)

    if not dry_run:
        slack = init_slack_workspace(QONTRACT_INTEGRATION)

    now = datetime.utcnow()
    for cluster in oc_map.clusters(include_errors=True):
        oc = oc_map.get(cluster)
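        # on error, oc_map.get returns a falsy log-message object
        # that still carries log_level and message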
        if not oc:
            logging.log(level=oc.log_level, msg=oc.message)
            continue
        upgrade_config = oc.get(namespace='openshift-managed-upgrade-operator',
                                kind='UpgradeConfig',
                                allow_not_found=True)['items']
        if not upgrade_config:
            logging.debug(f'[{cluster}] UpgradeConfig not found.')
            continue
        [upgrade_config] = upgrade_config

        upgrade_spec = upgrade_config['spec']
        upgrade_at = upgrade_spec['upgradeAt']
        version = upgrade_spec['desired']['version']
        upgrade_at_obj = datetime.strptime(upgrade_at, '%Y-%m-%dT%H:%M:%SZ')
        state_key = f'{cluster}-{upgrade_at}'
        # if this is the first iteration in which 'now' has passed
        # the upgradeAt date-time, we send a notification
        if upgrade_at_obj < now:
            if state.exists(state_key):
                # already notified
                continue
            logging.info(['cluster_upgrade', cluster])
            if not dry_run:
                state.add(state_key)
                usergroup = f'{cluster}-cluster'
                usergroup_id = slack.get_usergroup_id(usergroup)
                slack.chat_post_message(
                    f'Heads up <!subteam^{usergroup_id}>! ' +
                    f'cluster `{cluster}` is currently ' +
                    f'being upgraded to version `{version}`')
Example #8
def ls(ctx, integration):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    state = State(integration, accounts, settings=settings)
    keys = state.ls()
    # if integration is not defined, the second token is the integration name
    key_index = 1 if integration else 2
    table_content = [
        {'integration': integration or k.split('/')[1],
         'key': '/'.join(k.split('/')[key_index:])}
        for k in keys]
    print_output({'output': 'table', 'sort': False},
                 table_content, ['integration', 'key'])
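The splitting assumes keys of the form /<integration>/<key...> when no integration is passed; a small illustration with a hypothetical key:

# hypothetical key as returned by state.ls() without --integration
k = '/openshift-upgrade-watcher/cluster-1-2021-06-01T04:00:00Z'
tokens = k.split('/')         # ['', '<integration>', '<key>']
integration_name = tokens[1]  # token 1 is the integration name
key = '/'.join(tokens[2:])    # the remainder is the state key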
Example #9
def run(dry_run):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    smtp_client = SmtpClient(settings=settings)
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)
    credentials_requests = queries.get_credentials_requests()

    # validate no 2 requests have the same name
    credentials_requests_names = {r["name"] for r in credentials_requests}
    if len(credentials_requests) != len(credentials_requests_names):
        logging.error("request names must be unique.")
        sys.exit(1)

    error = False

    credentials_requests_to_send = [
        r for r in credentials_requests if not state.exists(r["name"])
    ]
    for credentials_request_to_send in credentials_requests_to_send:
        try:
            user = credentials_request_to_send["user"]
            credentials_name = credentials_request_to_send["credentials"]
            org_username = user["org_username"]
            logging.info(["send_credentials", org_username, credentials_name])

            request_name = credentials_request_to_send["name"]
            names = [org_username]
            subject = request_name
            encrypted_credentials = get_encrypted_credentials(
                credentials_name, user, settings)
            if not dry_run:
                body = MESSAGE_TEMPLATE.format(request_name, credentials_name,
                                               encrypted_credentials)
                smtp_client.send_mail(names, subject, body)
                state.add(request_name)
        except KeyError:
            # org_username/credentials_name may be unbound if the lookup
            # that raised was one of the assignments above, so log the
            # request name, which is guaranteed to exist
            logging.exception(
                "Bad user details for request "
                f"{credentials_request_to_send['name']}")
            error = True
        except CalledProcessError as e:
            logging.exception(f"Failed to handle GPG key for {org_username} "
                              f"({credentials_name}): {e.stdout}")
            error = True

    if error:
        sys.exit(1)
def run(
    dry_run: bool,
    thread_pool_size: int = 10,
    internal: Optional[bool] = None,
    use_jump_host: bool = True,
    defer=None,
    raise_errors=False,
):
    _LOG.debug("Collecting GQL data ...")
    namespaces = get_gql_namespaces_in_shard()

    inventory = LabelInventory()

    _LOG.debug("Initializing OC_Map ...")
    oc_map = get_oc_map(namespaces, internal, use_jump_host, thread_pool_size)
    defer(oc_map.cleanup)

    _LOG.debug("Collecting desired state ...")
    get_desired(inventory, oc_map, namespaces)

    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)
    _LOG.debug("Collecting managed state ...")
    get_managed(inventory, state)

    _LOG.debug("Collecting current state ...")
    get_current(inventory, oc_map, thread_pool_size)

    inventory.reconcile()

    realize(inventory, state, oc_map, dry_run, thread_pool_size)

    if inventory.has_any_error():
        error_messages = []
        for cluster, namespace, errs in inventory.iter_errors():
            for err in errs:
                msg = f"{cluster}/{namespace}: {err}"
                _LOG.error(msg)
                error_messages.append(msg)
        if raise_errors:
            raise NamespaceLabelError("\n".join(error_messages))
        sys.exit(1)
Example #11
def run(dry_run):
    unleash_instances = queries.get_unleash_instances()
    accounts = queries.get_state_aws_accounts()
    settings = queries.get_app_interface_settings()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)
    for unleash_instance in unleash_instances:
        instance_name = unleash_instance['name']
        current_state = fetch_current_state(unleash_instance)
        if not current_state:
            logging.warning('not acting on empty Unleash instances. ' +
                            'please create a feature toggle to get started.')
            continue
        previous_state = fetch_previous_state(state, instance_name)
        diffs = calculate_diff(current_state, previous_state)
        if diffs:
            act(dry_run, state, unleash_instance, diffs)
Example #12
def run(dry_run):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    state = State(
        integration=QONTRACT_INTEGRATION, accounts=accounts, settings=settings
    )
    emails = queries.get_app_interface_emails()
    smtp_client = SmtpClient(settings=settings)
    # validate no 2 emails have the same name
    email_names = {e["name"] for e in emails}
    if len(emails) != len(email_names):
        logging.error("email names must be unique.")
        sys.exit(1)

    emails_to_send = [e for e in emails if not state.exists(e["name"])]
    for email in emails_to_send:
        logging.info(["send_email", email["name"], email["subject"]])

        if not dry_run:
            names = collect_to(email["to"])
            subject = email["subject"]
            body = email["body"]
            smtp_client.send_mail(names, subject, body)
            state.add(email["name"])
def run(dry_run, enable_deletion=False):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)

    queries_list = collect_queries(settings=settings)
    remove_candidates = []
    for query in queries_list:
        query_name = query["name"]

        # Checking the sql-query state:
        # - No state: up for execution.
        # - State is a timestamp: executed and up for removal
        #   after the JOB_TTL
        # - State is 'DONE': executed and removed.
        try:
            query_state = state[query_name]
            is_cronjob = query.get("schedule")
            if query_state != "DONE" and not is_cronjob:
                remove_candidates.append({
                    "name": query_name,
                    "timestamp": query_state,
                    "output": query["output"],
                })
            continue
        except KeyError:
            pass

        image_repository = "quay.io/app-sre"
        use_pull_secret = False
        sql_query_settings = settings.get("sqlQuery")
        if sql_query_settings:
            use_pull_secret = True
            image_repository = sql_query_settings["imageRepository"]
            pull_secret = sql_query_settings["pullSecret"]
            secret_resource = orb.fetch_provider_vault_secret(
                path=pull_secret["path"],
                version=pull_secret["version"],
                name=query_name,
                labels=pull_secret["labels"] or {},
                annotations=pull_secret["annotations"] or {},
                type=pull_secret["type"],
                integration=QONTRACT_INTEGRATION,
                integration_version=QONTRACT_INTEGRATION_VERSION,
            )

        job_yaml = process_template(query,
                                    image_repository=image_repository,
                                    use_pull_secret=use_pull_secret)
        job = yaml.safe_load(job_yaml)
        job_resource = OpenshiftResource(job, QONTRACT_INTEGRATION,
                                         QONTRACT_INTEGRATION_VERSION)
        oc_map = OC_Map(
            namespaces=[query["namespace"]],
            integration=QONTRACT_INTEGRATION,
            settings=settings,
            internal=None,
        )

        if use_pull_secret:
            openshift_apply(dry_run, oc_map, query, secret_resource)

        if query["output"] == "encrypted":
            render_kwargs = {
                "GPG_KEY": query["name"],
                "PUBLIC_GPG_KEY": query["public_gpg_key"],
            }
            template = jinja2.Template(CONFIGMAP_TEMPLATE)
            configmap_yaml = template.render(**render_kwargs)
            configmap = yaml.safe_load(configmap_yaml)
            configmap_resource = OpenshiftResource(
                configmap, QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION)
            openshift_apply(dry_run, oc_map, query, configmap_resource)

        openshift_apply(dry_run, oc_map, query, job_resource)

        if not dry_run:
            state[query_name] = time.time()

    for candidate in remove_candidates:
        if time.time() < candidate["timestamp"] + JOB_TTL:
            continue

        try:
            query = collect_queries(query_name=candidate["name"],
                                    settings=settings)[0]
        except IndexError:
            raise RuntimeError(f'sql-query {candidate["name"]} not present '
                               "in the app-interface while its Job is still "
                               "not removed from the cluster. Manual clean "
                               "up is needed.")

        oc_map = OC_Map(
            namespaces=[query["namespace"]],
            integration=QONTRACT_INTEGRATION,
            settings=settings,
            internal=None,
        )

        resource_types = ["Job", "Secret"]
        if candidate["output"] == "encrypted":
            resource_types.append("ConfigMap")
        for resource_type in resource_types:
            openshift_delete(dry_run, oc_map, query, resource_type,
                             enable_deletion)

        if not dry_run:
            state[candidate["name"]] = "DONE"
Example #14
def run(dry_run, enable_deletion=False):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)

    queries_list = collect_queries(settings=settings)
    remove_candidates = []
    for query in queries_list:
        query_name = query['name']

        # Checking the sql-query state:
        # - No state: up for execution.
        # - State is a timestamp: executed and up for removal
        #   after the JOB_TTL
        # - State is 'DONE': executed and removed.
        try:
            query_state = state[query_name]
            is_cronjob = query.get('schedule')
            if query_state != 'DONE' and not is_cronjob:
                remove_candidates.append({'name': query_name,
                                          'timestamp': query_state,
                                          'output': query['output']})
            continue
        except KeyError:
            pass

        image_repository = 'quay.io/app-sre'
        use_pull_secret = False
        sql_query_settings = settings.get('sqlQuery')
        if sql_query_settings:
            use_pull_secret = True
            image_repository = sql_query_settings['imageRepository']
            pull_secret = sql_query_settings['pullSecret']
            secret_resource = orb.fetch_provider_vault_secret(
                path=pull_secret['path'],
                version=pull_secret['version'],
                name=query_name,
                labels=pull_secret['labels'] or {},
                annotations=pull_secret['annotations'] or {},
                type=pull_secret['type'],
                integration=QONTRACT_INTEGRATION,
                integration_version=QONTRACT_INTEGRATION_VERSION
            )

        job_yaml = process_template(query,
                                    image_repository=image_repository,
                                    use_pull_secret=use_pull_secret)
        job = yaml.safe_load(job_yaml)
        job_resource = OpenshiftResource(job, QONTRACT_INTEGRATION,
                                         QONTRACT_INTEGRATION_VERSION)
        oc_map = OC_Map(namespaces=[query['namespace']],
                        integration=QONTRACT_INTEGRATION,
                        settings=settings,
                        internal=None)

        if use_pull_secret:
            openshift_apply(dry_run, oc_map, query, secret_resource)

        if query['output'] == 'encrypted':
            render_kwargs = {
                'GPG_KEY': query['name'],
                'PUBLIC_GPG_KEY': query['public_gpg_key']
            }
            template = jinja2.Template(CONFIGMAP_TEMPLATE)
            configmap_yaml = template.render(**render_kwargs)
            configmap = yaml.safe_load(configmap_yaml)
            configmap_resource = OpenshiftResource(
                configmap, QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION)
            openshift_apply(dry_run, oc_map, query, configmap_resource)

        openshift_apply(dry_run, oc_map, query, job_resource)

        if not dry_run:
            state[query_name] = time.time()

    for candidate in remove_candidates:
        if time.time() < candidate['timestamp'] + JOB_TTL:
            continue

        try:
            query = collect_queries(
                query_name=candidate['name'], settings=settings)[0]
        except IndexError:
            raise RuntimeError(f'sql-query {candidate["name"]} not present '
                               'in the app-interface while its Job is still '
                               'not removed from the cluster. Manual clean '
                               'up is needed.')

        oc_map = OC_Map(namespaces=[query['namespace']],
                        integration=QONTRACT_INTEGRATION,
                        settings=settings,
                        internal=None)

        resource_types = ['Job', 'Secret']
        if candidate['output'] == 'encrypted':
            resource_types.append('ConfigMap')
        for resource_type in resource_types:
            openshift_delete(dry_run, oc_map, query,
                             resource_type, enable_deletion)

        if not dry_run:
            state[candidate['name']] = 'DONE'
def rm(ctx, integration, key):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    state = State(integration, accounts, settings=settings)
    state.rm(key)
def set(ctx, integration, key, value):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    state = State(integration, accounts, settings=settings)
    state.add(key, value=value, force=True)
def get(ctx, integration, key):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    state = State(integration, accounts, settings=settings)
    value = state.get(key)
    print(value)
def setup(thread_pool_size, internal, use_jump_host, integration,
          integration_version, v1, v2):
    """Setup required resources for triggering integrations

    Args:
        thread_pool_size (int): Thread pool size to use
        internal (bool): Should run for internal/external/all clusters
        use_jump_host (bool): Should use jump host to reach clusters
        integration (string): Name of calling integration
        integration_version (string): Version of calling integration
        v1 (bool): Should trigger for v1 SaaS files
        v2 (bool): Should trigger for v2 SaaS files

    Returns:
        saasherder (SaasHerder): a SaasHerder instance
        jenkins_map (dict): Instance names with JenkinsApi instances
        oc_map (OC_Map): a dictionary of OC clients per cluster
        settings (dict): App-interface settings
        error (bool): True if an error happened, False otherwise
    """

    saas_files = queries.get_saas_files(v1=v1, v2=v2)
    if not saas_files:
        logging.error("no saas files found")
        return None, None, None, None, True
    saas_files = [sf for sf in saas_files if is_in_shard(sf["name"])]

    # Remove saas-file targets that are disabled
    for saas_file in saas_files[:]:
        resource_templates = saas_file["resourceTemplates"]
        for rt in resource_templates[:]:
            targets = rt["targets"]
            for target in targets[:]:
                if target["disable"]:
                    targets.remove(target)

    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    gl = GitLabApi(instance, settings=settings)
    jenkins_map = jenkins_base.get_jenkins_map()
    pipelines_providers = queries.get_pipelines_providers()
    tkn_provider_namespaces = [
        pp["namespace"] for pp in pipelines_providers
        if pp["provider"] == "tekton"
    ]

    oc_map = OC_Map(
        namespaces=tkn_provider_namespaces,
        integration=integration,
        settings=settings,
        internal=internal,
        use_jump_host=use_jump_host,
        thread_pool_size=thread_pool_size,
    )

    saasherder = SaasHerder(
        saas_files,
        thread_pool_size=thread_pool_size,
        gitlab=gl,
        integration=integration,
        integration_version=integration_version,
        settings=settings,
        jenkins_map=jenkins_map,
        accounts=accounts,
    )

    return saasherder, jenkins_map, oc_map, settings, False
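Callers unpack the five-tuple and bail out on the error sentinel; a hedged sketch of such a caller (the run signature here is hypothetical):

def run(dry_run, thread_pool_size=10, internal=None, use_jump_host=True):
    saasherder, jenkins_map, oc_map, settings, error = setup(
        thread_pool_size, internal, use_jump_host,
        QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION,
        v1=True, v2=False)
    if error:
        sys.exit(1)  # setup already logged the reason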
Example #19
def get_state() -> State:
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    return State(integration=QONTRACT_INTEGRATION,
                 accounts=accounts,
                 settings=settings)
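A caller of this helper can then use the State API directly; a minimal sketch (the key and value are hypothetical):

state = get_state()
key = 'some/hypothetical/key'
if not state.exists(key):
    state.add(key, value='example', force=True)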