Example #1
def _get_aws_account_info(account):
    for account_info in queries.get_aws_accounts():
        if 'name' not in account_info:
            continue
        if account_info['name'] != account:
            continue
        return account_info
Example #2
def run(dry_run):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    users = queries.get_users()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)

    mails = smtp_client.get_mails(criteria='SUBJECT "Sentry Access Request"',
                                  folder='[Gmail]/Sent Mail',
                                  settings=settings)
    user_names = get_sentry_users_from_mails(mails)
    if not dry_run:
        slack = init_slack_workspace(QONTRACT_INTEGRATION)
    for user_name in user_names:
        guesses = guess_user(user_name, users)
        if not guesses:
            logging.debug(f'no users guessed for {user_name}')
            continue
        slack_username = \
            guesses[0].get('slack_username') or guesses[0]['org_username']
        if state.exists(slack_username):
            continue
        logging.info(['help_user', slack_username])
        if not dry_run:
            state.add(slack_username)
            slack.chat_post_message(
                f'yo <@{slack_username}>! it appears that you have '
                'requested access to a project in Sentry. '
                'access is managed automatically via app-interface. '
                'check out https://url.corp.redhat.com/sentry-help')
Example #3
def run(dry_run, gitlab_project_id=None, thread_pool_size=10):
    accounts = queries.get_aws_accounts()
    settings = queries.get_app_interface_settings()
    aws = AWSApi(thread_pool_size, accounts, settings=settings)
    existing_keys = aws.get_users_keys()
    existing_keys_list = [
        key for user_key in existing_keys.values()
        for keys in user_key.values() for key in keys
    ]
    logging.info("found {} existing keys".format(len(existing_keys_list)))

    app_int_github_repos = queries.get_repos(server="https://github.com")
    all_repos = get_all_repos_to_scan(app_int_github_repos)
    logging.info("about to scan {} repos".format(len(all_repos)))

    results = threaded.run(
        git_secrets.scan_history,
        all_repos,
        thread_pool_size,
        existing_keys=existing_keys_list,
    )
    all_leaked_keys = [key for keys in results for key in keys]

    deleted_keys = aws_sos.get_deleted_keys(accounts)
    keys_to_delete = [
        {
            "account": account,
            "key": key
        } for key in all_leaked_keys
        for account, user_keys in existing_keys.items()
        if key in [uk for uks in user_keys.values()
                   for uk in uks] and key not in deleted_keys[account]
    ]
    aws_sos.act(dry_run, gitlab_project_id, accounts, keys_to_delete)
Example #4
def get_version_history(dry_run, upgrade_policies, ocm_map):
    """Get a summary of versions history per OCM instance

    Args:
        dry_run (bool): when False, save the updated history to remote state
        upgrade_policies (list): query results of clusters upgrade policies
        ocm_map (OCMMap): OCM clients per OCM instance

    Returns:
        dict: version history per OCM instance
    """
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    state = State(
        integration=QONTRACT_INTEGRATION,
        accounts=accounts,
        settings=settings
    )
    results = {}
    # we keep a remote state per OCM instance
    for ocm_name in ocm_map.instances():
        history = state.get(ocm_name, {})
        update_history(history, upgrade_policies)
        results[ocm_name] = history
        if not dry_run:
            state.add(ocm_name, history, force=True)

    return results
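The docstring above describes a read-modify-write cycle against a remote, per-instance state. A self-contained sketch of the same pattern, with an in-memory stand-in for State (the real State is remote-backed, and update_history is stood in by a plain dict merge):

# Minimal sketch of the per-instance state pattern above.
# InMemoryState is a stand-in; the real State persists remotely.
class InMemoryState:
    def __init__(self):
        self._data = {}

    def get(self, key, default=None):
        return self._data.get(key, default)

    def add(self, key, value, force=False):
        if not force and key in self._data:
            raise KeyError(f'{key} already exists')
        self._data[key] = value

def collect_history(instance_names, updates, state, dry_run=True):
    results = {}
    for name in instance_names:
        history = state.get(name, {})
        history.update(updates.get(name, {}))  # stand-in for update_history
        results[name] = history
        if not dry_run:
            state.add(name, history, force=True)  # overwrite the remote copy
    return results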
Example #5
def aws_accounts(ctx, name):
    accounts = queries.get_aws_accounts(name=name)
    if not accounts:
        print('no aws accounts found')
        sys.exit(1)
    columns = ['name', 'consoleUrl']
    print_output(ctx.obj['options'], accounts, columns)
Example #6
def run(dry_run):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    state = State(
        integration=QONTRACT_INTEGRATION,
        accounts=accounts,
        settings=settings
    )
    emails = queries.get_app_interface_emails()
    smtp_client = SmtpClient(settings=settings)
    # validate no 2 emails have the same name
    email_names = set([e['name'] for e in emails])
    if len(emails) != len(email_names):
        logging.error('email names must be unique.')
        sys.exit(1)

    emails_to_send = [e for e in emails if not state.exists(e['name'])]
    for email in emails_to_send:
        logging.info(['send_email', email['name'], email['subject']])

        if not dry_run:
            names = collect_to(email['to'])
            subject = email['subject']
            body = email['body']
            smtp_client.send_mail(names, subject, body)
            state.add(email['name'])
Example #7
def run(dry_run,
        gitlab_project_id=None,
        thread_pool_size=10,
        enable_deletion=False):
    accounts = queries.get_aws_accounts()
    settings = queries.get_app_interface_settings()
    aws = AWSApi(thread_pool_size, accounts, settings=settings)
    deleted_keys = get_deleted_keys(accounts)
    existing_keys = aws.get_users_keys()
    aws_support_cases = aws.get_support_cases()
    keys_to_delete_from_cases = get_keys_to_delete(aws_support_cases)
    keys_to_delete = []
    for ktd in keys_to_delete_from_cases:
        ktd_account = ktd['account']
        ktd_key = ktd['key']
        account_deleted_keys = deleted_keys.get(ktd_account)
        if account_deleted_keys and ktd_key in account_deleted_keys:
            continue
        account_existing_keys = existing_keys.get(ktd_account)
        if account_existing_keys:
            keys_only = \
                itertools.chain.from_iterable(account_existing_keys.values())
            if ktd_key not in keys_only:
                continue
        keys_to_delete.append(ktd)

    act(dry_run, gitlab_project_id, accounts, keys_to_delete)
Example #8
def run(dry_run):
    accounts = queries.get_aws_accounts(reset_passwords=True)
    settings = queries.get_app_interface_settings()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)

    for a in accounts:
        aws_api = None
        account_name = a['name']
        reset_passwords = a.get('resetPasswords')
        if not reset_passwords:
            continue
        for r in reset_passwords:
            user_name = r['user']['org_username']
            request_id = r['requestId']
            state_key = f"{account_name}/{user_name}/{request_id}"
            if state.exists(state_key):
                continue

            logging.info(['reset_password', account_name, user_name])
            if dry_run:
                continue

            if aws_api is None:
                aws_api = AWSApi(1, [a], settings=settings)

            aws_api.reset_password(account_name, user_name)
            state.add(state_key)
Example #9
def init(gitlab_project_id=None, sqs_or_gitlab=None):
    """
    Creates the Merge Request client of a given type.

    :param gitlab_project_id: used when the client type is 'gitlab'
    :param sqs_or_gitlab: 'gitlab' or 'sqs'
    :return: an instance of the selected MR client.
    """
    if sqs_or_gitlab is None:
        settings = queries.get_app_interface_settings()
        client_type = settings.get('mergeRequestGateway', 'gitlab')
    else:
        client_type = sqs_or_gitlab

    if client_type == 'gitlab':
        if gitlab_project_id is None:
            raise MRClientGatewayError('Missing "gitlab_project_id".')

        instance = queries.get_gitlab_instance()
        settings = queries.get_app_interface_settings()
        saas_files = queries.get_saas_files_minimal(v1=True, v2=True)

        return GitLabApi(instance, project_id=gitlab_project_id,
                         settings=settings, saas_files=saas_files)

    elif client_type == 'sqs':
        accounts = queries.get_aws_accounts()
        settings = queries.get_app_interface_settings()

        return SQSGateway(accounts, settings=settings)

    else:
        raise MRClientGatewayError(f'Invalid client type: {client_type}')
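A hedged usage sketch for the factory above (the project id is made up, and both client types need reachable app-interface settings):

# Hypothetical usage; 1234 is a made-up GitLab project id.
mr_client = init(gitlab_project_id=1234, sqs_or_gitlab='gitlab')
# or defer to the 'mergeRequestGateway' setting in app-interface:
# mr_client = init(gitlab_project_id=1234)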
Example #10
def setup(dry_run, print_only, thread_pool_size, internal,
          use_jump_host, account_name, extra_labels):
    gqlapi = gql.get_api()
    accounts = queries.get_aws_accounts()
    if account_name:
        accounts = [n for n in accounts
                    if n['name'] == account_name]
        if not accounts:
            raise ValueError(f"aws account {account_name} is not found")
        extra_labels['shard_key'] = account_name
    settings = queries.get_app_interface_settings()
    namespaces = gqlapi.query(TF_NAMESPACES_QUERY)['namespaces']
    tf_namespaces = filter_tf_namespaces(namespaces, account_name)
    ri, oc_map = fetch_current_state(dry_run, tf_namespaces, thread_pool_size,
                                     internal, use_jump_host, account_name)
    ts, working_dirs = init_working_dirs(accounts, thread_pool_size,
                                         oc_map=oc_map,
                                         settings=settings)
    tf = Terraform(QONTRACT_INTEGRATION,
                   QONTRACT_INTEGRATION_VERSION,
                   QONTRACT_TF_PREFIX,
                   accounts,
                   working_dirs,
                   thread_pool_size)
    existing_secrets = tf.get_terraform_output_secrets()
    clusters = [c for c in queries.get_clusters()
                if c.get('ocm') is not None]
    ocm_map = OCMMap(clusters=clusters, integration=QONTRACT_INTEGRATION,
                     settings=settings)
    ts.populate_resources(tf_namespaces, existing_secrets, account_name,
                          ocm_map=ocm_map)
    ts.dump(print_only, existing_dirs=working_dirs)

    return ri, oc_map, tf, tf_namespaces
Example #11
def setup(dry_run, print_only, thread_pool_size, internal,
          use_jump_host, account_name):
    gqlapi = gql.get_api()
    accounts = queries.get_aws_accounts()
    if account_name:
        accounts = [n for n in accounts
                    if n['name'] == account_name]
        if not accounts:
            raise ValueError(f"aws account {account_name} is not found")
    settings = queries.get_app_interface_settings()
    namespaces = gqlapi.query(TF_NAMESPACES_QUERY)['namespaces']
    tf_namespaces = [namespace_info for namespace_info in namespaces
                     if namespace_info.get('managedTerraformResources')]
    ri, oc_map = fetch_current_state(dry_run, tf_namespaces, thread_pool_size,
                                     internal, use_jump_host)
    ts, working_dirs = init_working_dirs(accounts, thread_pool_size,
                                         print_only=print_only,
                                         oc_map=oc_map,
                                         settings=settings)
    tf = Terraform(QONTRACT_INTEGRATION,
                   QONTRACT_INTEGRATION_VERSION,
                   QONTRACT_TF_PREFIX,
                   working_dirs,
                   thread_pool_size)
    existing_secrets = tf.get_terraform_output_secrets()
    ts.populate_resources(tf_namespaces, existing_secrets, account_name)
    ts.dump(print_only, existing_dirs=working_dirs)

    return ri, oc_map, tf
Example #12
def run(dry_run,
        io_dir='throughput/',
        print_only=False,
        config_name=None,
        job_name=None,
        instance_name=None,
        defer=None):
    if not print_only and config_name is not None:
        raise Exception("--config-name must works with --print-only mode")
    jjb, additional_repo_urls = \
        init_jjb(instance_name, config_name, print_only)
    defer(lambda: jjb.cleanup())

    if print_only:
        jjb.print_jobs(job_name=job_name)
        if config_name is not None:
            jjb.generate(io_dir, 'printout')
        sys.exit(0)

    accounts = queries.get_aws_accounts()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=jjb.settings)

    if dry_run:
        validate_repos_and_admins(jjb, additional_repo_urls)
        jjb.generate(io_dir, 'desired')
        jjb.overwrite_configs(state)
        jjb.generate(io_dir, 'current')
        jjb.print_diffs(io_dir, instance_name)
    else:
        jjb.update()
        configs = jjb.get_configs()
        for name, desired_config in configs.items():
            state.add(name, value=desired_config, force=True)
Example #13
def aws_accounts(ctx, name):
    accounts = queries.get_aws_accounts()
    if name:
        accounts = [a for a in accounts if a['name'] == name]

    columns = ['name', 'consoleUrl']
    print_output(ctx.obj['output'], accounts, columns)
Example #14
def run(dry_run, thread_pool_size=10, io_dir="throughput/"):
    accounts = [a for a in queries.get_aws_accounts() if a.get("garbageCollection")]
    settings = queries.get_app_interface_settings()
    aws = AWSApi(thread_pool_size, accounts, settings=settings)
    if dry_run:
        aws.simulate_deleted_users(io_dir)
    aws.map_resources()
    aws.delete_resources_without_owner(dry_run)
Example #15
def run(dry_run, thread_pool_size=10):
    saas_files = queries.get_saas_files()
    if not saas_files:
        logging.error('no saas files found')
        sys.exit(1)

    # Remove saas-file targets that are disabled
    for saas_file in saas_files[:]:
        resource_templates = saas_file['resourceTemplates']
        for rt in resource_templates[:]:
            targets = rt['targets']
            for target in targets[:]:
                if target['disable']:
                    targets.remove(target)

    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    gl = GitLabApi(instance, settings=settings)
    jenkins_map = jenkins_base.get_jenkins_map()

    saasherder = SaasHerder(saas_files,
                            thread_pool_size=thread_pool_size,
                            gitlab=gl,
                            integration=QONTRACT_INTEGRATION,
                            integration_version=QONTRACT_INTEGRATION_VERSION,
                            settings=settings,
                            accounts=accounts)

    trigger_specs = saasherder.get_moving_commits_diff(dry_run)
    already_triggered = []
    error = False
    for job_spec in trigger_specs:
        saas_file_name = job_spec['saas_file_name']
        env_name = job_spec['env_name']
        instance_name = job_spec['instance_name']
        job_name = get_openshift_saas_deploy_job_name(saas_file_name, env_name,
                                                      settings)
        if job_name not in already_triggered:
            logging.info(['trigger_job', instance_name, job_name])
            if dry_run:
                already_triggered.append(job_name)

        if not dry_run:
            jenkins = jenkins_map[instance_name]
            try:
                if job_name not in already_triggered:
                    jenkins.trigger_job(job_name)
                    already_triggered.append(job_name)
                saasherder.update_moving_commit(job_spec)
            except Exception:
                error = True
                logging.error(
                    f"could not trigger job {job_name} in {instance_name}.")

    if error:
        sys.exit(1)
Example #16
def run(dry_run,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        defer=None):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    clusters = [c for c in queries.get_clusters(minimal=True) if c.get('ocm')]
    oc_map = OC_Map(clusters=clusters,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)
    defer(oc_map.cleanup)
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)

    if not dry_run:
        slack = init_slack_workspace(QONTRACT_INTEGRATION)

    now = datetime.utcnow()
    for cluster in oc_map.clusters(include_errors=True):
        oc = oc_map.get(cluster)
        if not oc:
            logging.log(level=oc.log_level, msg=oc.message)
            continue
        upgrade_config = oc.get(namespace='openshift-managed-upgrade-operator',
                                kind='UpgradeConfig',
                                allow_not_found=True)['items']
        if not upgrade_config:
            logging.debug(f'[{cluster}] UpgradeConfig not found.')
            continue
        [upgrade_config] = upgrade_config

        upgrade_spec = upgrade_config['spec']
        upgrade_at = upgrade_spec['upgradeAt']
        version = upgrade_spec['desired']['version']
        upgrade_at_obj = datetime.strptime(upgrade_at, '%Y-%m-%dT%H:%M:%SZ')
        state_key = f'{cluster}-{upgrade_at}'
        # if this is the first iteration in which 'now' has passed
        # the upgradeAt timestamp, we send a notification
        if upgrade_at_obj < now:
            if state.exists(state_key):
                # already notified
                continue
            logging.info(['cluster_upgrade', cluster])
            if not dry_run:
                state.add(state_key)
                usergroup = f'{cluster}-cluster'
                usergroup_id = slack.get_usergroup_id(usergroup)
                slack.chat_post_message(
                    f'Heads up <!subteam^{usergroup_id}>! '
                    f'cluster `{cluster}` is currently '
                    f'being upgraded to version `{version}`')
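The comment about the first iteration past 'upgradeAt' captures a notify-once pattern: a state key records that the notification was already sent, so reruns stay silent. A self-contained sketch under that assumption (state and send are stand-ins for State and the Slack call):

from datetime import datetime

def notify_once(state, cluster, upgrade_at, now, send):
    # state is assumed to expose exists()/add(), like State above
    upgrade_at_obj = datetime.strptime(upgrade_at, '%Y-%m-%dT%H:%M:%SZ')
    if upgrade_at_obj >= now:
        return False  # upgrade has not started yet
    key = f'{cluster}-{upgrade_at}'
    if state.exists(key):
        return False  # already notified on an earlier run
    state.add(key)  # record first, as the example above does
    send(cluster)
    return True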
Example #17
def get_accounts_names():
    accounts = queries.get_aws_accounts()
    settings = queries.get_app_interface_settings()
    # using Terrascript to filter out disabled accounts
    ts = Terrascript(tfr.QONTRACT_INTEGRATION,
                     tfr.QONTRACT_INTEGRATION_VERSION,
                     1,
                     accounts,
                     settings=settings)
    return ts.uids.keys()
Example #18
def run(dry_run):
    accounts = queries.get_aws_accounts(sharing=True)
    sharing_accounts = filter_accounts(accounts)
    settings = queries.get_app_interface_settings()
    aws_api = AWSApi(1, sharing_accounts, settings=settings, init_users=False)

    for src_account in sharing_accounts:
        sharing = src_account.get("sharing")
        if not sharing:
            continue
        for share in sharing:
            if share["provider"] != "ami":
                continue
            dst_account = share["account"]
            regex = share["regex"]
            region = get_region(share, src_account, dst_account)
            src_amis = aws_api.get_amis_details(src_account, src_account, regex, region)
            dst_amis = aws_api.get_amis_details(dst_account, src_account, regex, region)

            for src_ami in src_amis:
                src_ami_id = src_ami["image_id"]
                found_dst_amis = [d for d in dst_amis if d["image_id"] == src_ami_id]
                if not found_dst_amis:
                    logging.info(
                        [
                            "share_ami",
                            src_account["name"],
                            dst_account["name"],
                            src_ami_id,
                        ]
                    )
                    if not dry_run:
                        aws_api.share_ami(
                            src_account, dst_account["uid"], src_ami_id, region
                        )
                    # we assume an unshared ami does not have tags
                    found_dst_amis = [{"image_id": src_ami_id, "tags": []}]

                dst_ami = found_dst_amis[0]
                dst_ami_id = dst_ami["image_id"]
                dst_ami_tags = dst_ami["tags"]
                if MANAGED_TAG not in dst_ami_tags:
                    logging.info(
                        ["tag_shared_ami", dst_account["name"], dst_ami_id, MANAGED_TAG]
                    )
                    if not dry_run:
                        aws_api.create_tag(dst_account, dst_ami_id, MANAGED_TAG)
                src_ami_tags = src_ami["tags"]
                for src_tag in src_ami_tags:
                    if src_tag not in dst_ami_tags:
                        logging.info(
                            ["tag_shared_ami", dst_account["name"], dst_ami_id, src_tag]
                        )
                        if not dry_run:
                            aws_api.create_tag(dst_account, dst_ami_id, src_tag)
Example #19
def run(dry_run=False,
        print_only=False,
        enable_deletion=False,
        thread_pool_size=10,
        defer=None):
    settings = queries.get_app_interface_settings()
    desired_state = fetch_desired_state(settings)

    # check there are no repeated vpc connection names
    connection_names = [c['connection_name'] for c in desired_state]
    if len(set(connection_names)) != len(connection_names):
        logging.error("duplicated vpc connection names found")
        sys.exit(1)

    participating_accounts = \
        [item['account'] for item in desired_state]
    participating_account_names = \
        [a['name'] for a in participating_accounts]
    accounts = [
        a for a in queries.get_aws_accounts()
        if a['name'] in participating_account_names
    ]

    ts = Terrascript(QONTRACT_INTEGRATION,
                     "",
                     thread_pool_size,
                     accounts,
                     settings=settings)
    ts.populate_additional_providers(participating_accounts)
    ts.populate_vpc_peerings(desired_state)
    working_dirs = ts.dump(print_only=print_only)

    if print_only:
        sys.exit()

    tf = Terraform(QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION, "",
                   working_dirs, thread_pool_size)

    if tf is None:
        sys.exit(1)

    defer(lambda: tf.cleanup())

    deletions_detected, err = tf.plan(enable_deletion)
    if err:
        sys.exit(1)
    if deletions_detected and not enable_deletion:
        sys.exit(1)

    if dry_run:
        return

    err = tf.apply()
    if err:
        sys.exit(1)
Example #20
def get_aws_account_info(account):
    """
    Gets all AWS accounts from app-interface and searches
    for the desired one.
    """
    for account_info in queries.get_aws_accounts():
        if 'name' not in account_info:
            continue
        if account_info['name'] != account:
            continue
        return account_info
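A usage note for the helper above: the loop falls through without returning when nothing matches, so callers should treat None as "not found". A short hypothetical call (the account name is made up):

account_info = get_aws_account_info('some-account')  # hypothetical name
if account_info is None:
    raise ValueError('account not found in app-interface')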
Example #21
def run(dry_run=False,
        thread_pool_size=10,
        enable_deletion=False,
        io_dir='throughput/'):
    accounts = queries.get_aws_accounts()
    settings = queries.get_app_interface_settings()
    aws = AWSApi(thread_pool_size, accounts, settings=settings)
    if dry_run:
        aws.simulate_deleted_users(io_dir)
    aws.map_resources()
    aws.delete_resources_without_owner(dry_run, enable_deletion)
Example #22
def run(dry_run, thread_pool_size=10):
    saas_files = queries.get_saas_files()
    if not saas_files:
        logging.error('no saas files found')
        sys.exit(1)

    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    gl = GitLabApi(instance, settings=settings)
    jenkins_map = jenkins_base.get_jenkins_map()

    saasherder = SaasHerder(
        saas_files,
        thread_pool_size=thread_pool_size,
        gitlab=gl,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        settings=settings,
        accounts=accounts)

    trigger_specs = saasherder.get_configs_diff()
    already_triggered = []

    error = True  # enter loop
    while error:
        error = False
        for job_spec in trigger_specs:
            saas_file_name = job_spec['saas_file_name']
            env_name = job_spec['env_name']
            instance_name = job_spec['instance_name']
            job_name = get_openshift_saas_deploy_job_name(
                saas_file_name, env_name, settings)
            if job_name not in already_triggered:
                logging.info(['trigger_job', instance_name, job_name])
                if dry_run:
                    already_triggered.append(job_name)

            if not dry_run:
                jenkins = jenkins_map[instance_name]
                try:
                    if job_name not in already_triggered:
                        jenkins.trigger_job(job_name)
                        already_triggered.append(job_name)
                    saasherder.update_config(job_spec)
                except Exception as e:
                    error = True
                    logging.error(
                        f"could not trigger job {job_name} " +
                        f"in {instance_name}. details: {str(e)}"
                    )

        if error:
            time.sleep(10)  # add to constants module once created
Example #23
def run(dry_run, vault_output_path=''):
    accounts = [a for a in queries.get_aws_accounts() if a.get('ecrs')]
    settings = queries.get_app_interface_settings()
    aws = AWSApi(1, accounts, settings=settings, init_ecr_auth_tokens=True)
    for account, data in aws.auth_tokens.items():
        dockercfg_secret_data = construct_dockercfg_secret_data(data)
        basic_auth_secret_data = construct_basic_auth_secret_data(data)
        write_output_to_vault(dry_run, vault_output_path, account,
                              dockercfg_secret_data, 'dockercfg')
        write_output_to_vault(dry_run, vault_output_path, account,
                              basic_auth_secret_data, 'basic-auth')
Example #24
def ls(ctx, integration):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    state = State(integration, accounts, settings=settings)
    keys = state.ls()
    # if 'integration' is defined, the 0th token is empty
    table_content = [
        {'integration': k.split('/')[0] or integration,
         'key': '/'.join(k.split('/')[1:])}
        for k in keys]
    print_output('table', table_content, ['integration', 'key'])
Example #25
def run(dry_run, thread_pool_size=10,
        disable_service_account_keys=False, defer=None):
    accounts = queries.get_aws_accounts()
    settings = queries.get_app_interface_settings()
    aws = AWSApi(thread_pool_size, accounts, settings=settings)
    keys_to_delete = get_keys_to_delete(accounts)
    working_dirs = init_tf_working_dirs(accounts, thread_pool_size, settings)
    defer(lambda: cleanup(working_dirs))
    error = aws.delete_keys(dry_run, keys_to_delete, working_dirs,
                            disable_service_account_keys)
    if error:
        sys.exit(1)
Example #26
def run(dry_run, gitlab_project_id):
    settings = queries.get_app_interface_settings()

    accounts = queries.get_aws_accounts()
    sqs_cli = SQSGateway(accounts, settings=settings)

    instance = queries.get_gitlab_instance()
    saas_files = queries.get_saas_files_minimal(v1=True, v2=True)
    gitlab_cli = GitLabApi(instance,
                           project_id=gitlab_project_id,
                           settings=settings,
                           saas_files=saas_files)

    errors_occurred = False
    while True:
        messages = sqs_cli.receive_messages()
        logging.info('received %s messages', len(messages))

        if not messages:
            # sqs_cli.receive_messages delivers messages in chunks
            # until the queue is empty... when that happens,
            # we end this integration run
            break

        # not all integrations are going to resend their MR messages
        # therefore we need to be careful not to delete any messages
        # before they have been properly handled

        for m in messages:
            receipt_handle, body = m[0], m[1]
            logging.info('received message %s with body %s',
                         receipt_handle[:6], json.dumps(body))

            if not dry_run:
                try:
                    merge_request = mr.init_from_sqs_message(body)
                    merge_request.submit_to_gitlab(gitlab_cli=gitlab_cli)
                    sqs_cli.delete_message(str(receipt_handle))
                except mr.UnknownMergeRequestType as ex:
                    # Received an unknown MR type.
                    # This could be a producer being on a newer version
                    # of qontract-reconcile than the consumer.
                    # Therefore we don't delete it from the queue for
                    # potential future processing.
                    # TODO - monitor age of messages in queue
                    logging.warning(ex)
                    errors_occurred = True
                except mr.MergeRequestProcessingError as processing_error:
                    logging.error(processing_error)
                    errors_occurred = True

    if errors_occurred:
        sys.exit(1)
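The comments in this example state two invariants: receive_messages drains the queue in chunks until it returns nothing, and a message may only be deleted after it was handled successfully. A self-contained sketch of that loop (receive, handle and delete are stand-ins for the SQSGateway and MR calls):

def drain_queue(receive, handle, delete):
    while True:
        messages = receive()  # chunked; an empty list ends the run
        if not messages:
            break
        for receipt_handle, body in messages:
            if handle(body):  # delete only after successful handling
                delete(receipt_handle)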
Example #27
def aws_creds(ctx, account_name):
    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    accounts = queries.get_aws_accounts(name=account_name)
    if not accounts:
        print(f"{account_name} not found.")
        sys.exit(1)

    account = accounts[0]
    secret = secret_reader.read_all(account['automationToken'])
    print(f"export AWS_REGION={account['resourcesDefaultRegion']}")
    print(f"export AWS_ACCESS_KEY_ID={secret['aws_access_key_id']}")
    print(f"export AWS_SECRET_ACCESS_KEY={secret['aws_secret_access_key']}")
Example #28
def run(dry_run=False):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    state = State(
        integration=QONTRACT_INTEGRATION,
        accounts=accounts,
        settings=settings
    )
    credentials_requests = queries.get_credentials_requests()

    # validate no 2 requests have the same name
    credentials_requests_names = \
        set([r['name'] for r in credentials_requests])
    if len(credentials_requests) != len(credentials_requests_names):
        logging.error('request names must be unique.')
        sys.exit(1)

    error = False

    credentials_requests_to_send = \
        [r for r in credentials_requests if not state.exists(r['name'])]
    for credentials_request_to_send in credentials_requests_to_send:
        user = credentials_request_to_send['user']
        org_username = user['org_username']
        public_gpg_key = user.get('public_gpg_key')
        credentials_name = credentials_request_to_send['credentials']
        if not public_gpg_key:
            error = True
            logging.error(
                f"user {org_username} does not have a public gpg key")
            continue
        logging.info(['send_credentials', org_username, credentials_name])

        if not dry_run:
            request_name = credentials_request_to_send['name']
            names = [org_username]
            subject = request_name
            encrypted_credentials = \
                get_encrypted_credentials(credentials_name, user, settings)
            if not encrypted_credentials:
                error = True
                logging.error(
                    f"could not get encrypted credentials {credentials_name}")
                continue
            body = MESSAGE_TEMPLATE.format(
                request_name, credentials_name, encrypted_credentials)
            smtp_client.send_mail(names, subject, body, settings=settings)
            state.add(request_name)

    if error:
        sys.exit(1)
Example #29
def run(dry_run):
    unleash_instances = queries.get_unleash_instances()
    accounts = queries.get_aws_accounts()
    settings = queries.get_app_interface_settings()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)
    for unleash_instance in unleash_instances:
        instance_name = unleash_instance['name']
        current_state = fetch_current_state(unleash_instance)
        previous_state = fetch_previous_state(state, instance_name)
        diffs = calculate_diff(current_state, previous_state)
        if diffs:
            act(dry_run, state, unleash_instance, diffs)
Example #30
def ls(ctx, integration):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    state = State(integration, accounts, settings=settings)
    keys = state.ls()
    # if integration is not defined, the 2nd token is the integration name
    key_index = 1 if integration else 2
    table_content = [{
        'integration': integration or k.split('/')[1],
        'key': '/'.join(k.split('/')[key_index:])
    } for k in keys]
    print_output({
        'output': 'table',
        'sort': False
    }, table_content, ['integration', 'key'])
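The two ls variants above tokenize State.ls() keys differently depending on whether the State was scoped to an integration. An illustration with a made-up key for the unscoped case:

# Keys start with '/', so token 0 is always empty.
key = '/email-sender/some-email-name'  # illustrative only
tokens = key.split('/')
assert tokens[0] == ''
integration_name = tokens[1]  # 2nd token when State is not integration-scoped
rest = '/'.join(tokens[2:])   # the key itself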