Example #1
def collect_state():
    state = []
    saas_files = queries.get_saas_files()
    for saas_file in saas_files:
        saas_file_path = saas_file['path']
        saas_file_name = saas_file['name']
        saas_file_parameters = json.loads(saas_file.get('parameters') or '{}')
        resource_templates = saas_file['resourceTemplates']
        for resource_template in resource_templates:
            resource_template_name = resource_template['name']
            resource_template_parameters = \
                json.loads(resource_template.get('parameters') or '{}')
            for target in resource_template['targets']:
                namespace = target['namespace']['name']
                cluster = target['namespace']['cluster']['name']
                target_ref = target['ref']
                target_parameters = \
                    json.loads(target.get('parameters') or '{}')
                parameters = {}
                parameters.update(saas_file_parameters)
                parameters.update(resource_template_parameters)
                parameters.update(target_parameters)
                state.append({
                    'saas_file_path': saas_file_path,
                    'saas_file_name': saas_file_name,
                    'resource_template_name': resource_template_name,
                    'cluster': cluster,
                    'namespace': namespace,
                    'ref': target_ref,
                    'parameters': parameters
                })
    return state
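A minimal usage sketch (not part of the original example): diffing two
collect_state() snapshots to find targets whose ref or parameters changed.
The helper name diff_state and the choice of identity key are assumptions
made for illustration.

def diff_state(previous_state, current_state):
    # Identify a target by saas file, resource template, cluster
    # and namespace.
    def key(item):
        return (item['saas_file_path'], item['resource_template_name'],
                item['cluster'], item['namespace'])

    previous = {key(item): item for item in previous_state}
    changed = []
    for item in current_state:
        old = previous.get(key(item))
        # new targets, moved refs and changed parameters all count
        if old is None or (old['ref'], old['parameters']) != \
                (item['ref'], item['parameters']):
            changed.append(item)
    return changed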
Example #2
def collect_owners():
    owners = {}
    saas_files = queries.get_saas_files(v1=True, v2=True)
    for saas_file in saas_files:
        saas_file_name = saas_file['name']
        owners[saas_file_name] = set()
        owner_roles = saas_file.get('roles')
        if not owner_roles:
            continue
        for owner_role in owner_roles:
            owner_users = owner_role.get('users')
            if not owner_users:
                continue
            for owner_user in owner_users:
                owner_username = owner_user['org_username']
                if owner_user.get('tag_on_merge_requests'):
                    owner_username = f'@{owner_username}'
                owners[saas_file_name].add(owner_username)

    # make owners suitable for json dump
    ans = {}
    for k, v in owners.items():
        ans[k] = list(v)

    return ans
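A brief usage sketch (an assumption, not shown in the source): because
collect_owners() already converts each set to a list, the result can be
serialized with json directly.

import json

owners = collect_owners()
print(json.dumps(owners, indent=2, sort_keys=True))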
Example #3
def collect_state():
    state = []
    saas_files = queries.get_saas_files(v1=True, v2=True)
    for saas_file in saas_files:
        saas_file_path = saas_file['path']
        saas_file_name = saas_file['name']
        saas_file_parameters = json.loads(saas_file.get('parameters') or '{}')
        saas_file_definitions = {
            'managed_resource_types': saas_file['managedResourceTypes'],
            'image_patterns': saas_file['imagePatterns'],
            'use_channel_in_image_tag':
                saas_file.get('use_channel_in_image_tag') or False,
        }
        resource_templates = saas_file['resourceTemplates']
        for resource_template in resource_templates:
            resource_template_name = resource_template['name']
            resource_template_parameters = \
                json.loads(resource_template.get('parameters') or '{}')
            resource_template_url = resource_template['url']
            for target in resource_template['targets']:
                namespace_info = target['namespace']
                namespace = namespace_info['name']
                cluster = namespace_info['cluster']['name']
                environment = namespace_info['environment']['name']
                target_ref = target['ref']
                target_delete = target.get('delete')
                target_parameters = \
                    json.loads(target.get('parameters') or '{}')
                parameters = {}
                parameters.update(saas_file_parameters)
                parameters.update(resource_template_parameters)
                parameters.update(target_parameters)
                state.append({
                    'saas_file_path': saas_file_path,
                    'saas_file_name': saas_file_name,
                    'resource_template_name': resource_template_name,
                    'cluster': cluster,
                    'namespace': namespace,
                    'environment': environment,
                    'url': resource_template_url,
                    'ref': target_ref,
                    'parameters': parameters,
                    'saas_file_definitions':
                        copy.deepcopy(saas_file_definitions),
                    'delete': target_delete,
                })
    return state
Example #4
def saas_dev(ctx, app_name=None, saas_file_name=None, env_name=None):
    if env_name in [None, '']:
        print('env-name must be defined')
        return
    saas_files = queries.get_saas_files(saas_file_name, env_name, app_name,
                                        v1=True, v2=True)
    if not saas_files:
        print('no saas files found')
        sys.exit(1)
    for saas_file in saas_files:
        saas_file_parameters = \
            json.loads(saas_file.get('parameters') or '{}')
        for rt in saas_file['resourceTemplates']:
            url = rt['url']
            path = rt['path']
            rt_parameters = \
                json.loads(rt.get('parameters') or '{}')
            for target in rt['targets']:
                target_parameters = \
                    json.loads(target.get('parameters') or '{}')
                namespace = target['namespace']
                namespace_name = namespace['name']
                environment = namespace['environment']
                if environment['name'] != env_name:
                    continue
                ref = target['ref']
                environment_parameters = \
                    json.loads(environment.get('parameters') or '{}')
                parameters = {}
                parameters.update(environment_parameters)
                parameters.update(saas_file_parameters)
                parameters.update(rt_parameters)
                parameters.update(target_parameters)

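                # Resolve ${KEY} references between the merged parameters,
                # e.g. {'HOST': 'api', 'URL': 'https://${HOST}'} becomes
                # {'HOST': 'api', 'URL': 'https://api'} (a single pass in
                # dict iteration order).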
                for replace_key, replace_value in parameters.items():
                    if not isinstance(replace_value, str):
                        continue
                    replace_pattern = '${' + replace_key + '}'
                    for k, v in parameters.items():
                        if not isinstance(v, str):
                            continue
                        if replace_pattern in v:
                            parameters[k] = \
                                v.replace(replace_pattern, replace_value)

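                # Build a preview command: fetch the template from the raw
                # content URL (raw.githubusercontent.com for GitHub, or the
                # /raw/<ref> path for GitLab), process it locally with the
                # merged parameters, then apply it as a dry run.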
                parameters_cmd = ''
                for k, v in parameters.items():
                    parameters_cmd += f" -p {k}=\"{v}\""
                raw_url = \
                    url.replace('github.com', 'raw.githubusercontent.com')
                if 'gitlab' in raw_url:
                    raw_url += '/raw'
                raw_url += '/' + ref
                raw_url += path
                cmd = "oc process --local --ignore-unknown-parameters" + \
                    f"{parameters_cmd} -f {raw_url}" + \
                    f" | oc apply -n {namespace_name} -f - --dry-run"
                print(cmd)
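Example #5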
def run(dry_run, thread_pool_size=10):
    saas_files = queries.get_saas_files()
    if not saas_files:
        logging.error('no saas files found')
        sys.exit(1)

    # Remove saas-file targets that are disabled
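    # Iterating over slice copies so that targets.remove() does not
    # skip elements of the lists being mutated.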
    for saas_file in saas_files[:]:
        resource_templates = saas_file['resourceTemplates']
        for rt in resource_templates[:]:
            targets = rt['targets']
            for target in targets[:]:
                if target['disable']:
                    targets.remove(target)

    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    gl = GitLabApi(instance, settings=settings)
    jenkins_map = jenkins_base.get_jenkins_map()

    saasherder = SaasHerder(saas_files,
                            thread_pool_size=thread_pool_size,
                            gitlab=gl,
                            integration=QONTRACT_INTEGRATION,
                            integration_version=QONTRACT_INTEGRATION_VERSION,
                            settings=settings,
                            accounts=accounts)

    trigger_specs = saasherder.get_moving_commits_diff(dry_run)
    already_triggered = []
    error = False
    for job_spec in trigger_specs:
        saas_file_name = job_spec['saas_file_name']
        env_name = job_spec['env_name']
        instance_name = job_spec['instance_name']
        job_name = get_openshift_saas_deploy_job_name(saas_file_name, env_name,
                                                      settings)
        if job_name not in already_triggered:
            logging.info(['trigger_job', instance_name, job_name])
            if dry_run:
                already_triggered.append(job_name)

        if not dry_run:
            jenkins = jenkins_map[instance_name]
            try:
                if job_name not in already_triggered:
                    jenkins.trigger_job(job_name)
                    already_triggered.append(job_name)
                saasherder.update_moving_commit(job_spec)
            except Exception:
                error = True
                logging.error(
                    f"could not trigger job {job_name} in {instance_name}.")

    if error:
        sys.exit(1)
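Example #6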
def run(dry_run, thread_pool_size=10):
    saas_files = queries.get_saas_files()
    if not saas_files:
        logging.error('no saas files found')
        sys.exit(1)

    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    gl = GitLabApi(instance, settings=settings)
    jenkins_map = jenkins_base.get_jenkins_map()

    saasherder = SaasHerder(
        saas_files,
        thread_pool_size=thread_pool_size,
        gitlab=gl,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        settings=settings,
        accounts=accounts)

    trigger_specs = saasherder.get_configs_diff()
    already_triggered = []

    error = True  # enter loop
    while error:
        error = False
        for job_spec in trigger_specs:
            saas_file_name = job_spec['saas_file_name']
            env_name = job_spec['env_name']
            instance_name = job_spec['instance_name']
            job_name = get_openshift_saas_deploy_job_name(
                saas_file_name, env_name, settings)
            if job_name not in already_triggered:
                logging.info(['trigger_job', instance_name, job_name])
                if dry_run:
                    already_triggered.append(job_name)

            if not dry_run:
                jenkins = jenkins_map[instance_name]
                try:
                    if job_name not in already_triggered:
                        jenkins.trigger_job(job_name)
                        already_triggered.append(job_name)
                    saasherder.update_config(job_spec)
                except Exception as e:
                    error = True
                    logging.error(
                        f"could not trigger job {job_name} " +
                        f"in {instance_name}. details: {str(e)}"
                    )

        if error:
            time.sleep(10)  # add to constants module once created
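Example #7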
def run(dry_run):
    saas_files = queries.get_saas_files()
    settings = queries.get_app_interface_settings()
    saasherder = SaasHerder(saas_files,
                            thread_pool_size=1,
                            gitlab=None,
                            integration=QONTRACT_INTEGRATION,
                            integration_version=QONTRACT_INTEGRATION_VERSION,
                            settings=settings)
    if not saasherder.valid:
        sys.exit(1)
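Example #8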
def run(dry_run,
        thread_pool_size=10,
        saas_file_name=None,
        env_name=None,
        defer=None):
    saas_files = queries.get_saas_files(saas_file_name, env_name)
    if not saas_files:
        logging.error('no saas files found')
        sys.exit(1)

    instance = queries.get_gitlab_instance()
    desired_jenkins_instances = [s['instance']['name'] for s in saas_files]
    jenkins_map = jenkins_base.get_jenkins_map(
        desired_instances=desired_jenkins_instances)
    settings = queries.get_app_interface_settings()
    try:
        gl = GitLabApi(instance, settings=settings)
    except Exception:
        # allow execution without access to gitlab
        # as long as there are no access attempts.
        gl = None

    saasherder = SaasHerder(saas_files,
                            thread_pool_size=thread_pool_size,
                            gitlab=gl,
                            integration=QONTRACT_INTEGRATION,
                            integration_version=QONTRACT_INTEGRATION_VERSION,
                            settings=settings,
                            jenkins_map=jenkins_map)
    if not saasherder.valid:
        sys.exit(1)
    if len(saasherder.namespaces) == 0:
        logging.warning('no targets found')
        sys.exit(0)

    ri, oc_map = ob.fetch_current_state(
        namespaces=saasherder.namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION)
    defer(lambda: oc_map.cleanup())
    saasherder.populate_desired_state(ri)
    # if saas_file_name is defined, the integration
    # is being called from multiple running instances
    ob.realize_data(dry_run,
                    oc_map,
                    ri,
                    caller=saas_file_name,
                    wait_for_namespace=True,
                    no_dry_run_skip_compare=True,
                    take_over=saasherder.take_over)

    if ri.has_error_registered():
        sys.exit(1)
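Example #9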
def run(dry_run):
    saas_files = queries.get_saas_files(v1=True, v2=True)
    settings = queries.get_app_interface_settings()
    saasherder = SaasHerder(
        saas_files,
        thread_pool_size=1,
        gitlab=None,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        settings=settings,
        validate=True)
    app_int_repos = queries.get_repos()
    missing_repos = [r for r in saasherder.repo_urls
                     if r not in app_int_repos]
    for r in missing_repos:
        logging.error(f'repo is missing from codeComponents: {r}')
    if not saasherder.valid or missing_repos:
        sys.exit(ExitCodes.ERROR)
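Example #10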
def run(dry_run=False,
        thread_pool_size=10,
        saas_file_name=None,
        env_name=None,
        defer=None):
    saas_files = queries.get_saas_files(saas_file_name, env_name)
    if not saas_files:
        logging.error('no saas files found')
        sys.exit(1)

    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    try:
        gl = GitLabApi(instance, settings=settings)
    except Exception:
        # allow execution without access to gitlab
        # as long as there are no access attempts.
        gl = None

    saasherder = SaasHerder(saas_files,
                            thread_pool_size=thread_pool_size,
                            gitlab=gl,
                            integration=QONTRACT_INTEGRATION,
                            integration_version=QONTRACT_INTEGRATION_VERSION,
                            settings=settings)
    if not saasherder.valid:
        sys.exit(1)

    ri, oc_map = ob.fetch_current_state(
        namespaces=saasherder.namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION)
    defer(lambda: oc_map.cleanup())
    saasherder.populate_desired_state(ri)
    # if saas_file_name is defined, the integration
    # is being called from multiple running instances
    ob.realize_data(dry_run, oc_map, ri, caller=saas_file_name)

    if ri.has_error_registered():
        sys.exit(1)
Example #11
def collect_state():
    state = []
    saas_files = queries.get_saas_files()
    for saas_file in saas_files:
        saas_file_path = saas_file['path']
        saas_file_name = saas_file['name']
        saas_file_parameters = json.loads(saas_file.get('parameters') or '{}')
        saas_file_definitions = {
            'managed_resource_types': saas_file['managedResourceTypes'],
            'image_patterns': saas_file['imagePatterns']
        }
        resource_templates = saas_file['resourceTemplates']
        for resource_template in resource_templates:
            resource_template_name = resource_template['name']
            resource_template_parameters = \
                json.loads(resource_template.get('parameters') or '{}')
            for target in resource_template['targets']:
                namespace_info = target['namespace']
                namespace = namespace_info['name']
                cluster = namespace_info['cluster']['name']
                environment = namespace_info['environment']['name']
                target_ref = target['ref']
                target_parameters = \
                    json.loads(target.get('parameters') or '{}')
                parameters = {}
                parameters.update(saas_file_parameters)
                parameters.update(resource_template_parameters)
                parameters.update(target_parameters)
                state.append({
                    'saas_file_path': saas_file_path,
                    'saas_file_name': saas_file_name,
                    'resource_template_name': resource_template_name,
                    'cluster': cluster,
                    'namespace': namespace,
                    'environment': environment,
                    'ref': target_ref,
                    'parameters': parameters,
                    'saas_file_definitions': saas_file_definitions
                })
    return state
Example #12
def collect_state():
    state = []
    saas_files = queries.get_saas_files()
    for saas_file in saas_files:
        saas_file_path = saas_file['path']
        saas_file_name = saas_file['name']
        resource_templates = saas_file['resourceTemplates']
        for resource_template in resource_templates:
            resource_template_name = resource_template['name']
            for target in resource_template['targets']:
                namespace = target['namespace']['name']
                cluster = target['namespace']['cluster']['name']
                target_hash = target['hash']
                state.append({
                    'saas_file_path': saas_file_path,
                    'saas_file_name': saas_file_name,
                    'resource_template_name': resource_template_name,
                    'cluster': cluster,
                    'namespace': namespace,
                    'hash': target_hash
                })
    return state
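A usage sketch in the same assumed spirit as the diff above: comparing two
snapshots of this hash-based state. The helper name changed_hashes is
hypothetical.

def changed_hashes(previous_state, current_state):
    # Map each target to its recorded hash and report mismatches.
    def key(item):
        return (item['saas_file_path'], item['resource_template_name'],
                item['cluster'], item['namespace'])

    previous = {key(item): item['hash'] for item in previous_state}
    return [item for item in current_state
            if previous.get(key(item)) != item['hash']]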
Example #13
def run(dry_run=False, thread_pool_size=10, defer=None):
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    aws_accounts = queries.get_aws_accounts()
    gl = GitLabApi(instance, settings=settings)

    saas_files = queries.get_saas_files()
    saasherder = SaasHerder(saas_files,
                            gitlab=gl,
                            integration=QONTRACT_INTEGRATION,
                            integration_version=QONTRACT_INTEGRATION_VERSION,
                            settings=settings)
    ri, oc_map = ob.fetch_current_state(
        namespaces=saasherder.namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION)
    defer(lambda: oc_map.cleanup())
    saasherder.populate_desired_state(ri)
    enable_deletion = not ri.has_error_registered()
    ob.realize_data(dry_run, oc_map, ri, enable_deletion=enable_deletion)
    saasherder.slack_notify(dry_run, aws_accounts, ri)
Example #14
def collect_owners():
    owners = {}
    saas_files = queries.get_saas_files()
    for saas_file in saas_files:
        saas_file_name = saas_file['name']
        owners[saas_file_name] = set()
        owner_roles = saas_file.get('roles')
        if not owner_roles:
            continue
        for owner_role in owner_roles:
            owner_users = owner_role.get('users')
            if not owner_users:
                continue
            for owner_user in owner_users:
                owner_username = owner_user['org_username']
                owners[saas_file_name].add(owner_username)

    # make owners suitable for json dump
    for k in owners:
        owners[k] = list(owners[k])

    return owners
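Example #15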
def collect_saas_file_configs():
    # collect a list of jobs per saas file per environment.
    # each saas_file_config should have the structure described
    # in the above query.
    # to make things understandable, each variable used to form
    # the structure will be called `jc_<variable>` (jenkins config).
    saas_file_configs = []
    saas_files = queries.get_saas_files()
    settings = queries.get_app_interface_settings()
    for saas_file in saas_files:
        saas_file_name = saas_file['name']
        jc_instance = saas_file['instance']
        # currently ignoring the actual Slack workspace
        # as that is configured in Jenkins.
        # revisit this if we support more than a single Slack workspace.
        slack_channel = saas_file['slack']['channel']
        for resource_template in saas_file['resourceTemplates']:
            for target in resource_template['targets']:
                namespace = target['namespace']
                env_name = namespace['environment']['name']
                upstream = target.get('upstream', '')
                job_template_name = settings['saasDeployJobTemplate']
                if upstream:
                    job_template_name += '-with-upstream'
                app_name = namespace['app']['name']
                jc_name = get_openshift_saas_deploy_job_name(
                    saas_file_name, env_name, settings)
                existing_configs = \
                    [c for c in saas_file_configs if c['name'] == jc_name]
                if existing_configs:
                    continue

                # each config is a list with a single item
                # with the following structure:
                # project:
                #   name: 'openshift-saas-deploy-{saas_file_name}-{env_name}'
                #   saas_file_name: '{saas_file_name}'
                #   env_name: '{env_name}'
                #   app_name: '{app_name}'
                #   slack_channel: '{slack_channel}'
                #   jobs:
                #   - 'openshift-saas-deploy':
                #       display_name: display name of the job
                jc_config = json.dumps([{
                    'project': {
                        'name': jc_name,
                        'saas_file_name': saas_file_name,
                        'env_name': env_name,
                        'app_name': app_name,
                        'slack_channel': slack_channel,
                        'upstream': upstream,
                        'jobs': [{
                            job_template_name: {
                                'display_name': jc_name
                            }
                        }]
                    }
                }])
                saas_file_configs.append({
                    'name': jc_name,
                    'instance': jc_instance,
                    'type': 'jobs',
                    'config': jc_config
                })

    return saas_file_configs, settings
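Example #16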
def get_apps_data(date, month_delta=1, thread_pool_size=10):
    apps = queries.get_apps()
    saas_files = queries.get_saas_files()
    jjb, _ = init_jjb()
    jenkins_map = jenkins_base.get_jenkins_map()
    time_limit = date - relativedelta(months=month_delta)
    timestamp_limit = \
        int(time_limit.replace(tzinfo=timezone.utc).timestamp())

    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    secret_content = secret_reader.read_all({'path': DASHDOTDB_SECRET})
    dashdotdb_url = secret_content['url']
    dashdotdb_user = secret_content['username']
    dashdotdb_pass = secret_content['password']
    auth = (dashdotdb_user, dashdotdb_pass)
    vuln_metrics = requests.get(
        f'{dashdotdb_url}/api/v1/imagemanifestvuln/metrics', auth=auth).text
    validt_metrics = requests.get(
        f'{dashdotdb_url}/api/v1/deploymentvalidation/metrics', auth=auth).text
    slo_metrics = requests.get(
        f'{dashdotdb_url}/api/v1/serviceslometrics/metrics', auth=auth).text
    namespaces = queries.get_namespaces()

    build_jobs = jjb.get_all_jobs(job_types=['build'])
    jobs_to_get = build_jobs.copy()

    saas_deploy_jobs = []
    for saas_file in saas_files:
        saas_file_name = saas_file['name']
        for template in saas_file["resourceTemplates"]:
            for target in template["targets"]:
                job = {}
                job['env'] = target["namespace"]["environment"]["name"]
                job['app'] = target["namespace"]["app"]["name"]
                job['cluster'] = target['namespace']['cluster']['name']
                job['namespace'] = target['namespace']['name']
                job['name'] = get_openshift_saas_deploy_job_name(
                    saas_file_name, job['env'], settings)
                job['saas_file_name'] = saas_file_name
                job['instance'] = saas_file["instance"]["name"]
                saas_deploy_jobs.append(job)
                if job['instance'] not in jobs_to_get:
                    jobs_to_get[job['instance']] = [job]
                else:
                    jobs_to_get[job['instance']].append(job)

    job_history = get_build_history_pool(jenkins_map, jobs_to_get,
                                         timestamp_limit, thread_pool_size)

    for app in apps:
        if not app['codeComponents']:
            continue

        app_name = app['name']

        logging.info(f"collecting post-deploy jobs "
                     f"information for {app_name}")
        post_deploy_jobs = {}
        for saas_file in saas_files:
            if saas_file['app']['name'] != app_name:
                continue
            resource_types = saas_file['managedResourceTypes']

            # Only jobs of these types are expected to have a
            # further post-deploy job
            if not any(['Deployment' in resource_types,
                        'DeploymentConfig' not in resource_types]):
                continue

            for resource_template in saas_file['resourceTemplates']:
                for target in resource_template['targets']:
                    cluster = target['namespace']['cluster']['name']
                    namespace = target['namespace']['name']
                    post_deploy_jobs[cluster] = {}
                    post_deploy_jobs[cluster][namespace] = False

        for saas_file in saas_files:
            if saas_file['app']['name'] != app_name:
                continue
            resource_types = saas_file['managedResourceTypes']
            if 'Job' not in resource_types:
                continue
            for resource_template in saas_file['resourceTemplates']:
                for target in resource_template['targets']:

                    cluster = target['namespace']['cluster']['name']
                    namespace = target['namespace']['name']

                    # This block skips the check if the cluster/namespace
                    # has no Deployment/DeploymentConfig job associated.
                    if cluster not in post_deploy_jobs:
                        continue
                    if namespace not in post_deploy_jobs[cluster]:
                        continue

                    # Post-deploy job must depend on an
                    # openshift-saas-deploy job
                    if target['upstream'] is None:
                        continue
                    if target['upstream'].startswith('openshift-saas-deploy-'):
                        post_deploy_jobs[cluster][namespace] = True

        app['post_deploy_jobs'] = post_deploy_jobs

        logging.info(f"collecting promotion history for {app_name}")
        app["promotions"] = {}
        for job in saas_deploy_jobs:
            if job['app'] != app_name:
                continue
            if job['name'] not in job_history:
                continue
            history = job_history[job["name"]]
            saas_file_name = job['saas_file_name']
            if saas_file_name not in app["promotions"]:
                app["promotions"][saas_file_name] = [{
                    "env": job["env"],
                    "cluster": job["cluster"],
                    "namespace": job["namespace"],
                    **history
                }]
            else:
                app["promotions"][saas_file_name].append({
                    "env": job["env"],
                    "cluster": job["cluster"],
                    "namespace": job["namespace"],
                    **history
                })

        logging.info(f"collecting merge activity for {app_name}")
        app['merge_activity'] = {}
        code_repos = [
            c['url'] for c in app['codeComponents']
            if c['resource'] == 'upstream'
        ]
        for jobs in build_jobs.values():
            for job in jobs:
                try:
                    repo_url = get_repo_url(job)
                except KeyError:
                    continue
                if repo_url not in code_repos:
                    continue
                if job['name'] not in job_history:
                    continue
                history = job_history[job['name']]
                if repo_url not in app["merge_activity"]:
                    app["merge_activity"][repo_url] = [{
                        "branch": job["branch"],
                        **history
                    }]
                else:
                    app["merge_activity"][repo_url].append({
                        "branch": job["branch"],
                        **history
                    })

        logging.info(f"collecting dashdotdb information for {app_name}")
        app_namespaces = []
        for namespace in namespaces:
            if namespace['app']['name'] != app['name']:
                continue
            app_namespaces.append(namespace)
        vuln_mx = {}
        validt_mx = {}
        slo_mx = {}
        for family in text_string_to_metric_families(vuln_metrics):
            for sample in family.samples:
                if sample.name == 'imagemanifestvuln_total':
                    for app_namespace in app_namespaces:
                        cluster = sample.labels['cluster']
                        if app_namespace['cluster']['name'] != cluster:
                            continue
                        namespace = sample.labels['namespace']
                        if app_namespace['name'] != namespace:
                            continue
                        severity = sample.labels['severity']
                        if cluster not in vuln_mx:
                            vuln_mx[cluster] = {}
                        if namespace not in vuln_mx[cluster]:
                            vuln_mx[cluster][namespace] = {}
                        if severity not in vuln_mx[cluster][namespace]:
                            value = int(sample.value)
                            vuln_mx[cluster][namespace][severity] = value
        for family in text_string_to_metric_families(validt_metrics):
            for sample in family.samples:
                if sample.name == 'deploymentvalidation_total':
                    for app_namespace in app_namespaces:
                        cluster = sample.labels['cluster']
                        if app_namespace['cluster']['name'] != cluster:
                            continue
                        namespace = sample.labels['namespace']
                        if app_namespace['name'] != namespace:
                            continue
                        validation = sample.labels['validation']
                        # dvo: fail == 1, pass == 0, py: true == 1, false == 0
                        # so: ({false|pass}, {true|fail})
                        status = ('Passed',
                                  'Failed')[int(sample.labels['status'])]
                        validation_mx = validt_mx \
                            .setdefault(cluster, {}) \
                            .setdefault(namespace, {}) \
                            .setdefault(validation, {})
                        validation_mx[status] = int(sample.value)
        for family in text_string_to_metric_families(slo_metrics):
            for sample in family.samples:
                if sample.name == 'serviceslometrics':
                    for app_namespace in app_namespaces:
                        cluster = sample.labels['cluster']
                        if app_namespace['cluster']['name'] != cluster:
                            continue
                        namespace = sample.labels['namespace']
                        if app_namespace['name'] != namespace:
                            continue
                        slo_doc_name = sample.labels['slodoc']
                        slo_name = sample.labels['name']
                        slo_doc_mx = slo_mx \
                            .setdefault(cluster, {}) \
                            .setdefault(namespace, {}) \
                            .setdefault(slo_doc_name, {})
                        slo_doc_mx.setdefault(slo_name, {})[
                            sample.labels['type']] = sample.value
        app['container_vulnerabilities'] = vuln_mx
        app['deployment_validations'] = validt_mx
        app['service_slo'] = slo_mx

    return apps
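Example #17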
def run(
    dry_run,
    thread_pool_size=10,
    io_dir="throughput/",
    saas_file_name=None,
    env_name=None,
    gitlab_project_id=None,
    defer=None,
):
    all_saas_files = queries.get_saas_files(v1=True, v2=True)
    saas_files = queries.get_saas_files(saas_file_name, env_name, v1=True, v2=True)
    app_interface_settings = queries.get_app_interface_settings()
    if not saas_files:
        logging.error("no saas files found")
        sys.exit(ExitCodes.ERROR)

    # notify different outputs (publish results, slack notifications)
    # we only do this if:
    # - this is not a dry run
    # - there is a single saas file deployed
    notify = not dry_run and len(saas_files) == 1
    if notify:
        saas_file = saas_files[0]
        slack_info = saas_file.get("slack")
        if slack_info:
            slack = slackapi_from_slack_workspace(
                slack_info,
                app_interface_settings,
                QONTRACT_INTEGRATION,
                init_usergroups=False,
            )
            # support built-in start and end slack notifications
            # only in v2 saas files
            if saas_file["apiVersion"] == "v2":
                ri = ResourceInventory()
                console_url = compose_console_url(saas_file, saas_file_name, env_name)
                # deployment result notification
                defer(
                    lambda: slack_notify(
                        saas_file_name,
                        env_name,
                        slack,
                        ri,
                        console_url,
                        in_progress=False,
                    )
                )
                # deployment start notification
                slack_notifications = slack_info.get("notifications")
                if slack_notifications and slack_notifications.get("start"):
                    slack_notify(
                        saas_file_name,
                        env_name,
                        slack,
                        ri,
                        console_url,
                        in_progress=True,
                    )
        else:
            slack = None

    instance = queries.get_gitlab_instance()
    # instance exists in v1 saas files only
    desired_jenkins_instances = [
        s["instance"]["name"] for s in saas_files if s.get("instance")
    ]
    jenkins_map = jenkins_base.get_jenkins_map(
        desired_instances=desired_jenkins_instances
    )
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    try:
        gl = GitLabApi(instance, settings=settings)
    except Exception:
        # allow execution without access to gitlab
        # as long as there are no access attempts.
        gl = None

    saasherder = SaasHerder(
        saas_files,
        thread_pool_size=thread_pool_size,
        gitlab=gl,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        settings=settings,
        jenkins_map=jenkins_map,
        accounts=accounts,
    )
    if len(saasherder.namespaces) == 0:
        logging.warning("no targets found")
        sys.exit(ExitCodes.SUCCESS)

    ri, oc_map = ob.fetch_current_state(
        namespaces=saasherder.namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        init_api_resources=True,
        cluster_admin=saasherder.cluster_admin,
    )
    defer(oc_map.cleanup)
    saasherder.populate_desired_state(ri)

    # validate that this deployment is valid
    # based on promotion information in targets
    if not saasherder.validate_promotions():
        logging.error("invalid promotions")
        ri.register_error()
        sys.exit(ExitCodes.ERROR)

    # if saas_file_name is defined, the integration
    # is being called from multiple running instances
    actions = ob.realize_data(
        dry_run,
        oc_map,
        ri,
        thread_pool_size,
        caller=saas_file_name,
        wait_for_namespace=True,
        no_dry_run_skip_compare=(not saasherder.compare),
        take_over=saasherder.take_over,
    )

    if not dry_run:
        if saasherder.publish_job_logs:
            try:
                ob.follow_logs(oc_map, actions, io_dir)
            except Exception as e:
                logging.error(str(e))
                ri.register_error()
        try:
            ob.validate_data(oc_map, actions)
        except Exception as e:
            logging.error(str(e))
            ri.register_error()

    # publish results of this deployment
    # based on promotion information in targets
    success = not ri.has_error_registered()
    # only publish promotions for deployment jobs (a single saas file)
    if notify:
        # Auto-promote next stages only if there are changes in the
        # promoting stage. This prevents triggering promotions on job
        # re-runs.
        auto_promote = len(actions) > 0
        mr_cli = mr_client_gateway.init(gitlab_project_id=gitlab_project_id)
        saasherder.publish_promotions(success, all_saas_files, mr_cli, auto_promote)

    if not success:
        sys.exit(ExitCodes.ERROR)

    # send human readable notifications to slack
    # we only do this if:
    # - this is not a dry run
    # - there is a single saas file deployed
    # - output is 'events'
    # - no errors were registered
    if notify and slack and actions and slack_info.get("output") == "events":
        for action in actions:
            message = (
                f"[{action['cluster']}] "
                + f"{action['kind']} {action['name']} {action['action']}"
            )
            slack.chat_post_message(message)
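Example #18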
def run(dry_run,
        thread_pool_size=10,
        io_dir='throughput/',
        saas_file_name=None,
        env_name=None,
        gitlab_project_id=None,
        defer=None):
    all_saas_files = queries.get_saas_files()
    saas_files = queries.get_saas_files(saas_file_name, env_name)
    if not saas_files:
        logging.error('no saas files found')
        sys.exit(ExitCodes.ERROR)

    instance = queries.get_gitlab_instance()
    desired_jenkins_instances = [s['instance']['name'] for s in saas_files]
    jenkins_map = jenkins_base.get_jenkins_map(
        desired_instances=desired_jenkins_instances)
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    try:
        gl = GitLabApi(instance, settings=settings)
    except Exception:
        # allow execution without access to gitlab
        # as long as there are no access attempts.
        gl = None

    saasherder = SaasHerder(saas_files,
                            thread_pool_size=thread_pool_size,
                            gitlab=gl,
                            integration=QONTRACT_INTEGRATION,
                            integration_version=QONTRACT_INTEGRATION_VERSION,
                            settings=settings,
                            jenkins_map=jenkins_map,
                            accounts=accounts)
    if len(saasherder.namespaces) == 0:
        logging.warning('no targets found')
        sys.exit(ExitCodes.SUCCESS)

    ri, oc_map = ob.fetch_current_state(
        namespaces=saasherder.namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        init_api_resources=True)
    defer(lambda: oc_map.cleanup())
    saasherder.populate_desired_state(ri)

    # validate that this deployment is valid
    # based on promotion information in targets
    if not saasherder.validate_promotions():
        logging.error('invalid promotions')
        sys.exit(ExitCodes.ERROR)

    # if saas_file_name is defined, the integration
    # is being called from multiple running instances
    actions = ob.realize_data(dry_run,
                              oc_map,
                              ri,
                              caller=saas_file_name,
                              wait_for_namespace=True,
                              no_dry_run_skip_compare=(not saasherder.compare),
                              take_over=saasherder.take_over)

    if not dry_run:
        if saasherder.publish_job_logs:
            try:
                ob.follow_logs(oc_map, actions, io_dir)
            except Exception as e:
                logging.error(str(e))
                ri.register_error()
        try:
            ob.validate_data(oc_map, actions)
        except Exception as e:
            logging.error(str(e))
            ri.register_error()

    # publish results of this deployment
    # based on promotion information in targets
    success = not ri.has_error_registered()
    # only publish promotions for deployment jobs (a single saas file)
    if not dry_run and len(saasherder.saas_files) == 1:
        mr_cli = mr_client_gateway.init(gitlab_project_id=gitlab_project_id)
        saasherder.publish_promotions(success, all_saas_files, mr_cli)

    if not success:
        sys.exit(ExitCodes.ERROR)

    # send human readable notifications to slack
    # we only do this if:
    # - this is not a dry run
    # - there is a single saas file deployed
    # - output is 'events'
    # - no errors were registered
    if not dry_run and len(saasherder.saas_files) == 1:
        saas_file = saasherder.saas_files[0]
        slack_info = saas_file.get('slack')
        if slack_info and actions and slack_info.get('output') == 'events':
            slack = init_slack(slack_info,
                               QONTRACT_INTEGRATION,
                               init_usergroups=False)
            for action in actions:
                message = \
                    f"[{action['cluster']}] " + \
                    f"{action['kind']} {action['name']} {action['action']}"
                slack.chat_post_message(message)
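Example #19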
def setup(thread_pool_size, internal, use_jump_host, integration,
          integration_version, v1, v2):
    """Setup required resources for triggering integrations

    Args:
        thread_pool_size (int): Thread pool size to use
        internal (bool): Should run for internal/external/all clusters
        use_jump_host (bool): Should use jump host to reach clusters
        integration (string): Name of calling integration
        integration_version (string): Version of calling integration
        v1 (bool): Should trigger for v1 SaaS files
        v2 (bool): Should trigger for v2 SaaS files

    Returns:
        saasherder (SaasHerder): a SaasHerder instance
        jenkins_map (dict): Instance names with JenkinsApi instances
        oc_map (OC_Map): a dictionary of OC clients per cluster
        settings (dict): App-interface settings
        error (bool): True if an error happened, False otherwise
    """

    saas_files = queries.get_saas_files(v1=v1, v2=v2)
    if not saas_files:
        logging.error("no saas files found")
        return None, None, None, None, True
    saas_files = [sf for sf in saas_files if is_in_shard(sf["name"])]

    # Remove saas-file targets that are disabled
    for saas_file in saas_files[:]:
        resource_templates = saas_file["resourceTemplates"]
        for rt in resource_templates[:]:
            targets = rt["targets"]
            for target in targets[:]:
                if target["disable"]:
                    targets.remove(target)

    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    gl = GitLabApi(instance, settings=settings)
    jenkins_map = jenkins_base.get_jenkins_map()
    pipelines_providers = queries.get_pipelines_providers()
    tkn_provider_namespaces = [
        pp["namespace"] for pp in pipelines_providers
        if pp["provider"] == "tekton"
    ]

    oc_map = OC_Map(
        namespaces=tkn_provider_namespaces,
        integration=integration,
        settings=settings,
        internal=internal,
        use_jump_host=use_jump_host,
        thread_pool_size=thread_pool_size,
    )

    saasherder = SaasHerder(
        saas_files,
        thread_pool_size=thread_pool_size,
        gitlab=gl,
        integration=integration,
        integration_version=integration_version,
        settings=settings,
        jenkins_map=jenkins_map,
        accounts=accounts,
    )

    return saasherder, jenkins_map, oc_map, settings, False
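A hypothetical caller sketch based on the documented return tuple; the
integration name and version strings below are placeholders, not values
taken from the source.

saasherder, jenkins_map, oc_map, settings, error = setup(
    thread_pool_size=10,
    internal=None,  # interpreting "all clusters" from the docstring
    use_jump_host=True,
    integration='example-trigger-integration',  # placeholder
    integration_version='0.1.0',  # placeholder
    v1=True,
    v2=True,
)
if error:
    sys.exit(1)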
Example #20
def get_apps_data(date, month_delta=1):
    apps = queries.get_apps()
    saas_files = queries.get_saas_files()
    jjb, _ = init_jjb()
    saas_jobs = jjb.get_all_jobs(job_types=['saas-deploy', 'promote-to-prod'])
    build_master_jobs = jjb.get_all_jobs(job_types=['build-master'])
    jenkins_map = jenkins_base.get_jenkins_map()
    time_limit = date - relativedelta(months=month_delta)
    timestamp_limit = \
        int(time_limit.replace(tzinfo=timezone.utc).timestamp())
    saas_build_history = \
        get_build_history(jenkins_map, saas_jobs, timestamp_limit)
    build_master_build_history = \
        get_build_history(jenkins_map, build_master_jobs, timestamp_limit)

    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    secret_content = secret_reader.read_all({'path': DASHDOTDB_SECRET})
    dashdotdb_url = secret_content['url']
    dashdotdb_user = secret_content['username']
    dashdotdb_pass = secret_content['password']
    metrics = requests.get(f'{dashdotdb_url}/api/v1/metrics',
                           auth=(dashdotdb_user, dashdotdb_pass)).text
    namespaces = queries.get_namespaces()

    for app in apps:
        if not app['codeComponents']:
            continue

        app_name = app['name']

        logging.info(f"collecting post-deploy jobs "
                     f"information for {app_name}")
        post_deploy_jobs = {}
        for saas_file in saas_files:
            if saas_file['app']['name'] != app_name:
                continue
            resource_types = saas_file['managedResourceTypes']

            # Only jobs of these types are expected to have a
            # further post-deploy job
            if not any(['Deployment' in resource_types,
                        'DeploymentConfig' not in resource_types]):
                continue

            for resource_template in saas_file['resourceTemplates']:
                for target in resource_template['targets']:
                    cluster = target['namespace']['cluster']['name']
                    namespace = target['namespace']['name']
                    post_deploy_jobs[cluster] = {}
                    post_deploy_jobs[cluster][namespace] = False

        for saas_file in saas_files:
            if saas_file['app']['name'] != app_name:
                continue
            resource_types = saas_file['managedResourceTypes']
            if 'Job' not in resource_types:
                continue
            for resource_template in saas_file['resourceTemplates']:
                for target in resource_template['targets']:

                    cluster = target['namespace']['cluster']['name']
                    namespace = target['namespace']['name']

                    # This block skips the check if the cluster/namespace
                    # has no Deployment/DeploymentConfig job associated.
                    if cluster not in post_deploy_jobs:
                        continue
                    if namespace not in post_deploy_jobs[cluster]:
                        continue

                    # Post-deploy job must depend on an
                    # openshift-saas-deploy job
                    if target['upstream'] is None:
                        continue
                    if target['upstream'].startswith('openshift-saas-deploy-'):
                        post_deploy_jobs[cluster][namespace] = True

        app['post_deploy_jobs'] = post_deploy_jobs

        logging.info(f"collecting promotions for {app_name}")
        app['promotions'] = {}
        saas_repos = [
            c['url'] for c in app['codeComponents']
            if c['resource'] == 'saasrepo'
        ]
        for sr in saas_repos:
            sr_history = saas_build_history.get(sr)
            if not sr_history:
                continue
            successes = [h for h in sr_history if h == 'SUCCESS']
            app['promotions'][sr] = (len(sr_history), len(successes))

        logging.info(f"collecting merge activity for {app_name}")
        app['merge_activity'] = {}
        code_repos = [
            c['url'] for c in app['codeComponents']
            if c['resource'] == 'upstream'
        ]
        for cr in code_repos:
            cr_history = build_master_build_history.get(cr)
            if not cr_history:
                continue
            successes = [h for h in cr_history if h == 'SUCCESS']
            app['merge_activity'][cr] = (len(cr_history), len(successes))

        logging.info(f"collecting vulnerabilities information for {app_name}")
        app_namespaces = []
        for namespace in namespaces:
            if namespace['app']['name'] != app['name']:
                continue
            app_namespaces.append(namespace)
        app_metrics = {}
        for family in text_string_to_metric_families(metrics):
            for sample in family.samples:
                if sample.name != 'imagemanifestvuln_total':
                    continue
                for app_namespace in app_namespaces:
                    cluster = sample.labels['cluster']
                    if app_namespace['cluster']['name'] != cluster:
                        continue
                    namespace = sample.labels['namespace']
                    if app_namespace['name'] != namespace:
                        continue
                    severity = sample.labels['severity']
                    if cluster not in app_metrics:
                        app_metrics[cluster] = {}
                    if namespace not in app_metrics[cluster]:
                        app_metrics[cluster][namespace] = {}
                    if severity not in app_metrics[cluster][namespace]:
                        value = int(sample.value)
                        app_metrics[cluster][namespace][severity] = value
        app['container_vulnerabilities'] = app_metrics

    return apps
Example #21
def run(dry_run,
        thread_pool_size=10,
        saas_file_name=None,
        env_name=None,
        defer=None):
    saas_files = queries.get_saas_files(saas_file_name, env_name)
    if not saas_files:
        logging.error('no saas files found')
        sys.exit(1)

    instance = queries.get_gitlab_instance()
    desired_jenkins_instances = [s['instance']['name'] for s in saas_files]
    jenkins_map = jenkins_base.get_jenkins_map(
        desired_instances=desired_jenkins_instances)
    settings = queries.get_app_interface_settings()
    try:
        gl = GitLabApi(instance, settings=settings)
    except Exception:
        # allow execution without access to gitlab
        # as long as there are no access attempts.
        gl = None

    saasherder = SaasHerder(saas_files,
                            thread_pool_size=thread_pool_size,
                            gitlab=gl,
                            integration=QONTRACT_INTEGRATION,
                            integration_version=QONTRACT_INTEGRATION_VERSION,
                            settings=settings,
                            jenkins_map=jenkins_map)
    if not saasherder.valid:
        sys.exit(1)
    if len(saasherder.namespaces) == 0:
        logging.warning('no targets found')
        sys.exit(0)

    ri, oc_map = ob.fetch_current_state(
        namespaces=saasherder.namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        init_api_resources=True)
    defer(lambda: oc_map.cleanup())
    saasherder.populate_desired_state(ri)
    # if saas_file_name is defined, the integration
    # is being called from multiple running instances
    actions = ob.realize_data(dry_run,
                              oc_map,
                              ri,
                              caller=saas_file_name,
                              wait_for_namespace=True,
                              no_dry_run_skip_compare=(not saasherder.compare),
                              take_over=saasherder.take_over)

    if ri.has_error_registered():
        sys.exit(1)

    # send human readable notifications to slack
    # we only do this if:
    # - this is not a dry run
    # - there is a single saas file deployed
    # - output is 'events'
    # - no errors were registered
    if not dry_run and len(saasherder.saas_files) == 1:
        saas_file = saasherder.saas_files[0]
        slack_info = saas_file.get('slack')
        if slack_info and actions and slack_info.get('output') == 'events':
            slack = init_slack(slack_info, QONTRACT_INTEGRATION)
            for action in actions:
                message = (
                    f"[{action['cluster']}] "
                    f"{action['kind']} {action['name']} {action['action']}"
                )
                slack.chat_post_message(message)
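The defer=None parameter in run() above is typically injected by a decorator that collects cleanup callbacks and fires them when the function exits, which is how the oc_map cleanup is scheduled. A minimal sketch of such a decorator, written under that assumption; the project's actual implementation may differ:

import functools

def defer(func):
    # inject a `defer` keyword argument that records cleanup callbacks;
    # they run in reverse order once the wrapped function returns
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        callbacks = []
        try:
            return func(*args, defer=callbacks.append, **kwargs)
        finally:
            for callback in reversed(callbacks):
                callback()
    return wrapper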
Example #22
def collect_saas_file_configs(settings, instance_name=None):
    # collect a list of jobs per saas file per environment.
    # each saas_file_config should have the structure described
    # in the above query.
    # to make things understandable, each variable used to form
    # the structure will be called `jc_<variable>` (jenkins config).
    saas_file_configs = []
    repo_urls = set()
    saas_files = queries.get_saas_files()
    job_template_name = settings['saasDeployJobTemplate']
    for saas_file in saas_files:
        saas_file_name = saas_file['name']
        jc_instance = saas_file['instance']
        if instance_name is not None and jc_instance['name'] != instance_name:
            continue
        app_name = saas_file['app']['name']
        # currently ignoring the actual Slack workspace
        # as that is configured in Jenkins.
        # revisit this if we support more than a single Slack workspace.
        output = saas_file['slack'].get('output') or 'publish'
        # if the output type is 'publish', we send notifications
        # to the selected slack_channel
        slack_channel = (saas_file['slack']['channel']
                         if output == 'publish' else 'dev-null')
        slack_notify_start = False
        slack_notifications = saas_file['slack'].get('notifications')
        if slack_notifications:
            start = slack_notifications.get('start')
            if start:
                slack_notify_start = True
        timeout = saas_file.get('timeout')
        for resource_template in saas_file['resourceTemplates']:
            url = resource_template['url']
            repo_urls.add(url)
            for target in resource_template['targets']:
                env_name = target['namespace']['environment']['name']
                upstream = target.get('upstream') or ''
                final_job_template_name = (
                    f'{job_template_name}-with-upstream' if upstream
                    else job_template_name)

                jc_name = get_openshift_saas_deploy_job_name(
                    saas_file_name, env_name, settings)
                existing_configs = \
                    [c for c in saas_file_configs if c['name'] == jc_name]
                if existing_configs:
                    # if upstream is defined - append it to existing upstreams
                    if upstream:
                        # should be exactly one
                        jc_data = existing_configs[0]['data']
                        project = jc_data['project']
                        # append upstream to existing upstreams
                        project['upstream'] += f',{upstream}'
                        # update job template name if needed
                        job_definition = project['jobs'][0]
                        if job_template_name in job_definition:
                            job_definition[final_job_template_name] = \
                                job_definition.pop(job_template_name)
                    continue

                # each config is a list with a single item
                # with the following structure:
                # project:
                #   name: 'openshift-saas-deploy-{saas_file_name}-{env_name}'
                #   saas_file_name: '{saas_file_name}'
                #   env_name: '{env_name}'
                #   app_name: '{app_name}'
                #   slack_channel: '{slack_channel}'
                #   slack_notify_start: '{slack_notify_start}'
                #   jobs:
                #   - 'openshift-saas-deploy':
                #       display_name: display name of the job
                jc_data = {
                    'project': {
                        'name': jc_name,
                        'saas_file_name': saas_file_name,
                        'env_name': env_name,
                        'app_name': app_name,
                        'slack_channel': slack_channel,
                        'slack_notify_start': slack_notify_start,
                        'upstream': upstream,
                        'jobs': [{
                            final_job_template_name: {
                                'display_name': jc_name
                            }
                        }]
                    }
                }
                if timeout:
                    jc_data['project']['timeout'] = timeout
                saas_file_configs.append({
                    'name': jc_name,
                    'instance': jc_instance,
                    'type': 'jobs',
                    'data': jc_data
                })

    for saas_file_config in saas_file_configs:
        jc_data = saas_file_config.pop('data')
        saas_file_config['config'] = json.dumps([jc_data])

    return saas_file_configs, repo_urls
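A minimal usage sketch for collect_saas_file_configs; the settings dict is assumed to come from queries.get_app_interface_settings() (it must contain the saasDeployJobTemplate key read above), and the instance name is purely illustrative:

settings = queries.get_app_interface_settings()
saas_file_configs, repo_urls = collect_saas_file_configs(
    settings, instance_name='jenkins-prod')  # hypothetical instance name
for config in saas_file_configs:
    # each entry carries a JSON-encoded job definition under 'config'
    print(config['name'], config['type'], config['instance']['name'])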