Example #1
def sre_checkpoints(ctx):
    apps = queries.get_apps()

    parent_apps = {
        app['parentApp']['path']
        for app in apps
        if app.get('parentApp')
    }

    latest_sre_checkpoints = get_latest_sre_checkpoints()

    checkpoints_data = [
        {
            'name': full_name(app),
            'latest': latest_sre_checkpoints.get(full_name(app), '')
        }
        for app in apps
        if (app['path'] not in parent_apps and
            app['onboardingStatus'] == 'OnBoarded')
    ]

    checkpoints_data.sort(key=lambda c: c['latest'], reverse=True)

    columns = ['name', 'latest']
    print_output(ctx.obj['output'], checkpoints_data, columns)
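A minimal sketch of the parent-app filter used above, run on made-up app records (full_name and print_output are omitted; the data is hypothetical):

# Hypothetical records in the shape sre_checkpoints() expects.
apps = [
    {'path': '/services/parent/app.yml', 'name': 'parent',
     'parentApp': None, 'onboardingStatus': 'OnBoarded'},
    {'path': '/services/child/app.yml', 'name': 'child',
     'parentApp': {'path': '/services/parent/app.yml'},
     'onboardingStatus': 'OnBoarded'},
]

parent_apps = {a['parentApp']['path'] for a in apps if a.get('parentApp')}

# 'parent' is referenced as a parentApp, so only 'child' survives the filter.
leaf_apps = [
    a for a in apps
    if a['path'] not in parent_apps and a['onboardingStatus'] == 'OnBoarded'
]
print([a['name'] for a in leaf_apps])  # ['child']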
Example #2
def get_parents_list() -> set[str]:
    """
    Get a set of all service names that are parents of another service
    """
    parent_set = set()
    apps = queries.get_apps()
    for a in apps:
        if a['parentApp'] is not None:
            parent_set.add(a['parentApp']['name'])

    return parent_set
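A hedged usage sketch of the same parent lookup on hypothetical data, showing how the resulting set can be used to pick out leaf services:

# Hypothetical result of queries.get_apps().
apps = [
    {'name': 'platform', 'parentApp': None},
    {'name': 'billing', 'parentApp': {'name': 'platform'}},
]

parent_set = {
    a['parentApp']['name'] for a in apps if a['parentApp'] is not None
}
leaf_services = [a['name'] for a in apps if a['name'] not in parent_set]
print(parent_set)     # {'platform'}
print(leaf_services)  # ['billing']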
Example #3
def get_app_list() -> dict:
    """
    Get a list of all services managed by app-interface with the parent app
    and the onboarding status of the service.
    """
    apps = queries.get_apps()
    app_info = {
        a['name']: {
            'onboardingStatus': a['onboardingStatus'],
            'parentApp': a['parentApp']
        }
        for a in apps
    }
    return app_info
Example #4
def get_app_list() -> dict:
    """
    Get a list of all services managed by app-interface with the parent app
    and the onboarding status of the service.
    """
    apps = queries.get_apps()
    app_info = {
        a["name"]: {
            "onboardingStatus": a["onboardingStatus"],
            "parentApp": a["parentApp"],
        }
        for a in apps
    }
    return app_info
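Combining the two helpers above gives the onboarded leaf services; a sketch assuming get_app_list() and get_parents_list() are both in scope (the combination itself is not taken from the source):

def onboarded_leaf_services() -> list[str]:
    """Names of onboarded services that are not parents of other services."""
    app_info = get_app_list()
    parents = get_parents_list()
    return sorted(
        name
        for name, info in app_info.items()
        if info['onboardingStatus'] == 'OnBoarded' and name not in parents
    )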
Example #5
def get_apps_data(date, month_delta=1):
    apps = queries.get_apps()
    jjb = init_jjb()
    saas_jobs = jjb.get_all_jobs(job_types=['saas-deploy', 'promote-to-prod'])
    build_master_jobs = jjb.get_all_jobs(job_types=['build-master'])
    jenkins_map = jenkins_base.get_jenkins_map()
    time_limit = date - relativedelta(months=month_delta)
    timestamp_limit = \
        int(time_limit.replace(tzinfo=timezone.utc).timestamp())
    saas_build_history = \
        get_build_history(jenkins_map, saas_jobs, timestamp_limit)
    build_master_build_history = \
        get_build_history(jenkins_map, build_master_jobs, timestamp_limit)

    for app in apps:
        if not app['codeComponents']:
            continue

        app_name = app['name']
        logging.info(f"collecting promotions for {app_name}")
        app['promotions'] = {}
        saas_repos = [
            c['url'] for c in app['codeComponents']
            if c['resource'] == 'saasrepo'
        ]
        for sr in saas_repos:
            sr_history = saas_build_history.get(sr)
            if not sr_history:
                continue
            successes = [h for h in sr_history if h == 'SUCCESS']
            app['promotions'][sr] = (len(sr_history), len(successes))

        logging.info(f"collecting merge activity for {app_name}")
        app['merge_activity'] = {}
        code_repos = [
            c['url'] for c in app['codeComponents']
            if c['resource'] == 'upstream'
        ]
        for cr in code_repos:
            cr_history = build_master_build_history.get(cr)
            if not cr_history:
                continue
            successes = [h for h in cr_history if h == 'SUCCESS']
            app['merge_activity'][cr] = (len(cr_history), len(successes))

    return apps
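The timestamp_limit computed above is just dateutil's relativedelta plus a UTC epoch conversion; a small standalone sketch:

from datetime import datetime, timezone

from dateutil.relativedelta import relativedelta

# One month back from a (naive) reference date, expressed as a UTC epoch
# timestamp, mirroring the timestamp_limit computation above.
date = datetime(2021, 6, 15)
time_limit = date - relativedelta(months=1)  # 2021-05-15 00:00:00
timestamp_limit = int(time_limit.replace(tzinfo=timezone.utc).timestamp())
print(timestamp_limit)  # 1621036800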
Example #6
    def ai_find_app(self, _, args):
        """Search apps registered in app-interface"""

        if len(args) < 1:
            return "Must supply a search argument"

        term = args[0].lower()
        if len(term) < 3:
            return f"Search term '{term}' is too short (min 3 characters)"

        server = self.config['gql_server']
        token = self.config['gql_token']
        gql.init(server, token)
        apps = queries.get_apps()

        found = []
        for app in apps:
            if term in app['name'].lower():
                found.append(app)

        if len(found) == 0:
            return f"No apps found for term '{term}'."

        return {'apps': found}
Example #7
def collect_to(to):
    """Collect audience to send email to from to object

    Arguments:
        to {dict} -- AppInterfaceEmailAudience_v1 object

    Raises:
        AttributeError: Unknown alias

    Returns:
        set -- Audience to send email to
    """
    audience = set()

    aliases = to.get('aliases')
    if aliases:
        for alias in aliases:
            if alias == 'all-users':
                users = queries.get_users()
                to['users'] = users
            elif alias == 'all-service-owners':
                services = queries.get_apps()
                to['services'] = services
            else:
                raise AttributeError(f"unknown alias: {alias}")

    services = to.get('services')
    if services:
        for service in services:
            service_owners = service.get('serviceOwners')
            if not service_owners:
                continue

            for service_owner in service_owners:
                audience.add(service_owner['email'])

    clusters = to.get('clusters')
    if clusters:
        # TODO: implement this
        for cluster in clusters:
            pass

    namespaces = to.get('namespaces')
    if namespaces:
        # TODO: implement this
        for namespace in namespaces:
            pass

    aws_accounts = to.get('aws_accounts')
    if aws_accounts:
        for account in aws_accounts:
            account_owners = account.get('accountOwners')
            if not account_owners:
                continue

            for account_owner in account_owners:
                audience.add(account_owner['email'])

    roles = to.get('roles')
    if roles:
        for role in roles:
            users = role.get('users')
            if not users:
                continue

            for user in users:
                audience.add(user['org_username'])

    users = to.get('users')
    if users:
        for user in users:
            audience.add(user['org_username'])

    return audience
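A hedged usage sketch of collect_to with a hand-built 'to' dict; the field names follow the lookups above, the values are made up:

# Hypothetical AppInterfaceEmailAudience_v1-shaped input.
to = {
    'services': [
        {'serviceOwners': [{'email': 'owner@example.com'}]},
    ],
    'roles': [
        {'users': [{'org_username': 'jdoe'}]},
    ],
}
print(collect_to(to))  # {'owner@example.com', 'jdoe'} (set, order may vary)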
Example #8
def services(ctx):
    apps = queries.get_apps()
    columns = ['name', 'path', 'onboardingStatus']
    print_output(ctx.obj['output'], apps, columns)
Example #9
def collect_to(to):
    """Collect audience to send email to from to object

    Arguments:
        to {dict} -- AppInterfaceEmailAudience_v1 object

    Raises:
        AttributeError: Unknown alias

    Returns:
        set -- Audience to send email to
    """
    audience = set()

    aliases = to.get("aliases")
    if aliases:
        for alias in aliases:
            if alias == "all-users":
                users = queries.get_users()
                to["users"] = users
            elif alias == "all-service-owners":
                services = queries.get_apps()
                to["services"] = services
            else:
                raise AttributeError(f"unknown alias: {alias}")

    services = to.get("services")
    if services:
        for service in services:
            service_owners = service.get("serviceOwners")
            if not service_owners:
                continue

            for service_owner in service_owners:
                audience.add(service_owner["email"])

    # TODO: implement clusters and namespaces

    aws_accounts = to.get("aws_accounts")
    if aws_accounts:
        for account in aws_accounts:
            account_owners = account.get("accountOwners")
            if not account_owners:
                continue

            for account_owner in account_owners:
                audience.add(account_owner["email"])

    roles = to.get("roles")
    if roles:
        for role in roles:
            users = role.get("users")
            if not users:
                continue

            for user in users:
                audience.add(user["org_username"])

    users = to.get("users")
    if users:
        for user in users:
            audience.add(user["org_username"])

    return audience
Example #10
def get_apps_data(date, month_delta=1, thread_pool_size=10):
    apps = queries.get_apps()
    saas_files = queries.get_saas_files()
    jjb, _ = init_jjb()
    jenkins_map = jenkins_base.get_jenkins_map()
    time_limit = date - relativedelta(months=month_delta)
    timestamp_limit = \
        int(time_limit.replace(tzinfo=timezone.utc).timestamp())

    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    secret_content = secret_reader.read_all({'path': DASHDOTDB_SECRET})
    dashdotdb_url = secret_content['url']
    dashdotdb_user = secret_content['username']
    dashdotdb_pass = secret_content['password']
    auth = (dashdotdb_user, dashdotdb_pass)
    vuln_metrics = requests.get(
        f'{dashdotdb_url}/api/v1/imagemanifestvuln/metrics', auth=auth).text
    validt_metrics = requests.get(
        f'{dashdotdb_url}/api/v1/deploymentvalidation/metrics', auth=auth).text
    slo_metrics = requests.get(
        f'{dashdotdb_url}/api/v1/serviceslometrics/metrics', auth=auth).text
    namespaces = queries.get_namespaces()

    build_jobs = jjb.get_all_jobs(job_types=['build'])
    jobs_to_get = build_jobs.copy()

    saas_deploy_jobs = []
    for saas_file in saas_files:
        saas_file_name = saas_file['name']
        for template in saas_file["resourceTemplates"]:
            for target in template["targets"]:
                job = {}
                job['env'] = target["namespace"]["environment"]["name"]
                job['app'] = target["namespace"]["app"]["name"]
                job['cluster'] = target['namespace']['cluster']['name']
                job['namespace'] = target['namespace']['name']
                job['name'] = get_openshift_saas_deploy_job_name(
                    saas_file_name, job['env'], settings)
                job['saas_file_name'] = saas_file_name
                job['instance'] = saas_file["instance"]["name"]
                saas_deploy_jobs.append(job)
                if job['instance'] not in jobs_to_get:
                    jobs_to_get[job['instance']] = [job]
                else:
                    jobs_to_get[job['instance']].append(job)

    job_history = get_build_history_pool(jenkins_map, jobs_to_get,
                                         timestamp_limit, thread_pool_size)

    for app in apps:
        if not app['codeComponents']:
            continue

        app_name = app['name']

        logging.info(f"collecting post-deploy jobs "
                     f"information for {app_name}")
        post_deploy_jobs = {}
        for saas_file in saas_files:
            if saas_file['app']['name'] != app_name:
                continue
            resource_types = saas_file['managedResourceTypes']

            # Only jobs of these types are expected to have a
            # further post-deploy job
            if not ('Deployment' in resource_types
                    or 'DeploymentConfig' not in resource_types):
                continue

            for resource_template in saas_file['resourceTemplates']:
                for target in resource_template['targets']:
                    cluster = target['namespace']['cluster']['name']
                    namespace = target['namespace']['name']
                    post_deploy_jobs[cluster] = {}
                    post_deploy_jobs[cluster][namespace] = False

        for saas_file in saas_files:
            if saas_file['app']['name'] != app_name:
                continue
            resource_types = saas_file['managedResourceTypes']
            if 'Job' not in resource_types:
                continue
            for resource_template in saas_file['resourceTemplates']:
                for target in resource_template['targets']:

                    cluster = target['namespace']['cluster']['name']
                    namespace = target['namespace']['name']

                    # This block skips the check if the cluster/namespace
                    # has no Deployment/DeploymentConfig job associated.
                    if cluster not in post_deploy_jobs:
                        continue
                    if namespace not in post_deploy_jobs[cluster]:
                        continue

                    # Post-deploy job must depend on an openshift-saas-deploy
                    # job
                    if target['upstream'] is None:
                        continue
                    if target['upstream'].startswith('openshift-saas-deploy-'):
                        post_deploy_jobs[cluster][namespace] = True

        app['post_deploy_jobs'] = post_deploy_jobs

        logging.info(f"collecting promotion history for {app_name}")
        app["promotions"] = {}
        for job in saas_deploy_jobs:
            if job['app'] != app_name:
                continue
            if job['name'] not in job_history:
                continue
            history = job_history[job["name"]]
            saas_file_name = job['saas_file_name']
            if saas_file_name not in app["promotions"]:
                app["promotions"][saas_file_name] = [{
                    "env":
                    job["env"],
                    "cluster":
                    job["cluster"],
                    "namespace":
                    job["namespace"],
                    **history
                }]
            else:
                app["promotions"][saas_file_name].append({
                    "env":
                    job["env"],
                    "cluster":
                    job["cluster"],
                    "namespace":
                    job["namespace"],
                    **history
                })

        logging.info(f"collecting merge activity for {app_name}")
        app['merge_activity'] = {}
        code_repos = [
            c['url'] for c in app['codeComponents']
            if c['resource'] == 'upstream'
        ]
        for jobs in build_jobs.values():
            for job in jobs:
                try:
                    repo_url = get_repo_url(job)
                except KeyError:
                    continue
                if repo_url not in code_repos:
                    continue
                if job['name'] not in job_history:
                    continue
                history = job_history[job['name']]
                if repo_url not in app["merge_activity"]:
                    app["merge_activity"][repo_url] = [{
                        "branch": job["branch"],
                        **history
                    }]
                else:
                    app["merge_activity"][repo_url].append({
                        "branch":
                        job["branch"],
                        **history
                    })

        logging.info(f"collecting dashdotdb information for {app_name}")
        app_namespaces = []
        for namespace in namespaces:
            if namespace['app']['name'] != app['name']:
                continue
            app_namespaces.append(namespace)
        vuln_mx = {}
        validt_mx = {}
        slo_mx = {}
        for family in text_string_to_metric_families(vuln_metrics):
            for sample in family.samples:
                if sample.name == 'imagemanifestvuln_total':
                    for app_namespace in app_namespaces:
                        cluster = sample.labels['cluster']
                        if app_namespace['cluster']['name'] != cluster:
                            continue
                        namespace = sample.labels['namespace']
                        if app_namespace['name'] != namespace:
                            continue
                        severity = sample.labels['severity']
                        if cluster not in vuln_mx:
                            vuln_mx[cluster] = {}
                        if namespace not in vuln_mx[cluster]:
                            vuln_mx[cluster][namespace] = {}
                        if severity not in vuln_mx[cluster][namespace]:
                            value = int(sample.value)
                            vuln_mx[cluster][namespace][severity] = value
        for family in text_string_to_metric_families(validt_metrics):
            for sample in family.samples:
                if sample.name == 'deploymentvalidation_total':
                    for app_namespace in app_namespaces:
                        cluster = sample.labels['cluster']
                        if app_namespace['cluster']['name'] != cluster:
                            continue
                        namespace = sample.labels['namespace']
                        if app_namespace['name'] != namespace:
                            continue
                        validation = sample.labels['validation']
                        # dvo: fail == 1, pass == 0, py: true == 1, false == 0
                        # so: ({false|pass}, {true|fail})
                        status = ('Passed',
                                  'Failed')[int(sample.labels['status'])]
                        if cluster not in validt_mx:
                            validt_mx[cluster] = {}
                        if namespace not in validt_mx[cluster]:
                            validt_mx[cluster][namespace] = {}
                        if validation not in validt_mx[cluster][namespace]:
                            validt_mx[cluster][namespace][validation] = {}
                        if status not in validt_mx[cluster][namespace][
                                validation]:  # noqa: E501
                            validt_mx[cluster][namespace][validation][
                                status] = {}  # noqa: E501
                        value = int(sample.value)
                        validt_mx[cluster][namespace][validation][
                            status] = value  # noqa: E501
        for family in text_string_to_metric_families(slo_metrics):
            for sample in family.samples:
                if sample.name == 'serviceslometrics':
                    for app_namespace in app_namespaces:
                        cluster = sample.labels['cluster']
                        if app_namespace['cluster']['name'] != cluster:
                            continue
                        namespace = sample.labels['namespace']
                        if app_namespace['name'] != namespace:
                            continue
                        slo_doc_name = sample.labels['slodoc']
                        slo_name = sample.labels['name']
                        if cluster not in slo_mx:
                            slo_mx[cluster] = {}
                        if namespace not in slo_mx[cluster]:
                            slo_mx[cluster][namespace] = {}
                        if slo_doc_name not in slo_mx[cluster][namespace]:  # pylint: disable=line-too-long # noqa: E501
                            slo_mx[cluster][namespace][slo_doc_name] = {}
                        if slo_name not in slo_mx[cluster][namespace][
                                slo_doc_name]:  # noqa: E501
                            slo_mx[cluster][namespace][slo_doc_name][
                                slo_name] = {  # noqa: E501
                                    sample.labels['type']: sample.value
                                }
                        else:
                            slo_mx[cluster][namespace][slo_doc_name][
                                slo_name].update({  # pylint: disable=line-too-long # noqa: E501
                                    sample.labels['type']:
                                    sample.value
                                })
        app['container_vulnerabilities'] = vuln_mx
        app['deployment_validations'] = validt_mx
        app['service_slo'] = slo_mx

    return apps
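The dashdotdb loops above lean on prometheus_client's text parser; a minimal sketch of how a sample's name, labels and value come out of exposition-format text (the payload is made up):

from prometheus_client.parser import text_string_to_metric_families

# Hypothetical exposition-format payload resembling a dashdotdb response.
metrics_text = (
    'imagemanifestvuln_total'
    '{cluster="prod01",namespace="myapp",severity="High"} 3\n'
)

for family in text_string_to_metric_families(metrics_text):
    for sample in family.samples:
        # -> imagemanifestvuln_total {'cluster': 'prod01', ...} 3
        print(sample.name, sample.labels, int(sample.value))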
Example #11
def get_apps_data(date, month_delta=1):
    apps = queries.get_apps()
    saas_files = queries.get_saas_files()
    jjb, _ = init_jjb()
    saas_jobs = jjb.get_all_jobs(job_types=['saas-deploy', 'promote-to-prod'])
    build_master_jobs = jjb.get_all_jobs(job_types=['build-master'])
    jenkins_map = jenkins_base.get_jenkins_map()
    time_limit = date - relativedelta(months=month_delta)
    timestamp_limit = \
        int(time_limit.replace(tzinfo=timezone.utc).timestamp())
    saas_build_history = \
        get_build_history(jenkins_map, saas_jobs, timestamp_limit)
    build_master_build_history = \
        get_build_history(jenkins_map, build_master_jobs, timestamp_limit)

    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    secret_content = secret_reader.read_all({'path': DASHDOTDB_SECRET})
    dashdotdb_url = secret_content['url']
    dashdotdb_user = secret_content['username']
    dashdotdb_pass = secret_content['password']
    metrics = requests.get(f'{dashdotdb_url}/api/v1/metrics',
                           auth=(dashdotdb_user, dashdotdb_pass)).text
    namespaces = queries.get_namespaces()

    for app in apps:
        if not app['codeComponents']:
            continue

        app_name = app['name']

        logging.info(f"collecting post-deploy jobs "
                     f"information for {app_name}")
        post_deploy_jobs = {}
        for saas_file in saas_files:
            if saas_file['app']['name'] != app_name:
                continue
            resource_types = saas_file['managedResourceTypes']

            # Only jobs of these types are expected to have a
            # further post-deploy job
            if not ('Deployment' in resource_types
                    or 'DeploymentConfig' not in resource_types):
                continue

            for resource_template in saas_file['resourceTemplates']:
                for target in resource_template['targets']:
                    cluster = target['namespace']['cluster']['name']
                    namespace = target['namespace']['name']
                    post_deploy_jobs[cluster] = {}
                    post_deploy_jobs[cluster][namespace] = False

        for saas_file in saas_files:
            if saas_file['app']['name'] != app_name:
                continue
            resource_types = saas_file['managedResourceTypes']
            if 'Job' not in resource_types:
                continue
            for resource_template in saas_file['resourceTemplates']:
                for target in resource_template['targets']:

                    cluster = target['namespace']['cluster']['name']
                    namespace = target['namespace']['name']

                    # This block skips the check if the cluster/namespace
                    # has no Deployment/DeploymentConfig job associated.
                    if cluster not in post_deploy_jobs:
                        continue
                    if namespace not in post_deploy_jobs[cluster]:
                        continue

                    # Post-deploy job must depend on an openshift-saas-deploy
                    # job
                    if target['upstream'] is None:
                        continue
                    if target['upstream'].startswith('openshift-saas-deploy-'):
                        post_deploy_jobs[cluster][namespace] = True

        app['post_deploy_jobs'] = post_deploy_jobs

        logging.info(f"collecting promotions for {app_name}")
        app['promotions'] = {}
        saas_repos = [
            c['url'] for c in app['codeComponents']
            if c['resource'] == 'saasrepo'
        ]
        for sr in saas_repos:
            sr_history = saas_build_history.get(sr)
            if not sr_history:
                continue
            successes = [h for h in sr_history if h == 'SUCCESS']
            app['promotions'][sr] = (len(sr_history), len(successes))

        logging.info(f"collecting merge activity for {app_name}")
        app['merge_activity'] = {}
        code_repos = [
            c['url'] for c in app['codeComponents']
            if c['resource'] == 'upstream'
        ]
        for cr in code_repos:
            cr_history = build_master_build_history.get(cr)
            if not cr_history:
                continue
            successes = [h for h in cr_history if h == 'SUCCESS']
            app['merge_activity'][cr] = (len(cr_history), len(successes))

        logging.info(f"collecting vulnerabilities information for {app_name}")
        app_namespaces = []
        for namespace in namespaces:
            if namespace['app']['name'] != app['name']:
                continue
            app_namespaces.append(namespace)
        app_metrics = {}
        for family in text_string_to_metric_families(metrics):
            for sample in family.samples:
                if sample.name != 'imagemanifestvuln_total':
                    continue
                for app_namespace in app_namespaces:
                    cluster = sample.labels['cluster']
                    if app_namespace['cluster']['name'] != cluster:
                        continue
                    namespace = sample.labels['namespace']
                    if app_namespace['name'] != namespace:
                        continue
                    severity = sample.labels['severity']
                    if cluster not in app_metrics:
                        app_metrics[cluster] = {}
                    if namespace not in app_metrics[cluster]:
                        app_metrics[cluster][namespace] = {}
                    if severity not in app_metrics[cluster][namespace]:
                        value = int(sample.value)
                        app_metrics[cluster][namespace][severity] = value
        app['container_vulnerabilities'] = app_metrics

    return apps
Example #12
def services(ctx):
    apps = queries.get_apps()
    columns = ['name']
    print_output(ctx.obj['output'], apps, columns)
Example #13
def get_apps_data(date, month_delta=1):
    apps = queries.get_apps()
    jjb, _ = init_jjb()
    saas_jobs = jjb.get_all_jobs(job_types=['saas-deploy', 'promote-to-prod'])
    build_master_jobs = jjb.get_all_jobs(job_types=['build-master'])
    jenkins_map = jenkins_base.get_jenkins_map()
    time_limit = date - relativedelta(months=month_delta)
    timestamp_limit = \
        int(time_limit.replace(tzinfo=timezone.utc).timestamp())
    saas_build_history = \
        get_build_history(jenkins_map, saas_jobs, timestamp_limit)
    build_master_build_history = \
        get_build_history(jenkins_map, build_master_jobs, timestamp_limit)

    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    secret_content = secret_reader.read_all({'path': DASHDOTDB_SECRET})
    dashdotdb_url = secret_content['url']
    dashdotdb_user = secret_content['username']
    dashdotdb_pass = secret_content['password']
    metrics = requests.get(f'{dashdotdb_url}/api/v1/metrics',
                           auth=(dashdotdb_user, dashdotdb_pass)).text
    namespaces = queries.get_namespaces()

    for app in apps:
        if not app['codeComponents']:
            continue

        app_name = app['name']
        logging.info(f"collecting promotions for {app_name}")
        app['promotions'] = {}
        saas_repos = [
            c['url'] for c in app['codeComponents']
            if c['resource'] == 'saasrepo'
        ]
        for sr in saas_repos:
            sr_history = saas_build_history.get(sr)
            if not sr_history:
                continue
            successes = [h for h in sr_history if h == 'SUCCESS']
            app['promotions'][sr] = (len(sr_history), len(successes))

        logging.info(f"collecting merge activity for {app_name}")
        app['merge_activity'] = {}
        code_repos = [
            c['url'] for c in app['codeComponents']
            if c['resource'] == 'upstream'
        ]
        for cr in code_repos:
            cr_history = build_master_build_history.get(cr)
            if not cr_history:
                continue
            successes = [h for h in cr_history if h == 'SUCCESS']
            app['merge_activity'][cr] = (len(cr_history), len(successes))

        logging.info(f"collecting dashdotdb information for {app_name}")
        app_namespaces = []
        for namespace in namespaces:
            if namespace['app']['name'] != app['name']:
                continue
            app_namespaces.append(namespace)
        vuln_mx = {}
        validt_mx = {}
        for family in text_string_to_metric_families(metrics):
            for sample in family.samples:
                if sample.name == 'imagemanifestvuln_total':
                    for app_namespace in app_namespaces:
                        cluster = sample.labels['cluster']
                        if app_namespace['cluster']['name'] != cluster:
                            continue
                        namespace = sample.labels['namespace']
                        if app_namespace['name'] != namespace:
                            continue
                        severity = sample.labels['severity']
                        if cluster not in vuln_mx:
                            vuln_mx[cluster] = {}
                        if namespace not in vuln_mx[cluster]:
                            vuln_mx[cluster][namespace] = {}
                        if severity not in vuln_mx[cluster][namespace]:
                            value = int(sample.value)
                            vuln_mx[cluster][namespace][severity] = value
                if sample.name == 'deploymentvalidation_total':
                    for app_namespace in app_namespaces:
                        cluster = sample.labels['cluster']
                        if app_namespace['cluster']['name'] != cluster:
                            continue
                        namespace = sample.labels['namespace']
                        if app_namespace['name'] != namespace:
                            continue
                        validation = sample.labels['validation']
                        # dvo: fail == 1, pass == 0, py: true == 1, false == 0
                        # so: ({false|pass}, {true|fail})
                        status = ('Passed',
                                  'Failed')[int(sample.labels['status'])]
                        if cluster not in validt_mx:
                            validt_mx[cluster] = {}
                        if namespace not in validt_mx[cluster]:
                            validt_mx[cluster][namespace] = {}
                        if validation not in validt_mx[cluster][namespace]:
                            validt_mx[cluster][namespace][validation] = {}
                        if status not in validt_mx[cluster][namespace][
                                validation]:  # noqa: E501
                            validt_mx[cluster][namespace][validation][
                                status] = {}  # noqa: E501
                        value = int(sample.value)
                        validt_mx[cluster][namespace][validation][
                            status] = value  # noqa: E501

        app['container_vulnerabilities'] = vuln_mx
        app['deployment_validations'] = validt_mx

    return apps
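The nested 'if key not in ...' scaffolding used to build vuln_mx and validt_mx can also be written with collections.defaultdict; a behavior-preserving sketch of the severity accumulation (an alternative, not what the source uses):

from collections import defaultdict

# cluster -> namespace -> severity -> count; setdefault keeps the first value
# seen per severity, matching the 'if severity not in ...' guard above.
vuln_mx = defaultdict(lambda: defaultdict(dict))

def record_vuln(cluster, namespace, severity, value):
    vuln_mx[cluster][namespace].setdefault(severity, int(value))

record_vuln('prod01', 'myapp', 'High', 3)
record_vuln('prod01', 'myapp', 'High', 5)  # ignored, first value wins
print(vuln_mx['prod01']['myapp'])  # {'High': 3}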