def run(dry_run=False, thread_pool_size=10, internal=None,
        use_jump_host=True, take_over=True, defer=None):
    namespaces = [
        namespace_info for namespace_info
        in queries.get_namespaces()
        if namespace_info.get('limitRanges')
    ]

    namespaces = construct_resources(namespaces)

    if not namespaces:
        logging.debug("No LimitRanges definition found in app-interface!")
        sys.exit(0)

    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=['LimitRange'],
        internal=internal,
        use_jump_host=use_jump_host)
    defer(lambda: oc_map.cleanup())

    add_desired_state(namespaces, ri, oc_map)
    ob.realize_data(dry_run, oc_map, ri,
                    enable_deletion=True, take_over=take_over)

def run(dry_run: bool, thread_pool_size=10, internal: Optional[bool] = None,
        use_jump_host=True, defer=None):
    all_namespaces = queries.get_namespaces(minimal=True)
    shard_namespaces, duplicates = get_shard_namespaces(all_namespaces)

    desired_state = get_desired_state(shard_namespaces)

    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(namespaces=shard_namespaces,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings, internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size,
                    init_projects=True)
    defer(oc_map.cleanup)

    results = threaded.run(manage_namespaces, desired_state,
                           thread_pool_size, return_exceptions=True,
                           dry_run=dry_run, oc_map=oc_map)

    err = check_results(desired_state, results)
    if err or duplicates:
        sys.exit(ExitCodes.ERROR)

def run(
    dry_run,
    thread_pool_size=10,
    internal=None,
    use_jump_host=True,
    take_over=True,
    defer=None,
):
    namespaces = [
        namespace_info
        for namespace_info in queries.get_namespaces()
        if namespace_info.get("limitRanges")
    ]

    namespaces = construct_resources(namespaces)

    if not namespaces:
        logging.debug("No LimitRanges definition found in app-interface!")
        sys.exit(0)

    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=["LimitRange"],
        internal=internal,
        use_jump_host=use_jump_host,
    )
    defer(oc_map.cleanup)

    add_desired_state(namespaces, ri, oc_map)
    ob.realize_data(dry_run, oc_map, ri, thread_pool_size, take_over=take_over)

    if ri.has_error_registered():
        sys.exit(1)

def run(dry_run, thread_pool_size=10, internal=None, use_jump_host=True,
        defer=None):
    namespaces = [
        namespace_info
        for namespace_info in queries.get_namespaces()
        if namespace_info.get("managedRoles")
        and is_in_shard(f"{namespace_info['cluster']['name']}/"
                        + f"{namespace_info['name']}")
    ]
    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=["RoleBinding.authorization.openshift.io"],
        internal=internal,
        use_jump_host=use_jump_host,
    )
    defer(oc_map.cleanup)

    fetch_desired_state(ri, oc_map)
    ob.realize_data(dry_run, oc_map, ri, thread_pool_size)

    if ri.has_error_registered():
        sys.exit(1)

def namespaces(ctx, name):
    namespaces = queries.get_namespaces()
    if name:
        namespaces = [ns for ns in namespaces if ns['name'] == name]

    columns = ['name', 'cluster.name', 'app.name']
    print_output(ctx.obj['output'], namespaces, columns)

def run(dry_run, thread_pool_size=10, internal=None, use_jump_host=True,
        take_over=True, defer=None):
    namespaces = [
        namespace_info for namespace_info
        in queries.get_namespaces()
        if namespace_info.get('quota')
    ]

    if not namespaces:
        logging.debug("No ResourceQuota definition found in app-interface!")
        sys.exit(0)

    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=['ResourceQuota'],
        internal=internal,
        use_jump_host=use_jump_host)
    defer(lambda: oc_map.cleanup())

    fetch_desired_state(namespaces, ri, oc_map)
    ob.realize_data(dry_run, oc_map, ri)

    if ri.has_error_registered():
        sys.exit(1)

def run(dry_run=False, thread_pool_size=10, internal=None,
        use_jump_host=True, vault_output_path='', defer=None):
    namespaces = [
        namespace_info for namespace_info
        in queries.get_namespaces()
        if namespace_info.get('openshiftServiceAccountTokens')
    ]
    # also collect the namespaces referenced by the service account tokens
    # so their current state is fetched as well. note: appending to the
    # list being iterated means the appended target namespaces are visited
    # too; the guard above skips them unless they define tokens themselves.
    for namespace_info in namespaces:
        if not namespace_info.get('openshiftServiceAccountTokens'):
            continue
        for sat in namespace_info['openshiftServiceAccountTokens']:
            namespaces.append(sat['namespace'])

    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=['Secret'],
        internal=internal,
        use_jump_host=use_jump_host)
    defer(lambda: oc_map.cleanup())

    fetch_desired_state(namespaces, ri, oc_map)
    ob.realize_data(dry_run, oc_map, ri)

    if not dry_run and vault_output_path:
        write_outputs_to_vault(vault_output_path, ri)

    if ri.has_error_registered():
        sys.exit(1)

def get_gql_namespaces_in_shard() -> List[Any]:
    """
    Get all namespaces from qontract-server and filter those which are
    in our shard
    """
    all_namespaces = queries.get_namespaces()

    return [ns for ns in all_namespaces
            if not ns.get('delete') and
            is_in_shard(f"{ns['cluster']['name']}/{ns['name']}")]

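# `is_in_shard` is provided by the surrounding codebase. The helper below is
# only an illustrative sketch (an assumption, not the real implementation) of
# how a "cluster/namespace" key could be mapped deterministically to a shard.
import hashlib


def _is_in_shard_sketch(key: str, shard_id: int = 0, shards: int = 1) -> bool:
    """Return True if `key` hashes into the shard this process owns."""
    if shards <= 1:
        return True
    digest = hashlib.sha256(key.encode("utf-8")).hexdigest()
    return int(digest, 16) % shards == shard_id


# Example: _is_in_shard_sketch("cluster-1/my-namespace", shard_id=0, shards=3)
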
def namespaces(ctx, name):
    namespaces = queries.get_namespaces()
    if name:
        namespaces = [ns for ns in namespaces if ns['name'] == name]

    columns = ['name', 'cluster.name', 'app.name']
    # TODO(mafriedm): fix this
    # do not sort
    ctx.obj['options']['sort'] = False
    print_output(ctx.obj['options'], namespaces, columns)

def service_owners_for_rds_instance(ctx, aws_account, identifier):
    namespaces = queries.get_namespaces()
    service_owners = []
    for namespace_info in namespaces:
        if namespace_info.get('terraformResources') is None:
            continue

        for tf in namespace_info.get('terraformResources'):
            if tf['provider'] == 'rds' and tf['account'] == aws_account and \
                    tf['identifier'] == identifier:
                service_owners = namespace_info['app']['serviceOwners']
                break

    columns = ['name', 'email']
    print_output(ctx.obj['output'], service_owners, columns)

def acme_accounts(ctx):
    namespaces = queries.get_namespaces()
    acme_usage = {}
    for namespace_info in namespaces:
        if namespace_info.get('openshiftAcme') is None:
            continue
        namespace_name = namespace_info['name']
        cluster_name = namespace_info['cluster']['name']
        acme_secret = \
            namespace_info['openshiftAcme']['accountSecret']['path']
        acme_usage.setdefault(acme_secret, [])
        acme_usage[acme_secret].append(f"{cluster_name}/{namespace_name}")

    usage = [{'path': k, 'usage': len(v), 'namespaces': ', '.join(v)}
             for k, v in acme_usage.items()]

    columns = ['path', 'usage', 'namespaces']
    print_output(ctx.obj['output'], usage, columns)

def run(dry_run=False, thread_pool_size=10, internal=None,
        use_jump_host=True, defer=None):
    namespaces = [
        namespace_info for namespace_info
        in queries.get_namespaces()
        if namespace_info.get('managedRoles')
    ]
    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=['RoleBinding'],
        internal=internal,
        use_jump_host=use_jump_host)
    defer(lambda: oc_map.cleanup())

    fetch_desired_state(ri, oc_map)
    ob.realize_data(dry_run, oc_map, ri)

def run(dry_run, thread_pool_size=10, internal=None, use_jump_host=True,
        defer=None):
    try:
        namespaces = [
            namespace_info for namespace_info
            in queries.get_namespaces()
            if namespace_info.get('openshiftAcme')
        ]

        namespaces = construct_resources(namespaces)

        ri, oc_map = ob.fetch_current_state(
            namespaces=namespaces,
            thread_pool_size=thread_pool_size,
            integration=QONTRACT_INTEGRATION,
            integration_version=QONTRACT_INTEGRATION_VERSION,
            override_managed_types=['Deployment', 'Role', 'RoleBinding',
                                    'ServiceAccount', 'Secret'],
            internal=internal,
            use_jump_host=use_jump_host)
        add_desired_state(namespaces, ri, oc_map)

        defer(lambda: oc_map.cleanup())

        ob.realize_data(dry_run, oc_map, ri)

        if ri.has_error_registered():
            sys.exit(1)
    except Exception as e:
        msg = 'There was a problem running openshift acme reconcile.'
        msg += ' Exception: {}'
        msg = msg.format(str(e))
        logging.error(msg)
        sys.exit(1)

def run(dry_run, thread_pool_size=10):
    namespaces = queries.get_namespaces()
    tfrs_to_mirror = []
    for namespace in namespaces:
        if namespace["terraformResources"] is None:
            continue

        for tfr in namespace["terraformResources"]:
            if tfr["provider"] != "ecr":
                continue
            if tfr["mirror"] is None:
                continue
            tfrs_to_mirror.append(tfr)

    work_list = threaded.run(EcrMirror, tfrs_to_mirror,
                             thread_pool_size=thread_pool_size,
                             dry_run=dry_run)
    threaded.run(worker, work_list, thread_pool_size=thread_pool_size)

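# `threaded.run` above comes from the project's utilities. The sketch below
# only illustrates the assumed behaviour: map a callable over an iterable
# using a thread pool, forwarding extra keyword arguments to every call. It
# is an illustration, not the actual implementation.
from concurrent.futures import ThreadPoolExecutor
from functools import partial


def _threaded_run_sketch(func, iterable, thread_pool_size, **kwargs):
    """Apply func(item, **kwargs) to every item using a thread pool."""
    with ThreadPoolExecutor(max_workers=thread_pool_size) as pool:
        return list(pool.map(partial(func, **kwargs), iterable))


# Example: _threaded_run_sketch(print, ["a", "b"], thread_pool_size=2)
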
def run(dry_run=False, thread_pool_size=10, internal=None,
        use_jump_host=True, take_over=True, defer=None):
    try:
        namespaces = [namespace_info for namespace_info
                      in queries.get_namespaces()
                      if namespace_info.get('quota')]

        ri, oc_map = ob.fetch_current_state(
            namespaces=namespaces,
            thread_pool_size=thread_pool_size,
            integration=QONTRACT_INTEGRATION,
            integration_version=QONTRACT_INTEGRATION_VERSION,
            override_managed_types=['ResourceQuota'],
            internal=internal,
            use_jump_host=use_jump_host)
        defer(lambda: oc_map.cleanup())

        fetch_desired_state(namespaces, ri, oc_map)
        ob.realize_data(dry_run, oc_map, ri)

        if ri.has_error_registered():
            sys.exit(1)
    except Exception as e:
        logging.error(f"Error during execution. Exception: {str(e)}")
        sys.exit(1)

def ai_find_namespace(self, _, args):
    """Search namespaces registered in app-interface"""
    if len(args) < 1:
        return "Must supply a search argument"

    term = args[0].lower()
    if len(term) < 3:
        return f"Search term '{term}' is too short (min 3 characters)"

    server = self.config['gql_server']
    token = self.config['gql_token']
    gql.init(server, token)

    namespaces = queries.get_namespaces()

    found = []
    for namespace in namespaces:
        if term in namespace['name'].lower():
            found.append(namespace)

    if len(found) == 0:
        return f"No namespaces found for term '{term}'."

    return {'namespaces': found}

def get_apps_data(date, month_delta=1):
    apps = queries.get_apps()
    jjb, _ = init_jjb()
    saas_jobs = jjb.get_all_jobs(job_types=['saas-deploy', 'promote-to-prod'])
    build_master_jobs = jjb.get_all_jobs(job_types=['build-master'])
    jenkins_map = jenkins_base.get_jenkins_map()
    time_limit = date - relativedelta(months=month_delta)
    timestamp_limit = \
        int(time_limit.replace(tzinfo=timezone.utc).timestamp())
    saas_build_history = \
        get_build_history(jenkins_map, saas_jobs, timestamp_limit)
    build_master_build_history = \
        get_build_history(jenkins_map, build_master_jobs, timestamp_limit)

    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    secret_content = secret_reader.read_all({'path': DASHDOTDB_SECRET})
    dashdotdb_url = secret_content['url']
    dashdotdb_user = secret_content['username']
    dashdotdb_pass = secret_content['password']
    metrics = requests.get(f'{dashdotdb_url}/api/v1/metrics',
                           auth=(dashdotdb_user, dashdotdb_pass)).text
    namespaces = queries.get_namespaces()

    for app in apps:
        if not app['codeComponents']:
            continue

        app_name = app['name']

        logging.info(f"collecting promotions for {app_name}")
        app['promotions'] = {}
        saas_repos = [c['url'] for c in app['codeComponents']
                      if c['resource'] == 'saasrepo']
        for sr in saas_repos:
            sr_history = saas_build_history.get(sr)
            if not sr_history:
                continue
            successes = [h for h in sr_history if h == 'SUCCESS']
            app['promotions'][sr] = (len(sr_history), len(successes))

        logging.info(f"collecting merge activity for {app_name}")
        app['merge_activity'] = {}
        code_repos = [c['url'] for c in app['codeComponents']
                      if c['resource'] == 'upstream']
        for cr in code_repos:
            cr_history = build_master_build_history.get(cr)
            if not cr_history:
                continue
            successes = [h for h in cr_history if h == 'SUCCESS']
            app['merge_activity'][cr] = (len(cr_history), len(successes))

        logging.info(f"collecting dashdotdb information for {app_name}")
        app_namespaces = []
        for namespace in namespaces:
            if namespace['app']['name'] != app['name']:
                continue
            app_namespaces.append(namespace)
        vuln_mx = {}
        validt_mx = {}
        for family in text_string_to_metric_families(metrics):
            for sample in family.samples:
                if sample.name == 'imagemanifestvuln_total':
                    for app_namespace in app_namespaces:
                        cluster = sample.labels['cluster']
                        if app_namespace['cluster']['name'] != cluster:
                            continue
                        namespace = sample.labels['namespace']
                        if app_namespace['name'] != namespace:
                            continue
                        severity = sample.labels['severity']
                        if cluster not in vuln_mx:
                            vuln_mx[cluster] = {}
                        if namespace not in vuln_mx[cluster]:
                            vuln_mx[cluster][namespace] = {}
                        if severity not in vuln_mx[cluster][namespace]:
                            value = int(sample.value)
                            vuln_mx[cluster][namespace][severity] = value
                if sample.name == 'deploymentvalidation_total':
                    for app_namespace in app_namespaces:
                        cluster = sample.labels['cluster']
                        if app_namespace['cluster']['name'] != cluster:
                            continue
                        namespace = sample.labels['namespace']
                        if app_namespace['name'] != namespace:
                            continue
                        validation = sample.labels['validation']
                        # dvo: fail == 1, pass == 0, py: true == 1, false == 0
                        # so: ({false|pass}, {true|fail})
                        status = ('Passed', 'Failed')[
                            int(sample.labels['status'])]
                        if cluster not in validt_mx:
                            validt_mx[cluster] = {}
                        if namespace not in validt_mx[cluster]:
                            validt_mx[cluster][namespace] = {}
                        if validation not in validt_mx[cluster][namespace]:
                            validt_mx[cluster][namespace][validation] = {}
                        if status not in validt_mx[cluster][namespace][
                                validation]:
                            validt_mx[cluster][namespace][validation][
                                status] = {}
                        value = int(sample.value)
                        validt_mx[cluster][namespace][validation][
                            status] = value
        app['container_vulnerabilities'] = vuln_mx
        app['deployment_validations'] = validt_mx

    return apps

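# The `status` label of `deploymentvalidation_total` arrives as the string
# "0" or "1" (dvo: pass == 0, fail == 1, per the inline comment above).
# Casting it to int and indexing a two-element tuple maps it to a readable
# value; a standalone illustration:
for raw_status in ("0", "1"):
    status = ('Passed', 'Failed')[int(raw_status)]
    print(raw_status, "->", status)  # "0" -> Passed, "1" -> Failed
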
def run(dry_run, gitlab_project_id=None):
    settings = queries.get_app_interface_settings()
    namespaces = queries.get_namespaces()

    # This is a list of app-interface ECR resources and their
    # mirrors
    osd_mirrors = []
    for namespace in namespaces:
        # We are only interested in the ECR resources from
        # this specific namespace
        if namespace['name'] != 'osd-operators-ecr-mirrors':
            continue
        if namespace['terraformResources'] is None:
            continue
        for tfr in namespace['terraformResources']:
            if tfr['provider'] != 'ecr':
                continue
            if tfr['mirror'] is None:
                continue
            osd_mirrors.append(tfr)

    # Now the tricky part. The "OCP Release ECR Mirror" is a stand-alone
    # object in app-interface. We have to process it so we get the
    # upstream and the mirror repositories
    instances = queries.get_ocp_release_ecr_mirror()
    for instance in instances:
        namespace = instance['ecrResourcesNamespace']
        ocp_release_identifier = instance['ocpReleaseEcrIdentifier']
        ocp_art_dev_identifier = instance['ocpArtDevEcrIdentifier']

        ocp_release_tf_info = get_ecr_tf_resource_info(
            namespace, ocp_release_identifier)

        # We get an ECR resource from app-interface, but it has
        # no mirror property as the mirroring is done differently
        # there (see qontract-reconcile-ocp-release-ecr-mirror).
        # The quay repositories are not managed in app-interface, but
        # we know where they are by looking at the ClusterImageSets
        # in Hive.
        # Let's just manually inject the mirror information so we
        # process all the ECR resources the same way
        ocp_release_tf_info['mirror'] = {
            'url': 'quay.io/openshift-release-dev/ocp-release',
            'pullCredentials': None,
            'tags': None,
            'tagsExclude': None
        }
        osd_mirrors.append(ocp_release_tf_info)

        ocp_art_dev_tf_info = get_ecr_tf_resource_info(
            namespace, ocp_art_dev_identifier)
        ocp_art_dev_tf_info['mirror'] = {
            'url': 'quay.io/openshift-release-dev/ocp-v4.0-art-dev',
            'pullCredentials': None,
            'tags': None,
            'tagsExclude': None
        }
        osd_mirrors.append(ocp_art_dev_tf_info)

    # Initializing the AWS Client for all the accounts
    # with ECR resources of interest
    accounts = []
    for tfr in osd_mirrors:
        account = get_aws_account_info(tfr['account'])
        if account not in accounts:
            accounts.append(account)
    aws_cli = AWSApi(thread_pool_size=1,
                     accounts=accounts,
                     settings=settings,
                     init_ecr_auth_tokens=True)
    aws_cli.map_ecr_resources()

    # Building up the mirrors information in the
    # install-config.yaml compatible format
    mirrors_info = []
    for tfr in osd_mirrors:
        image_url = get_image_uri(aws_cli=aws_cli,
                                  account=tfr['account'],
                                  repository=tfr['identifier'])
        mirrors_info.append({
            'source': tfr['mirror']['url'],
            'mirrors': [
                image_url,
            ]
        })

    if not dry_run:
        # Creating the MR to app-interface
        mr_cli = mr_client_gateway.init(gitlab_project_id=gitlab_project_id)
        mr = CSInstallConfig(mirrors_info=mirrors_info)
        mr.submit(cli=mr_cli)

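# Each entry appended to `mirrors_info` above has the shape sketched below,
# which the CSInstallConfig MR is presumably rendered from for the
# install-config.yaml mirror configuration (the ECR URL is a made-up
# placeholder, not a real value):
#
#     {
#         'source': 'quay.io/openshift-release-dev/ocp-release',
#         'mirrors': ['<account-id>.dkr.ecr.<region>.amazonaws.com/<identifier>'],
#     }
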
def get_apps_data(date, month_delta=1):
    apps = queries.get_apps()
    saas_files = queries.get_saas_files()
    jjb, _ = init_jjb()
    saas_jobs = jjb.get_all_jobs(job_types=['saas-deploy', 'promote-to-prod'])
    build_master_jobs = jjb.get_all_jobs(job_types=['build-master'])
    jenkins_map = jenkins_base.get_jenkins_map()
    time_limit = date - relativedelta(months=month_delta)
    timestamp_limit = \
        int(time_limit.replace(tzinfo=timezone.utc).timestamp())
    saas_build_history = \
        get_build_history(jenkins_map, saas_jobs, timestamp_limit)
    build_master_build_history = \
        get_build_history(jenkins_map, build_master_jobs, timestamp_limit)

    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    secret_content = secret_reader.read_all({'path': DASHDOTDB_SECRET})
    dashdotdb_url = secret_content['url']
    dashdotdb_user = secret_content['username']
    dashdotdb_pass = secret_content['password']
    metrics = requests.get(f'{dashdotdb_url}/api/v1/metrics',
                           auth=(dashdotdb_user, dashdotdb_pass)).text
    namespaces = queries.get_namespaces()

    for app in apps:
        if not app['codeComponents']:
            continue

        app_name = app['name']

        logging.info(f"collecting post-deploy jobs "
                     f"information for {app_name}")
        post_deploy_jobs = {}
        for saas_file in saas_files:
            if saas_file['app']['name'] != app_name:
                continue
            resource_types = saas_file['managedResourceTypes']

            # Only jobs of these types are expected to have a
            # further post-deploy job
            if not any(['Deployment' in resource_types,
                        'DeploymentConfig' not in resource_types]):
                continue

            for resource_template in saas_file['resourceTemplates']:
                for target in resource_template['targets']:
                    cluster = target['namespace']['cluster']['name']
                    namespace = target['namespace']['name']
                    post_deploy_jobs[cluster] = {}
                    post_deploy_jobs[cluster][namespace] = False

        for saas_file in saas_files:
            if saas_file['app']['name'] != app_name:
                continue
            resource_types = saas_file['managedResourceTypes']
            if 'Job' not in resource_types:
                continue
            for resource_template in saas_file['resourceTemplates']:
                for target in resource_template['targets']:
                    cluster = target['namespace']['cluster']['name']
                    namespace = target['namespace']['name']

                    # This block skips the check if the cluster/namespace
                    # has no Deployment/DeploymentConfig job associated.
                    if cluster not in post_deploy_jobs:
                        continue
                    if namespace not in post_deploy_jobs[cluster]:
                        continue

                    # Post-deploy job must depend on an
                    # openshift-saas-deploy job
                    if target['upstream'] is None:
                        continue
                    if target['upstream'].startswith(
                            'openshift-saas-deploy-'):
                        post_deploy_jobs[cluster][namespace] = True

        app['post_deploy_jobs'] = post_deploy_jobs

        logging.info(f"collecting promotions for {app_name}")
        app['promotions'] = {}
        saas_repos = [c['url'] for c in app['codeComponents']
                      if c['resource'] == 'saasrepo']
        for sr in saas_repos:
            sr_history = saas_build_history.get(sr)
            if not sr_history:
                continue
            successes = [h for h in sr_history if h == 'SUCCESS']
            app['promotions'][sr] = (len(sr_history), len(successes))

        logging.info(f"collecting merge activity for {app_name}")
        app['merge_activity'] = {}
        code_repos = [c['url'] for c in app['codeComponents']
                      if c['resource'] == 'upstream']
        for cr in code_repos:
            cr_history = build_master_build_history.get(cr)
            if not cr_history:
                continue
            successes = [h for h in cr_history if h == 'SUCCESS']
            app['merge_activity'][cr] = (len(cr_history), len(successes))

        logging.info(f"collecting vulnerabilities information for {app_name}")
        app_namespaces = []
        for namespace in namespaces:
            if namespace['app']['name'] != app['name']:
                continue
            app_namespaces.append(namespace)
        app_metrics = {}
        for family in text_string_to_metric_families(metrics):
            for sample in family.samples:
                if sample.name != 'imagemanifestvuln_total':
                    continue
                for app_namespace in app_namespaces:
                    cluster = sample.labels['cluster']
                    if app_namespace['cluster']['name'] != cluster:
                        continue
                    namespace = sample.labels['namespace']
                    if app_namespace['name'] != namespace:
                        continue
                    severity = sample.labels['severity']
                    if cluster not in app_metrics:
                        app_metrics[cluster] = {}
                    if namespace not in app_metrics[cluster]:
                        app_metrics[cluster][namespace] = {}
                    if severity not in app_metrics[cluster][namespace]:
                        value = int(sample.value)
                        app_metrics[cluster][namespace][severity] = value

        app['container_vulnerabilities'] = app_metrics

    return apps

def get_apps_data(date, month_delta=1, thread_pool_size=10):
    apps = queries.get_apps()
    saas_files = queries.get_saas_files()
    jjb, _ = init_jjb()
    jenkins_map = jenkins_base.get_jenkins_map()
    time_limit = date - relativedelta(months=month_delta)
    timestamp_limit = \
        int(time_limit.replace(tzinfo=timezone.utc).timestamp())

    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    secret_content = secret_reader.read_all({'path': DASHDOTDB_SECRET})
    dashdotdb_url = secret_content['url']
    dashdotdb_user = secret_content['username']
    dashdotdb_pass = secret_content['password']
    auth = (dashdotdb_user, dashdotdb_pass)
    vuln_metrics = requests.get(
        f'{dashdotdb_url}/api/v1/imagemanifestvuln/metrics', auth=auth).text
    validt_metrics = requests.get(
        f'{dashdotdb_url}/api/v1/deploymentvalidation/metrics',
        auth=auth).text
    slo_metrics = requests.get(
        f'{dashdotdb_url}/api/v1/serviceslometrics/metrics', auth=auth).text
    namespaces = queries.get_namespaces()

    build_jobs = jjb.get_all_jobs(job_types=['build'])
    jobs_to_get = build_jobs.copy()

    saas_deploy_jobs = []
    for saas_file in saas_files:
        saas_file_name = saas_file['name']
        for template in saas_file["resourceTemplates"]:
            for target in template["targets"]:
                job = {}
                job['env'] = target["namespace"]["environment"]["name"]
                job['app'] = target["namespace"]["app"]["name"]
                job['cluster'] = target['namespace']['cluster']['name']
                job['namespace'] = target['namespace']['name']
                job['name'] = get_openshift_saas_deploy_job_name(
                    saas_file_name, job['env'], settings)
                job['saas_file_name'] = saas_file_name
                job['instance'] = saas_file["instance"]["name"]
                saas_deploy_jobs.append(job)
                if job['instance'] not in jobs_to_get:
                    jobs_to_get[job['instance']] = [job]
                else:
                    jobs_to_get[job['instance']].append(job)

    job_history = get_build_history_pool(jenkins_map,
                                         jobs_to_get,
                                         timestamp_limit,
                                         thread_pool_size)

    for app in apps:
        if not app['codeComponents']:
            continue

        app_name = app['name']

        logging.info(f"collecting post-deploy jobs "
                     f"information for {app_name}")
        post_deploy_jobs = {}
        for saas_file in saas_files:
            if saas_file['app']['name'] != app_name:
                continue
            resource_types = saas_file['managedResourceTypes']

            # Only jobs of these types are expected to have a
            # further post-deploy job
            if not any(['Deployment' in resource_types,
                        'DeploymentConfig' not in resource_types]):
                continue

            for resource_template in saas_file['resourceTemplates']:
                for target in resource_template['targets']:
                    cluster = target['namespace']['cluster']['name']
                    namespace = target['namespace']['name']
                    post_deploy_jobs[cluster] = {}
                    post_deploy_jobs[cluster][namespace] = False

        for saas_file in saas_files:
            if saas_file['app']['name'] != app_name:
                continue
            resource_types = saas_file['managedResourceTypes']
            if 'Job' not in resource_types:
                continue
            for resource_template in saas_file['resourceTemplates']:
                for target in resource_template['targets']:
                    cluster = target['namespace']['cluster']['name']
                    namespace = target['namespace']['name']

                    # This block skips the check if the cluster/namespace
                    # has no Deployment/DeploymentConfig job associated.
                    if cluster not in post_deploy_jobs:
                        continue
                    if namespace not in post_deploy_jobs[cluster]:
                        continue

                    # Post-deploy job must depend on an
                    # openshift-saas-deploy job
                    if target['upstream'] is None:
                        continue
                    if target['upstream'].startswith(
                            'openshift-saas-deploy-'):
                        post_deploy_jobs[cluster][namespace] = True

        app['post_deploy_jobs'] = post_deploy_jobs

        logging.info(f"collecting promotion history for {app_name}")
        app["promotions"] = {}
        for job in saas_deploy_jobs:
            if job['app'] != app_name:
                continue
            if job['name'] not in job_history:
                continue
            history = job_history[job["name"]]
            saas_file_name = job['saas_file_name']
            if saas_file_name not in app["promotions"]:
                app["promotions"][saas_file_name] = [{
                    "env": job["env"],
                    "cluster": job["cluster"],
                    "namespace": job["namespace"],
                    **history
                }]
            else:
                app["promotions"][saas_file_name].append({
                    "env": job["env"],
                    "cluster": job["cluster"],
                    "namespace": job["namespace"],
                    **history
                })

        logging.info(f"collecting merge activity for {app_name}")
        app['merge_activity'] = {}
        code_repos = [c['url'] for c in app['codeComponents']
                      if c['resource'] == 'upstream']
        for jobs in build_jobs.values():
            for job in jobs:
                try:
                    repo_url = get_repo_url(job)
                except KeyError:
                    continue
                if repo_url not in code_repos:
                    continue
                if job['name'] not in job_history:
                    continue
                history = job_history[job['name']]
                if repo_url not in app["merge_activity"]:
                    app["merge_activity"][repo_url] = [{
                        "branch": job["branch"],
                        **history
                    }]
                else:
                    app["merge_activity"][repo_url].append({
                        "branch": job["branch"],
                        **history
                    })

        logging.info(f"collecting dashdotdb information for {app_name}")
        app_namespaces = []
        for namespace in namespaces:
            if namespace['app']['name'] != app['name']:
                continue
            app_namespaces.append(namespace)
        vuln_mx = {}
        validt_mx = {}
        slo_mx = {}
        for family in text_string_to_metric_families(vuln_metrics):
            for sample in family.samples:
                if sample.name == 'imagemanifestvuln_total':
                    for app_namespace in app_namespaces:
                        cluster = sample.labels['cluster']
                        if app_namespace['cluster']['name'] != cluster:
                            continue
                        namespace = sample.labels['namespace']
                        if app_namespace['name'] != namespace:
                            continue
                        severity = sample.labels['severity']
                        if cluster not in vuln_mx:
                            vuln_mx[cluster] = {}
                        if namespace not in vuln_mx[cluster]:
                            vuln_mx[cluster][namespace] = {}
                        if severity not in vuln_mx[cluster][namespace]:
                            value = int(sample.value)
                            vuln_mx[cluster][namespace][severity] = value
        for family in text_string_to_metric_families(validt_metrics):
            for sample in family.samples:
                if sample.name == 'deploymentvalidation_total':
                    for app_namespace in app_namespaces:
                        cluster = sample.labels['cluster']
                        if app_namespace['cluster']['name'] != cluster:
                            continue
                        namespace = sample.labels['namespace']
                        if app_namespace['name'] != namespace:
                            continue
                        validation = sample.labels['validation']
                        # dvo: fail == 1, pass == 0, py: true == 1, false == 0
                        # so: ({false|pass}, {true|fail})
                        status = ('Passed', 'Failed')[
                            int(sample.labels['status'])]
                        if cluster not in validt_mx:
                            validt_mx[cluster] = {}
                        if namespace not in validt_mx[cluster]:
                            validt_mx[cluster][namespace] = {}
                        if validation not in validt_mx[cluster][namespace]:
                            validt_mx[cluster][namespace][validation] = {}
                        if status not in validt_mx[cluster][namespace][
                                validation]:
                            validt_mx[cluster][namespace][validation][
                                status] = {}
                        value = int(sample.value)
                        validt_mx[cluster][namespace][validation][
                            status] = value
        for family in text_string_to_metric_families(slo_metrics):
            for sample in family.samples:
                if sample.name == 'serviceslometrics':
                    for app_namespace in app_namespaces:
                        cluster = sample.labels['cluster']
                        if app_namespace['cluster']['name'] != cluster:
                            continue
                        namespace = sample.labels['namespace']
                        if app_namespace['name'] != namespace:
                            continue
                        slo_doc_name = sample.labels['slodoc']
                        slo_name = sample.labels['name']
                        if cluster not in slo_mx:
                            slo_mx[cluster] = {}
                        if namespace not in slo_mx[cluster]:
                            slo_mx[cluster][namespace] = {}
                        if slo_doc_name not in slo_mx[cluster][namespace]:
                            slo_mx[cluster][namespace][slo_doc_name] = {}
                        if slo_name not in slo_mx[cluster][namespace][
                                slo_doc_name]:
                            slo_mx[cluster][namespace][slo_doc_name][
                                slo_name] = {
                                    sample.labels['type']: sample.value
                                }
                        else:
                            slo_mx[cluster][namespace][slo_doc_name][
                                slo_name].update({
                                    sample.labels['type']: sample.value
                                })
        app['container_vulnerabilities'] = vuln_mx
        app['deployment_validations'] = validt_mx
        app['service_slo'] = slo_mx

    return apps

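# The dashdotdb responses consumed above are Prometheus text-format
# exposition. `text_string_to_metric_families` (from prometheus_client.parser)
# yields metric families whose `samples` carry `name`, `labels` and `value`,
# which is what the nested loops above walk. A minimal, self-contained
# illustration with made-up data:
from prometheus_client.parser import text_string_to_metric_families

_raw = (
    'imagemanifestvuln_total{cluster="c1",namespace="ns1",severity="High"} 3\n'
)
for _family in text_string_to_metric_families(_raw):
    for _sample in _family.samples:
        print(_sample.name, _sample.labels, int(_sample.value))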