def _trigger_jenkins(spec, dry_run, saasherder, jenkins_map, already_triggered,
                     settings, trigger_type):
    """Trigger a single openshift-saas-deploy Jenkins job for a trigger spec.

    Returns True when triggering the job raised an error, False otherwise.
    On success (and when not a dry run) the trigger state is persisted via
    saasherder so the same spec is not re-processed.
    """
    # TODO: Convert these into a dataclass.
    provider = spec["pipelines_provider"]
    instance_name = provider["instance"]["name"]
    job_name = get_openshift_saas_deploy_job_name(
        spec["saas_file_name"], spec["env_name"], settings)

    error = False
    # _register_trigger tells us whether this job still needs triggering
    # (dedup across specs that map to the same job).
    if _register_trigger(job_name, already_triggered):
        logging.info(["trigger_job", instance_name, job_name])
        if not dry_run:
            try:
                jenkins_map[instance_name].trigger_job(job_name)
            except Exception as e:
                error = True
                logging.error(f"could not trigger job {job_name} " + f"in {instance_name}. details: {str(e)}")

    # State is updated even when the job was already triggered elsewhere,
    # so the spec is marked as handled either way.
    if not error and not dry_run:
        saasherder.update_state(trigger_type, spec)

    return error
def run(dry_run, thread_pool_size=10):
    """Trigger openshift-saas-deploy jobs for saas-file moving commits.

    Computes the moving-commits diff via SaasHerder and triggers the
    corresponding Jenkins job for each spec, de-duplicating job names.
    Exits with status 1 when no saas files exist or any trigger failed.
    """
    saas_files = queries.get_saas_files()
    if not saas_files:
        logging.error('no saas files found')
        sys.exit(1)

    # Remove saas-file targets that are disabled
    for saas_file in saas_files[:]:
        resource_templates = saas_file['resourceTemplates']
        for rt in resource_templates[:]:
            targets = rt['targets']
            for target in targets[:]:
                if target['disable']:
                    targets.remove(target)

    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    gl = GitLabApi(instance, settings=settings)
    jenkins_map = jenkins_base.get_jenkins_map()
    saasherder = SaasHerder(
        saas_files,
        thread_pool_size=thread_pool_size,
        gitlab=gl,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        settings=settings,
        accounts=accounts)

    trigger_specs = saasherder.get_moving_commits_diff(dry_run)
    already_triggered = []
    error = False
    for job_spec in trigger_specs:
        saas_file_name = job_spec['saas_file_name']
        env_name = job_spec['env_name']
        instance_name = job_spec['instance_name']
        job_name = get_openshift_saas_deploy_job_name(
            saas_file_name, env_name, settings)
        if job_name not in already_triggered:
            logging.info(['trigger_job', instance_name, job_name])
            if dry_run:
                already_triggered.append(job_name)

        if not dry_run:
            jenkins = jenkins_map[instance_name]
            try:
                if job_name not in already_triggered:
                    jenkins.trigger_job(job_name)
                    already_triggered.append(job_name)
                # state update happens even for already-triggered jobs so
                # every spec mapping to this job is marked as handled
                saasherder.update_moving_commit(job_spec)
            except Exception as e:
                error = True
                # include exception details, consistent with the other
                # trigger integrations in this file
                logging.error(
                    f"could not trigger job {job_name} "
                    f"in {instance_name}. details: {str(e)}")

    if error:
        sys.exit(1)
def run(dry_run, thread_pool_size=10):
    """Trigger openshift-saas-deploy jobs for saas-file config changes.

    Computes the configs diff via SaasHerder and triggers the matching
    Jenkins jobs, retrying forever (with a 10s pause) until every trigger
    succeeds. Exits with status 1 when no saas files are found.
    """
    saas_files = queries.get_saas_files()
    if not saas_files:
        logging.error('no saas files found')
        sys.exit(1)

    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    gl = GitLabApi(instance, settings=settings)
    jenkins_map = jenkins_base.get_jenkins_map()
    saasherder = SaasHerder(
        saas_files,
        thread_pool_size=thread_pool_size,
        gitlab=gl,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        settings=settings,
        accounts=accounts)

    trigger_specs = saasherder.get_configs_diff()
    already_triggered = []  # job names already triggered (dedup across specs)

    error = True  # enter loop
    # Retry loop: any failed trigger sets error and re-runs the whole spec
    # list; jobs recorded in already_triggered are not re-triggered, so only
    # the failed ones are retried. NOTE(review): a permanently failing job
    # makes this loop spin forever — presumably intentional (keep retrying
    # until Jenkins recovers), but worth confirming.
    while error:
        error = False
        for job_spec in trigger_specs:
            saas_file_name = job_spec['saas_file_name']
            env_name = job_spec['env_name']
            instance_name = job_spec['instance_name']
            job_name = get_openshift_saas_deploy_job_name(
                saas_file_name, env_name, settings)
            if job_name not in already_triggered:
                logging.info(['trigger_job', instance_name, job_name])
                if dry_run:
                    # dry run: record the job as "triggered" without calling
                    # Jenkins, so it is only logged once
                    already_triggered.append(job_name)

            if not dry_run:
                jenkins = jenkins_map[instance_name]
                try:
                    if job_name not in already_triggered:
                        jenkins.trigger_job(job_name)
                        already_triggered.append(job_name)
                    # config state is updated for every spec, including specs
                    # whose job was already triggered by an earlier spec
                    saasherder.update_config(job_spec)
                except Exception as e:
                    error = True
                    logging.error(
                        f"could not trigger job {job_name} " +
                        f"in {instance_name}. details: {str(e)}"
                    )

        if error:
            time.sleep(10)  # add to constants module once created
def get_apps_data(date, month_delta=1, thread_pool_size=10):
    """Collect reporting data for every app with code components.

    For each app, gathers: post-deploy job coverage per cluster/namespace,
    promotion (deploy job) history, merge activity of its upstream repos,
    and dashdotdb metrics (container vulnerabilities, deployment
    validations, SLO metrics). History is limited to builds newer than
    `date - month_delta` months. Returns the enriched list of apps.
    """
    apps = queries.get_apps()
    saas_files = queries.get_saas_files()
    jjb, _ = init_jjb()
    jenkins_map = jenkins_base.get_jenkins_map()
    time_limit = date - relativedelta(months=month_delta)
    timestamp_limit = \
        int(time_limit.replace(tzinfo=timezone.utc).timestamp())

    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    secret_content = secret_reader.read_all({'path': DASHDOTDB_SECRET})

    dashdotdb_url = secret_content['url']
    dashdotdb_user = secret_content['username']
    dashdotdb_pass = secret_content['password']
    auth = (dashdotdb_user, dashdotdb_pass)
    # Fetch the three dashdotdb metric families once; they are parsed
    # per-app further below.
    vuln_metrics = requests.get(
        f'{dashdotdb_url}/api/v1/imagemanifestvuln/metrics',
        auth=auth).text
    validt_metrics = requests.get(
        f'{dashdotdb_url}/api/v1/deploymentvalidation/metrics',
        auth=auth).text
    slo_metrics = requests.get(
        f'{dashdotdb_url}/api/v1/serviceslometrics/metrics',
        auth=auth).text
    namespaces = queries.get_namespaces()

    build_jobs = jjb.get_all_jobs(job_types=['build'])
    jobs_to_get = build_jobs.copy()

    # Build the list of openshift-saas-deploy jobs from saas-file targets
    # and merge them into jobs_to_get, keyed by Jenkins instance.
    saas_deploy_jobs = []
    for saas_file in saas_files:
        saas_file_name = saas_file['name']
        for template in saas_file["resourceTemplates"]:
            for target in template["targets"]:
                job = {}
                job['env'] = target["namespace"]["environment"]["name"]
                job['app'] = target["namespace"]["app"]["name"]
                job['cluster'] = target['namespace']['cluster']['name']
                job['namespace'] = target['namespace']['name']
                job['name'] = get_openshift_saas_deploy_job_name(
                    saas_file_name, job['env'], settings)
                job['saas_file_name'] = saas_file_name
                job['instance'] = saas_file["instance"]["name"]
                saas_deploy_jobs.append(job)
                if job['instance'] not in jobs_to_get:
                    jobs_to_get[job['instance']] = [job]
                else:
                    jobs_to_get[job['instance']].append(job)

    job_history = get_build_history_pool(
        jenkins_map, jobs_to_get, timestamp_limit, thread_pool_size)

    for app in apps:
        if not app['codeComponents']:
            continue

        app_name = app['name']

        logging.info(f"collecting post-deploy jobs "
                     f"information for {app_name}")
        # Map cluster -> namespace -> has-post-deploy-job flag, seeded
        # with False for every deployment target of this app.
        post_deploy_jobs = {}
        for saas_file in saas_files:
            if saas_file['app']['name'] != app_name:
                continue
            resource_types = saas_file['managedResourceTypes']

            # Only jobs of these types are expected to have a
            # further post-deploy job.
            # BUG FIX: was `'DeploymentConfig' not in resource_types`,
            # which inverted the filter (excluded DeploymentConfig-only
            # saas files and admitted unrelated ones).
            if not any(['Deployment' in resource_types,
                        'DeploymentConfig' in resource_types]):
                continue

            for resource_template in saas_file['resourceTemplates']:
                for target in resource_template['targets']:
                    cluster = target['namespace']['cluster']['name']
                    namespace = target['namespace']['name']
                    # NOTE(review): this resets the cluster entry for every
                    # target, discarding namespaces recorded by previous
                    # targets on the same cluster — confirm intended.
                    post_deploy_jobs[cluster] = {}
                    post_deploy_jobs[cluster][namespace] = False

        for saas_file in saas_files:
            if saas_file['app']['name'] != app_name:
                continue
            resource_types = saas_file['managedResourceTypes']
            if 'Job' not in resource_types:
                continue
            for resource_template in saas_file['resourceTemplates']:
                for target in resource_template['targets']:
                    cluster = target['namespace']['cluster']['name']
                    namespace = target['namespace']['name']

                    # This block skips the check if the cluster/namespace
                    # has no Deployment/DeploymentConfig job associated.
                    if cluster not in post_deploy_jobs:
                        continue
                    if namespace not in post_deploy_jobs[cluster]:
                        continue

                    # Post-deploy job must depend on a openshift-saas-deploy
                    # job
                    if target['upstream'] is None:
                        continue
                    if target['upstream'].startswith(
                            'openshift-saas-deploy-'):
                        post_deploy_jobs[cluster][namespace] = True

        app['post_deploy_jobs'] = post_deploy_jobs

        logging.info(f"collecting promotion history for {app_name}")
        # Group deploy-job build history per saas file.
        app["promotions"] = {}
        for job in saas_deploy_jobs:
            if job['app'] != app_name:
                continue
            if job['name'] not in job_history:
                continue
            history = job_history[job["name"]]
            saas_file_name = job['saas_file_name']
            if saas_file_name not in app["promotions"]:
                app["promotions"][saas_file_name] = [{
                    "env": job["env"],
                    "cluster": job["cluster"],
                    "namespace": job["namespace"],
                    **history
                }]
            else:
                app["promotions"][saas_file_name].append({
                    "env": job["env"],
                    "cluster": job["cluster"],
                    "namespace": job["namespace"],
                    **history
                })

        logging.info(f"collecting merge activity for {app_name}")
        # Group build-job history per upstream code repository.
        app['merge_activity'] = {}
        code_repos = [c['url'] for c in app['codeComponents']
                      if c['resource'] == 'upstream']
        for jobs in build_jobs.values():
            for job in jobs:
                try:
                    repo_url = get_repo_url(job)
                except KeyError:
                    # job has no repo url configured; skip it (best-effort)
                    continue
                if repo_url not in code_repos:
                    continue
                if job['name'] not in job_history:
                    continue
                history = job_history[job['name']]
                if repo_url not in app["merge_activity"]:
                    app["merge_activity"][repo_url] = [{
                        "branch": job["branch"],
                        **history
                    }]
                else:
                    app["merge_activity"][repo_url].append({
                        "branch": job["branch"],
                        **history
                    })

        logging.info(f"collecting dashdotdb information for {app_name}")
        app_namespaces = []
        for namespace in namespaces:
            if namespace['app']['name'] != app['name']:
                continue
            app_namespaces.append(namespace)
        vuln_mx = {}
        validt_mx = {}
        slo_mx = {}
        # cluster -> namespace -> severity -> count
        for family in text_string_to_metric_families(vuln_metrics):
            for sample in family.samples:
                if sample.name == 'imagemanifestvuln_total':
                    for app_namespace in app_namespaces:
                        cluster = sample.labels['cluster']
                        if app_namespace['cluster']['name'] != cluster:
                            continue
                        namespace = sample.labels['namespace']
                        if app_namespace['name'] != namespace:
                            continue
                        severity = sample.labels['severity']
                        if cluster not in vuln_mx:
                            vuln_mx[cluster] = {}
                        if namespace not in vuln_mx[cluster]:
                            vuln_mx[cluster][namespace] = {}
                        if severity not in vuln_mx[cluster][namespace]:
                            value = int(sample.value)
                            vuln_mx[cluster][namespace][severity] = value
        # cluster -> namespace -> validation -> status -> count
        for family in text_string_to_metric_families(validt_metrics):
            for sample in family.samples:
                if sample.name == 'deploymentvalidation_total':
                    for app_namespace in app_namespaces:
                        cluster = sample.labels['cluster']
                        if app_namespace['cluster']['name'] != cluster:
                            continue
                        namespace = sample.labels['namespace']
                        if app_namespace['name'] != namespace:
                            continue
                        validation = sample.labels['validation']
                        # dvo: fail == 1, pass == 0, py: true == 1,
                        # false == 0, so: ({false|pass}, {true|fail})
                        status = ('Passed',
                                  'Failed')[int(sample.labels['status'])]
                        if cluster not in validt_mx:
                            validt_mx[cluster] = {}
                        if namespace not in validt_mx[cluster]:
                            validt_mx[cluster][namespace] = {}
                        if validation not in validt_mx[cluster][namespace]:
                            validt_mx[cluster][namespace][validation] = {}
                        if status not in validt_mx[cluster][namespace][
                                validation]:  # noqa: E501
                            validt_mx[cluster][namespace][validation][
                                status] = {}  # noqa: E501
                        value = int(sample.value)
                        validt_mx[cluster][namespace][validation][
                            status] = value  # noqa: E501
        # cluster -> namespace -> slo doc -> slo name -> {type: value}
        for family in text_string_to_metric_families(slo_metrics):
            for sample in family.samples:
                if sample.name == 'serviceslometrics':
                    for app_namespace in app_namespaces:
                        cluster = sample.labels['cluster']
                        if app_namespace['cluster']['name'] != cluster:
                            continue
                        namespace = sample.labels['namespace']
                        if app_namespace['name'] != namespace:
                            continue
                        slo_doc_name = sample.labels['slodoc']
                        slo_name = sample.labels['name']
                        if cluster not in slo_mx:
                            slo_mx[cluster] = {}
                        if namespace not in slo_mx[cluster]:
                            slo_mx[cluster][namespace] = {}
                        if slo_doc_name not in \
                                slo_mx[cluster][namespace]:  # noqa: E501
                            slo_mx[cluster][namespace][slo_doc_name] = {}
                        if slo_name not in slo_mx[cluster][namespace][
                                slo_doc_name]:  # noqa: E501
                            slo_mx[cluster][namespace][slo_doc_name][
                                slo_name] = {  # noqa: E501
                                    sample.labels['type']: sample.value
                            }
                        else:
                            slo_mx[cluster][namespace][slo_doc_name][
                                slo_name].update({  # noqa: E501
                                    sample.labels['type']: sample.value
                                })
        app['container_vulnerabilities'] = vuln_mx
        app['deployment_validations'] = validt_mx
        app['service_slo'] = slo_mx

    return apps