def run(dry_run, thread_pool_size=10):
    """Trigger openshift-saas-deploy Jenkins jobs for saas-file targets
    whose tracked commits have moved.

    Exits with code 1 when no saas files exist or when any job could
    not be triggered.

    :param dry_run: when True, only log what would be triggered
    :param thread_pool_size: threads used by SaasHerder for parallel work
    """
    saas_files = queries.get_saas_files()
    if not saas_files:
        logging.error('no saas files found')
        sys.exit(1)

    # Remove saas-file targets that are disabled.
    # Iterate over copies so in-place removal is safe while looping.
    for saas_file in saas_files[:]:
        resource_templates = saas_file['resourceTemplates']
        for rt in resource_templates[:]:
            targets = rt['targets']
            for target in targets[:]:
                if target['disable']:
                    targets.remove(target)

    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    gl = GitLabApi(instance, settings=settings)
    jenkins_map = jenkins_base.get_jenkins_map()
    saasherder = SaasHerder(
        saas_files,
        thread_pool_size=thread_pool_size,
        gitlab=gl,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        settings=settings,
        accounts=accounts)

    trigger_specs = saasherder.get_moving_commits_diff(dry_run)
    already_triggered = []
    error = False
    for job_spec in trigger_specs:
        saas_file_name = job_spec['saas_file_name']
        env_name = job_spec['env_name']
        instance_name = job_spec['instance_name']
        job_name = get_openshift_saas_deploy_job_name(
            saas_file_name, env_name, settings)
        if job_name not in already_triggered:
            logging.info(['trigger_job', instance_name, job_name])
            if dry_run:
                already_triggered.append(job_name)
        if not dry_run:
            jenkins = jenkins_map[instance_name]
            try:
                # a job may appear in multiple specs - trigger it only
                # once, but record the moving commit for every spec
                if job_name not in already_triggered:
                    jenkins.trigger_job(job_name)
                    already_triggered.append(job_name)
                saasherder.update_moving_commit(job_spec)
            except Exception as e:
                # keep processing the remaining specs; fail the run at
                # the end. include the exception details in the log
                # (previously lost), consistent with the configs-diff
                # trigger integration.
                error = True
                logging.error(
                    f"could not trigger job {job_name} "
                    f"in {instance_name}. details: {str(e)}")
    if error:
        sys.exit(1)
def run(dry_run, thread_pool_size=10, saas_file_name=None, env_name=None,
        defer=None):
    """Deploy saas-file targets to their OpenShift namespaces.

    Fetches the current cluster state, computes the desired state from
    the saas files and realizes the difference.

    :param dry_run: when True, compute and compare but do not apply
    :param thread_pool_size: threads for state fetching / realization
    :param saas_file_name: optional filter to a single saas file
    :param env_name: optional filter to a single environment
    :param defer: callback registrar for deferred cleanup (presumably
        injected by a decorator - confirm against caller)
    """
    saas_files = queries.get_saas_files(saas_file_name, env_name)
    if not saas_files:
        logging.error('no saas files found')
        sys.exit(1)
    instance = queries.get_gitlab_instance()
    # only init Jenkins clients for instances actually referenced
    desired_jenkins_instances = [s['instance']['name'] for s in saas_files]
    jenkins_map = jenkins_base.get_jenkins_map(
        desired_instances=desired_jenkins_instances)
    settings = queries.get_app_interface_settings()
    try:
        gl = GitLabApi(instance, settings=settings)
    except Exception:
        # allow execution without access to gitlab
        # as long as there are no access attempts.
        gl = None
    saasherder = SaasHerder(saas_files,
                            thread_pool_size=thread_pool_size,
                            gitlab=gl,
                            integration=QONTRACT_INTEGRATION,
                            integration_version=QONTRACT_INTEGRATION_VERSION,
                            settings=settings,
                            jenkins_map=jenkins_map)
    # saas-file validation errors abort the run
    if not saasherder.valid:
        sys.exit(1)
    if len(saasherder.namespaces) == 0:
        logging.warning('no targets found')
        sys.exit(0)
    ri, oc_map = ob.fetch_current_state(
        namespaces=saasherder.namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION)
    # ensure OC clients are cleaned up when the run ends
    defer(lambda: oc_map.cleanup())
    saasherder.populate_desired_state(ri)
    # if saas_file_name is defined, the integration
    # is being called from multiple running instances
    ob.realize_data(dry_run, oc_map, ri,
                    caller=saas_file_name,
                    wait_for_namespace=True,
                    no_dry_run_skip_compare=True,
                    take_over=saasherder.take_over)
    if ri.has_error_registered():
        sys.exit(1)
def run(dry_run, thread_pool_size=10):
    """Trigger openshift-saas-deploy Jenkins jobs for saas-file
    configuration changes.

    Retries the full set of trigger specs (sleeping 10s between
    rounds) until every job has been triggered; jobs already triggered
    are skipped on subsequent rounds.
    NOTE(review): a permanently failing Jenkins instance makes this
    loop spin forever - confirm that is intended.

    :param dry_run: when True, only log what would be triggered
    :param thread_pool_size: threads used by SaasHerder
    """
    saas_files = queries.get_saas_files()
    if not saas_files:
        logging.error('no saas files found')
        sys.exit(1)
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    gl = GitLabApi(instance, settings=settings)
    jenkins_map = jenkins_base.get_jenkins_map()
    saasherder = SaasHerder(
        saas_files,
        thread_pool_size=thread_pool_size,
        gitlab=gl,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        settings=settings,
        accounts=accounts)
    trigger_specs = saasherder.get_configs_diff()
    already_triggered = []

    error = True  # enter loop
    while error:
        error = False
        for job_spec in trigger_specs:
            saas_file_name = job_spec['saas_file_name']
            env_name = job_spec['env_name']
            instance_name = job_spec['instance_name']
            job_name = get_openshift_saas_deploy_job_name(
                saas_file_name, env_name, settings)
            if job_name not in already_triggered:
                logging.info(['trigger_job', instance_name, job_name])
                if dry_run:
                    already_triggered.append(job_name)
            if not dry_run:
                jenkins = jenkins_map[instance_name]
                try:
                    # trigger each job at most once, but record the
                    # config update for every spec
                    if job_name not in already_triggered:
                        jenkins.trigger_job(job_name)
                        already_triggered.append(job_name)
                    saasherder.update_config(job_spec)
                except Exception as e:
                    error = True
                    logging.error(
                        f"could not trigger job {job_name} " +
                        f"in {instance_name}. details: {str(e)}"
                    )
        if error:
            time.sleep(10)  # add to constants module once created
def get_apps_data(date, month_delta=1):
    """Collect promotion and merge-activity statistics per app.

    For every app that has code components, records a tuple of
    (total builds, successful builds) per repository under
    ``app['promotions']`` (saas repos, from saas-deploy /
    promote-to-prod jobs) and ``app['merge_activity']`` (upstream
    repos, from build-master jobs). Only builds newer than
    ``date - month_delta`` months are counted.

    :param date: reference datetime for the reporting window
    :param month_delta: window size in months
    :return: the list of apps, enriched in place
    """
    apps = queries.get_apps()
    jjb = init_jjb()
    saas_jobs = jjb.get_all_jobs(job_types=['saas-deploy', 'promote-to-prod'])
    build_master_jobs = jjb.get_all_jobs(job_types=['build-master'])
    jenkins_map = jenkins_base.get_jenkins_map()

    # history older than the window start is ignored
    time_limit = date - relativedelta(months=month_delta)
    timestamp_limit = int(time_limit.replace(tzinfo=timezone.utc).timestamp())
    saas_build_history = get_build_history(
        jenkins_map, saas_jobs, timestamp_limit)
    build_master_build_history = get_build_history(
        jenkins_map, build_master_jobs, timestamp_limit)

    for app in apps:
        components = app['codeComponents']
        if not components:
            continue
        app_name = app['name']

        logging.info(f"collecting promotions for {app_name}")
        app['promotions'] = {}
        for repo in (c['url'] for c in components
                     if c['resource'] == 'saasrepo'):
            history = saas_build_history.get(repo)
            if not history:
                continue
            wins = sum(1 for h in history if h == 'SUCCESS')
            app['promotions'][repo] = (len(history), wins)

        logging.info(f"collecting merge activity for {app_name}")
        app['merge_activity'] = {}
        for repo in (c['url'] for c in components
                     if c['resource'] == 'upstream'):
            history = build_master_build_history.get(repo)
            if not history:
                continue
            wins = sum(1 for h in history if h == 'SUCCESS')
            app['merge_activity'][repo] = (len(history), wins)
    return apps
def setup(thread_pool_size, internal, use_jump_host, integration,
          integration_version, v1, v2):
    """Setup required resources for triggering integrations

    Args:
        thread_pool_size (int): Thread pool size to use
        internal (bool): Should run for internal/external/all clusters
        use_jump_host (bool): Should use jump host to reach clusters
        integration (string): Name of calling integration
        integration_version (string): Version of calling integration
        v1 (bool): Should trigger for v1 SaaS files
        v2 (bool): Should trigger for v2 SaaS files

    Returns:
        saasherder (SaasHerder): a SaasHerder instance
        jenkins_map (dict): Instance names with JenkinsApi instances
        oc_map (OC_Map): a dictionary of OC clients per cluster
        settings (dict): App-interface settings
        error (bool): True if one happened, False otherwise
    """
    saas_files = queries.get_saas_files(v1=v1, v2=v2)
    if not saas_files:
        logging.error("no saas files found")
        return None, None, None, None, True
    # only process saas files belonging to this shard
    saas_files = [sf for sf in saas_files if is_in_shard(sf["name"])]

    # Remove saas-file targets that are disabled.
    # Iterate over copies so in-place removal is safe while looping.
    for saas_file in saas_files[:]:
        resource_templates = saas_file["resourceTemplates"]
        for rt in resource_templates[:]:
            targets = rt["targets"]
            for target in targets[:]:
                if target["disable"]:
                    targets.remove(target)

    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    gl = GitLabApi(instance, settings=settings)
    jenkins_map = jenkins_base.get_jenkins_map()
    pipelines_providers = queries.get_pipelines_providers()
    # OC clients are only needed for tekton pipeline-provider namespaces
    tkn_provider_namespaces = [
        pp["namespace"] for pp in pipelines_providers
        if pp["provider"] == "tekton"
    ]

    oc_map = OC_Map(
        namespaces=tkn_provider_namespaces,
        integration=integration,
        settings=settings,
        internal=internal,
        use_jump_host=use_jump_host,
        thread_pool_size=thread_pool_size,
    )

    saasherder = SaasHerder(
        saas_files,
        thread_pool_size=thread_pool_size,
        gitlab=gl,
        integration=integration,
        integration_version=integration_version,
        settings=settings,
        jenkins_map=jenkins_map,
        accounts=accounts,
    )

    return saasherder, jenkins_map, oc_map, settings, False
def get_apps_data(date, month_delta=1, thread_pool_size=10):
    """Collect per-app reporting data.

    Enriches every app (in place) with:
    - 'post_deploy_jobs': per cluster/namespace, whether a post-deploy
      Job depends on an openshift-saas-deploy job
    - 'promotions': saas-deploy job history per saas file
    - 'merge_activity': build job history per upstream repo
    - 'container_vulnerabilities', 'deployment_validations',
      'service_slo': metrics scraped from dashdotdb

    :param date: reference datetime; history older than
        date - month_delta months is ignored
    :param month_delta: reporting window size in months
    :param thread_pool_size: threads for the Jenkins history pool
    :return: the list of apps, enriched in place
    """
    apps = queries.get_apps()
    saas_files = queries.get_saas_files()
    jjb, _ = init_jjb()
    jenkins_map = jenkins_base.get_jenkins_map()
    time_limit = date - relativedelta(months=month_delta)
    timestamp_limit = \
        int(time_limit.replace(tzinfo=timezone.utc).timestamp())

    # dashdotdb credentials come from the app-interface secret store
    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    secret_content = secret_reader.read_all({'path': DASHDOTDB_SECRET})
    dashdotdb_url = secret_content['url']
    dashdotdb_user = secret_content['username']
    dashdotdb_pass = secret_content['password']
    auth = (dashdotdb_user, dashdotdb_pass)
    # raw prometheus-format text, parsed per app further down
    vuln_metrics = requests.get(
        f'{dashdotdb_url}/api/v1/imagemanifestvuln/metrics', auth=auth).text
    validt_metrics = requests.get(
        f'{dashdotdb_url}/api/v1/deploymentvalidation/metrics', auth=auth).text
    slo_metrics = requests.get(
        f'{dashdotdb_url}/api/v1/serviceslometrics/metrics', auth=auth).text
    namespaces = queries.get_namespaces()

    build_jobs = jjb.get_all_jobs(job_types=['build'])
    jobs_to_get = build_jobs.copy()

    # collect one job record per saas-file target; also group them by
    # Jenkins instance for the pooled history fetch below
    saas_deploy_jobs = []
    for saas_file in saas_files:
        saas_file_name = saas_file['name']
        for template in saas_file["resourceTemplates"]:
            for target in template["targets"]:
                job = {}
                job['env'] = target["namespace"]["environment"]["name"]
                job['app'] = target["namespace"]["app"]["name"]
                job['cluster'] = target['namespace']['cluster']['name']
                job['namespace'] = target['namespace']['name']
                job['name'] = get_openshift_saas_deploy_job_name(
                    saas_file_name, job['env'], settings)
                job['saas_file_name'] = saas_file_name
                job['instance'] = saas_file["instance"]["name"]
                saas_deploy_jobs.append(job)
                if job['instance'] not in jobs_to_get:
                    jobs_to_get[job['instance']] = [job]
                else:
                    jobs_to_get[job['instance']].append(job)

    job_history = get_build_history_pool(
        jenkins_map, jobs_to_get, timestamp_limit, thread_pool_size)

    for app in apps:
        if not app['codeComponents']:
            continue
        app_name = app['name']

        logging.info(f"collecting post-deploy jobs "
                     f"information for {app_name}")
        # first pass: register every cluster/namespace that has a
        # Deployment/DeploymentConfig saas file, defaulting to False
        post_deploy_jobs = {}
        for saas_file in saas_files:
            if saas_file['app']['name'] != app_name:
                continue
            resource_types = saas_file['managedResourceTypes']

            # Only jobs of these types are expected to have a
            # further post-deploy job
            # NOTE(review): the 'not in' on DeploymentConfig looks
            # inverted ('DeploymentConfig' in resource_types would
            # match the comment) - confirm intent
            if not any([
                'Deployment' in resource_types,
                'DeploymentConfig' not in resource_types
            ]):
                continue

            for resource_template in saas_file['resourceTemplates']:
                for target in resource_template['targets']:
                    cluster = target['namespace']['cluster']['name']
                    namespace = target['namespace']['name']
                    post_deploy_jobs[cluster] = {}
                    post_deploy_jobs[cluster][namespace] = False

        # second pass: mark namespaces whose Job-type saas file has an
        # upstream openshift-saas-deploy job
        for saas_file in saas_files:
            if saas_file['app']['name'] != app_name:
                continue
            resource_types = saas_file['managedResourceTypes']
            if 'Job' not in resource_types:
                continue
            for resource_template in saas_file['resourceTemplates']:
                for target in resource_template['targets']:

                    cluster = target['namespace']['cluster']['name']
                    namespace = target['namespace']['name']

                    # This block skips the check if the cluster/namespace
                    # has no Deployment/DeploymentConfig job associated.
                    if cluster not in post_deploy_jobs:
                        continue
                    if namespace not in post_deploy_jobs[cluster]:
                        continue

                    # Post-deploy job must depend on a openshift-saas-deploy
                    # job
                    if target['upstream'] is None:
                        continue
                    if target['upstream'].startswith('openshift-saas-deploy-'):
                        post_deploy_jobs[cluster][namespace] = True

        app['post_deploy_jobs'] = post_deploy_jobs

        logging.info(f"collecting promotion history for {app_name}")
        app["promotions"] = {}
        for job in saas_deploy_jobs:
            if job['app'] != app_name:
                continue
            if job['name'] not in job_history:
                continue
            history = job_history[job["name"]]
            saas_file_name = job['saas_file_name']
            if saas_file_name not in app["promotions"]:
                app["promotions"][saas_file_name] = [{
                    "env": job["env"],
                    "cluster": job["cluster"],
                    "namespace": job["namespace"],
                    **history
                }]
            else:
                app["promotions"][saas_file_name].append({
                    "env": job["env"],
                    "cluster": job["cluster"],
                    "namespace": job["namespace"],
                    **history
                })

        logging.info(f"collecting merge activity for {app_name}")
        app['merge_activity'] = {}
        code_repos = [
            c['url'] for c in app['codeComponents']
            if c['resource'] == 'upstream'
        ]
        for jobs in build_jobs.values():
            for job in jobs:
                try:
                    repo_url = get_repo_url(job)
                except KeyError:
                    # job has no repo configured - not relevant here
                    continue
                if repo_url not in code_repos:
                    continue
                if job['name'] not in job_history:
                    continue
                history = job_history[job['name']]
                if repo_url not in app["merge_activity"]:
                    app["merge_activity"][repo_url] = [{
                        "branch": job["branch"],
                        **history
                    }]
                else:
                    app["merge_activity"][repo_url].append({
                        "branch": job["branch"],
                        **history
                    })

        logging.info(f"collecting dashdotdb information for {app_name}")
        app_namespaces = []
        for namespace in namespaces:
            if namespace['app']['name'] != app['name']:
                continue
            app_namespaces.append(namespace)
        vuln_mx = {}
        validt_mx = {}
        slo_mx = {}
        # image vulnerability counts per cluster/namespace/severity
        for family in text_string_to_metric_families(vuln_metrics):
            for sample in family.samples:
                if sample.name == 'imagemanifestvuln_total':
                    for app_namespace in app_namespaces:
                        cluster = sample.labels['cluster']
                        if app_namespace['cluster']['name'] != cluster:
                            continue
                        namespace = sample.labels['namespace']
                        if app_namespace['name'] != namespace:
                            continue
                        severity = sample.labels['severity']
                        if cluster not in vuln_mx:
                            vuln_mx[cluster] = {}
                        if namespace not in vuln_mx[cluster]:
                            vuln_mx[cluster][namespace] = {}
                        if severity not in vuln_mx[cluster][namespace]:
                            value = int(sample.value)
                            vuln_mx[cluster][namespace][severity] = value
        # deployment validation pass/fail counts per validation type
        for family in text_string_to_metric_families(validt_metrics):
            for sample in family.samples:
                if sample.name == 'deploymentvalidation_total':
                    for app_namespace in app_namespaces:
                        cluster = sample.labels['cluster']
                        if app_namespace['cluster']['name'] != cluster:
                            continue
                        namespace = sample.labels['namespace']
                        if app_namespace['name'] != namespace:
                            continue
                        validation = sample.labels['validation']
                        # dvo: fail == 1, pass == 0, py: true == 1, false == 0
                        # so: ({false|pass}, {true|fail})
                        status = ('Passed',
                                  'Failed')[int(sample.labels['status'])]
                        if cluster not in validt_mx:
                            validt_mx[cluster] = {}
                        if namespace not in validt_mx[cluster]:
                            validt_mx[cluster][namespace] = {}
                        if validation not in validt_mx[cluster][namespace]:
                            validt_mx[cluster][namespace][validation] = {}
                        if status not in validt_mx[cluster][namespace][
                                validation]:  # noqa: E501
                            validt_mx[cluster][namespace][validation][
                                status] = {}  # noqa: E501
                        value = int(sample.value)
                        validt_mx[cluster][namespace][validation][
                            status] = value  # noqa: E501
        # SLO metrics keyed by slo document and slo name
        for family in text_string_to_metric_families(slo_metrics):
            for sample in family.samples:
                if sample.name == 'serviceslometrics':
                    for app_namespace in app_namespaces:
                        cluster = sample.labels['cluster']
                        if app_namespace['cluster']['name'] != cluster:
                            continue
                        namespace = sample.labels['namespace']
                        if app_namespace['name'] != namespace:
                            continue
                        slo_doc_name = sample.labels['slodoc']
                        slo_name = sample.labels['name']
                        if cluster not in slo_mx:
                            slo_mx[cluster] = {}
                        if namespace not in slo_mx[cluster]:
                            slo_mx[cluster][namespace] = {}
                        if slo_doc_name not in slo_mx[cluster][namespace]:  # pylint: disable=line-too-long # noqa: E501
                            slo_mx[cluster][namespace][slo_doc_name] = {}
                        if slo_name not in slo_mx[cluster][namespace][
                                slo_doc_name]:  # noqa: E501
                            slo_mx[cluster][namespace][slo_doc_name][
                                slo_name] = {  # noqa: E501
                                    sample.labels['type']: sample.value
                                }
                        else:
                            slo_mx[cluster][namespace][slo_doc_name][
                                slo_name].update({  # pylint: disable=line-too-long # noqa: E501
                                    sample.labels['type']: sample.value
                                })
        app['container_vulnerabilities'] = vuln_mx
        app['deployment_validations'] = validt_mx
        app['service_slo'] = slo_mx

    return apps
def run(
    dry_run,
    thread_pool_size=10,
    io_dir="throughput/",
    saas_file_name=None,
    env_name=None,
    gitlab_project_id=None,
    defer=None,
):
    """Deploy saas-file targets and publish results.

    Fetches current cluster state, computes the desired state from the
    saas files, realizes the difference, then (when a single saas file
    is deployed and this is not a dry run) publishes promotions and
    sends slack notifications.

    :param dry_run: when True, compute and compare but do not apply
    :param thread_pool_size: threads for state fetching / realization
    :param io_dir: directory used when following job logs
    :param saas_file_name: optional filter to a single saas file
    :param env_name: optional filter to a single environment
    :param gitlab_project_id: project used for promotion MRs
    :param defer: callback registrar for deferred cleanup (presumably
        injected by a decorator - confirm against caller)
    """
    all_saas_files = queries.get_saas_files(v1=True, v2=True)
    saas_files = queries.get_saas_files(saas_file_name, env_name,
                                        v1=True, v2=True)
    app_interface_settings = queries.get_app_interface_settings()
    if not saas_files:
        logging.error("no saas files found")
        sys.exit(ExitCodes.ERROR)

    # notify different outputs (publish results, slack notifications)
    # we only do this if:
    # - this is not a dry run
    # - there is a single saas file deployed
    notify = not dry_run and len(saas_files) == 1
    if notify:
        saas_file = saas_files[0]
        slack_info = saas_file.get("slack")
        if slack_info:
            slack = slackapi_from_slack_workspace(
                slack_info,
                app_interface_settings,
                QONTRACT_INTEGRATION,
                init_usergroups=False,
            )
            # support built-in start and end slack notifications
            # only in v2 saas files
            if saas_file["apiVersion"] == "v2":
                ri = ResourceInventory()
                console_url = compose_console_url(saas_file,
                                                  saas_file_name, env_name)
                # deployment result notification - deferred so it runs
                # after the deployment completes (or fails)
                defer(
                    lambda: slack_notify(
                        saas_file_name,
                        env_name,
                        slack,
                        ri,
                        console_url,
                        in_progress=False,
                    )
                )
                # deployment start notification
                slack_notifications = slack_info.get("notifications")
                if slack_notifications and slack_notifications.get("start"):
                    slack_notify(
                        saas_file_name,
                        env_name,
                        slack,
                        ri,
                        console_url,
                        in_progress=True,
                    )
        else:
            slack = None

    instance = queries.get_gitlab_instance()
    # instance exists in v1 saas files only
    desired_jenkins_instances = [
        s["instance"]["name"] for s in saas_files if s.get("instance")
    ]
    jenkins_map = jenkins_base.get_jenkins_map(
        desired_instances=desired_jenkins_instances
    )
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    try:
        gl = GitLabApi(instance, settings=settings)
    except Exception:
        # allow execution without access to gitlab
        # as long as there are no access attempts.
        gl = None

    saasherder = SaasHerder(
        saas_files,
        thread_pool_size=thread_pool_size,
        gitlab=gl,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        settings=settings,
        jenkins_map=jenkins_map,
        accounts=accounts,
    )
    if len(saasherder.namespaces) == 0:
        logging.warning("no targets found")
        sys.exit(ExitCodes.SUCCESS)

    ri, oc_map = ob.fetch_current_state(
        namespaces=saasherder.namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        init_api_resources=True,
        cluster_admin=saasherder.cluster_admin,
    )
    # ensure OC clients are cleaned up when the run ends
    defer(oc_map.cleanup)
    saasherder.populate_desired_state(ri)

    # validate that this deployment is valid
    # based on promotion information in targets
    if not saasherder.validate_promotions():
        logging.error("invalid promotions")
        ri.register_error()
        sys.exit(ExitCodes.ERROR)

    # if saas_file_name is defined, the integration
    # is being called from multiple running instances
    actions = ob.realize_data(
        dry_run,
        oc_map,
        ri,
        thread_pool_size,
        caller=saas_file_name,
        wait_for_namespace=True,
        no_dry_run_skip_compare=(not saasherder.compare),
        take_over=saasherder.take_over,
    )

    if not dry_run:
        if saasherder.publish_job_logs:
            try:
                ob.follow_logs(oc_map, actions, io_dir)
            except Exception as e:
                logging.error(str(e))
                ri.register_error()
        try:
            ob.validate_data(oc_map, actions)
        except Exception as e:
            logging.error(str(e))
            ri.register_error()

    # publish results of this deployment
    # based on promotion information in targets
    success = not ri.has_error_registered()
    # only publish promotions for deployment jobs (a single saas file)
    if notify:
        # Auto-promote next stages only if there are changes in the
        # promoting stage. This prevents trigger promotions on job re-runs
        auto_promote = len(actions) > 0
        mr_cli = mr_client_gateway.init(gitlab_project_id=gitlab_project_id)
        saasherder.publish_promotions(success, all_saas_files, mr_cli,
                                      auto_promote)

    if not success:
        sys.exit(ExitCodes.ERROR)

    # send human readable notifications to slack
    # we only do this if:
    # - this is not a dry run
    # - there is a single saas file deployed
    # - output is 'events'
    # - no errors were registered
    if notify and slack and actions and slack_info.get("output") == "events":
        for action in actions:
            message = (
                f"[{action['cluster']}] " +
                f"{action['kind']} {action['name']} {action['action']}"
            )
            slack.chat_post_message(message)
def get_apps_data(date, month_delta=1):
    """Collect per-app reporting data.

    Enriches every app (in place) with:
    - 'post_deploy_jobs': per cluster/namespace, whether a post-deploy
      Job depends on an openshift-saas-deploy job
    - 'promotions' / 'merge_activity': (total, successes) build counts
      per saas repo / upstream repo
    - 'container_vulnerabilities': dashdotdb vulnerability counts

    :param date: reference datetime; history older than
        date - month_delta months is ignored
    :param month_delta: reporting window size in months
    :return: the list of apps, enriched in place
    """
    apps = queries.get_apps()
    saas_files = queries.get_saas_files()
    jjb, _ = init_jjb()
    saas_jobs = jjb.get_all_jobs(job_types=['saas-deploy', 'promote-to-prod'])
    build_master_jobs = jjb.get_all_jobs(job_types=['build-master'])
    jenkins_map = jenkins_base.get_jenkins_map()
    time_limit = date - relativedelta(months=month_delta)
    timestamp_limit = \
        int(time_limit.replace(tzinfo=timezone.utc).timestamp())
    saas_build_history = \
        get_build_history(jenkins_map, saas_jobs, timestamp_limit)
    build_master_build_history = \
        get_build_history(jenkins_map, build_master_jobs, timestamp_limit)

    # dashdotdb credentials come from the app-interface secret store
    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    secret_content = secret_reader.read_all({'path': DASHDOTDB_SECRET})
    dashdotdb_url = secret_content['url']
    dashdotdb_user = secret_content['username']
    dashdotdb_pass = secret_content['password']
    # raw prometheus-format text, parsed per app further down
    metrics = requests.get(f'{dashdotdb_url}/api/v1/metrics',
                           auth=(dashdotdb_user, dashdotdb_pass)).text
    namespaces = queries.get_namespaces()

    for app in apps:
        if not app['codeComponents']:
            continue
        app_name = app['name']

        logging.info(f"collecting post-deploy jobs "
                     f"information for {app_name}")
        # first pass: register every cluster/namespace that has a
        # Deployment/DeploymentConfig saas file, defaulting to False
        post_deploy_jobs = {}
        for saas_file in saas_files:
            if saas_file['app']['name'] != app_name:
                continue
            resource_types = saas_file['managedResourceTypes']

            # Only jobs of these types are expected to have a
            # further post-deploy job
            # NOTE(review): the 'not in' on DeploymentConfig looks
            # inverted ('DeploymentConfig' in resource_types would
            # match the comment) - confirm intent
            if not any([
                'Deployment' in resource_types,
                'DeploymentConfig' not in resource_types
            ]):
                continue

            for resource_template in saas_file['resourceTemplates']:
                for target in resource_template['targets']:
                    cluster = target['namespace']['cluster']['name']
                    namespace = target['namespace']['name']
                    post_deploy_jobs[cluster] = {}
                    post_deploy_jobs[cluster][namespace] = False

        # second pass: mark namespaces whose Job-type saas file has an
        # upstream openshift-saas-deploy job
        for saas_file in saas_files:
            if saas_file['app']['name'] != app_name:
                continue
            resource_types = saas_file['managedResourceTypes']
            if 'Job' not in resource_types:
                continue
            for resource_template in saas_file['resourceTemplates']:
                for target in resource_template['targets']:

                    cluster = target['namespace']['cluster']['name']
                    namespace = target['namespace']['name']

                    # This block skips the check if the cluster/namespace
                    # has no Deployment/DeploymentConfig job associated.
                    if cluster not in post_deploy_jobs:
                        continue
                    if namespace not in post_deploy_jobs[cluster]:
                        continue

                    # Post-deploy job must depend on a openshift-saas-deploy
                    # job
                    if target['upstream'] is None:
                        continue
                    if target['upstream'].startswith('openshift-saas-deploy-'):
                        post_deploy_jobs[cluster][namespace] = True

        app['post_deploy_jobs'] = post_deploy_jobs

        logging.info(f"collecting promotions for {app_name}")
        app['promotions'] = {}
        saas_repos = [
            c['url'] for c in app['codeComponents']
            if c['resource'] == 'saasrepo'
        ]
        for sr in saas_repos:
            sr_history = saas_build_history.get(sr)
            if not sr_history:
                continue
            successes = [h for h in sr_history if h == 'SUCCESS']
            app['promotions'][sr] = (len(sr_history), len(successes))

        logging.info(f"collecting merge activity for {app_name}")
        app['merge_activity'] = {}
        code_repos = [
            c['url'] for c in app['codeComponents']
            if c['resource'] == 'upstream'
        ]
        for cr in code_repos:
            cr_history = build_master_build_history.get(cr)
            if not cr_history:
                continue
            successes = [h for h in cr_history if h == 'SUCCESS']
            app['merge_activity'][cr] = (len(cr_history), len(successes))

        logging.info(f"collecting vulnerabilities information for {app_name}")
        app_namespaces = []
        for namespace in namespaces:
            if namespace['app']['name'] != app['name']:
                continue
            app_namespaces.append(namespace)
        app_metrics = {}
        for family in text_string_to_metric_families(metrics):
            for sample in family.samples:
                if sample.name != 'imagemanifestvuln_total':
                    continue
                for app_namespace in app_namespaces:
                    cluster = sample.labels['cluster']
                    if app_namespace['cluster']['name'] != cluster:
                        continue
                    namespace = sample.labels['namespace']
                    if app_namespace['name'] != namespace:
                        continue
                    severity = sample.labels['severity']
                    if cluster not in app_metrics:
                        app_metrics[cluster] = {}
                    if namespace not in app_metrics[cluster]:
                        app_metrics[cluster][namespace] = {}
                    if severity not in app_metrics[cluster][namespace]:
                        value = int(sample.value)
                        app_metrics[cluster][namespace][severity] = value
        app['container_vulnerabilities'] = app_metrics

    return apps
def get_apps_data(date, month_delta=1):
    """Collect per-app reporting data.

    Enriches every app (in place) with:
    - 'promotions' / 'merge_activity': (total, successes) build counts
      per saas repo / upstream repo
    - 'container_vulnerabilities': dashdotdb vulnerability counts
    - 'deployment_validations': dashdotdb validation pass/fail counts

    :param date: reference datetime; history older than
        date - month_delta months is ignored
    :param month_delta: reporting window size in months
    :return: the list of apps, enriched in place
    """
    apps = queries.get_apps()
    jjb, _ = init_jjb()
    saas_jobs = jjb.get_all_jobs(job_types=['saas-deploy', 'promote-to-prod'])
    build_master_jobs = jjb.get_all_jobs(job_types=['build-master'])
    jenkins_map = jenkins_base.get_jenkins_map()
    time_limit = date - relativedelta(months=month_delta)
    timestamp_limit = \
        int(time_limit.replace(tzinfo=timezone.utc).timestamp())
    saas_build_history = \
        get_build_history(jenkins_map, saas_jobs, timestamp_limit)
    build_master_build_history = \
        get_build_history(jenkins_map, build_master_jobs, timestamp_limit)

    # dashdotdb credentials come from the app-interface secret store
    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    secret_content = secret_reader.read_all({'path': DASHDOTDB_SECRET})
    dashdotdb_url = secret_content['url']
    dashdotdb_user = secret_content['username']
    dashdotdb_pass = secret_content['password']
    # raw prometheus-format text, parsed per app further down
    metrics = requests.get(f'{dashdotdb_url}/api/v1/metrics',
                           auth=(dashdotdb_user, dashdotdb_pass)).text
    namespaces = queries.get_namespaces()

    for app in apps:
        if not app['codeComponents']:
            continue
        app_name = app['name']

        logging.info(f"collecting promotions for {app_name}")
        app['promotions'] = {}
        saas_repos = [
            c['url'] for c in app['codeComponents']
            if c['resource'] == 'saasrepo'
        ]
        for sr in saas_repos:
            sr_history = saas_build_history.get(sr)
            if not sr_history:
                continue
            successes = [h for h in sr_history if h == 'SUCCESS']
            app['promotions'][sr] = (len(sr_history), len(successes))

        logging.info(f"collecting merge activity for {app_name}")
        app['merge_activity'] = {}
        code_repos = [
            c['url'] for c in app['codeComponents']
            if c['resource'] == 'upstream'
        ]
        for cr in code_repos:
            cr_history = build_master_build_history.get(cr)
            if not cr_history:
                continue
            successes = [h for h in cr_history if h == 'SUCCESS']
            app['merge_activity'][cr] = (len(cr_history), len(successes))

        logging.info(f"collecting dashdotdb information for {app_name}")
        app_namespaces = []
        for namespace in namespaces:
            if namespace['app']['name'] != app['name']:
                continue
            app_namespaces.append(namespace)
        vuln_mx = {}
        validt_mx = {}
        for family in text_string_to_metric_families(metrics):
            for sample in family.samples:
                if sample.name == 'imagemanifestvuln_total':
                    for app_namespace in app_namespaces:
                        cluster = sample.labels['cluster']
                        if app_namespace['cluster']['name'] != cluster:
                            continue
                        namespace = sample.labels['namespace']
                        if app_namespace['name'] != namespace:
                            continue
                        severity = sample.labels['severity']
                        if cluster not in vuln_mx:
                            vuln_mx[cluster] = {}
                        if namespace not in vuln_mx[cluster]:
                            vuln_mx[cluster][namespace] = {}
                        if severity not in vuln_mx[cluster][namespace]:
                            value = int(sample.value)
                            vuln_mx[cluster][namespace][severity] = value
                if sample.name == 'deploymentvalidation_total':
                    for app_namespace in app_namespaces:
                        cluster = sample.labels['cluster']
                        if app_namespace['cluster']['name'] != cluster:
                            continue
                        namespace = sample.labels['namespace']
                        if app_namespace['name'] != namespace:
                            continue
                        validation = sample.labels['validation']
                        # dvo: fail == 1, pass == 0, py: true == 1, false == 0
                        # so: ({false|pass}, {true|fail})
                        # BUG FIX: prometheus sample labels are strings;
                        # indexing the tuple with the raw label raised
                        # TypeError. Convert the label to int first.
                        status = ('Passed',
                                  'Failed')[int(sample.labels['status'])]
                        if cluster not in validt_mx:
                            validt_mx[cluster] = {}
                        if namespace not in validt_mx[cluster]:
                            validt_mx[cluster][namespace] = {}
                        if validation not in validt_mx[cluster][namespace]:
                            validt_mx[cluster][namespace][validation] = {}
                        if status not in validt_mx[cluster][namespace][
                                validation]:  # noqa: E501
                            validt_mx[cluster][namespace][validation][
                                status] = {}  # noqa: E501
                        value = int(sample.value)
                        validt_mx[cluster][namespace][validation][
                            status] = value  # noqa: E501
        app['container_vulnerabilities'] = vuln_mx
        app['deployment_validations'] = validt_mx

    return apps
def run(dry_run, thread_pool_size=10, saas_file_name=None, env_name=None,
        defer=None):
    """Deploy saas-file targets to their OpenShift namespaces and
    optionally post deployment events to slack.

    :param dry_run: when True, compute and compare but do not apply
    :param thread_pool_size: threads for state fetching / realization
    :param saas_file_name: optional filter to a single saas file
    :param env_name: optional filter to a single environment
    :param defer: callback registrar for deferred cleanup (presumably
        injected by a decorator - confirm against caller)
    """
    saas_files = queries.get_saas_files(saas_file_name, env_name)
    if not saas_files:
        logging.error('no saas files found')
        sys.exit(1)
    instance = queries.get_gitlab_instance()
    # only init Jenkins clients for instances actually referenced
    desired_jenkins_instances = [s['instance']['name'] for s in saas_files]
    jenkins_map = jenkins_base.get_jenkins_map(
        desired_instances=desired_jenkins_instances)
    settings = queries.get_app_interface_settings()
    try:
        gl = GitLabApi(instance, settings=settings)
    except Exception:
        # allow execution without access to gitlab
        # as long as there are no access attempts.
        gl = None
    saasherder = SaasHerder(saas_files,
                            thread_pool_size=thread_pool_size,
                            gitlab=gl,
                            integration=QONTRACT_INTEGRATION,
                            integration_version=QONTRACT_INTEGRATION_VERSION,
                            settings=settings,
                            jenkins_map=jenkins_map)
    # saas-file validation errors abort the run
    if not saasherder.valid:
        sys.exit(1)
    if len(saasherder.namespaces) == 0:
        logging.warning('no targets found')
        sys.exit(0)
    ri, oc_map = ob.fetch_current_state(
        namespaces=saasherder.namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        init_api_resources=True)
    # ensure OC clients are cleaned up when the run ends
    defer(lambda: oc_map.cleanup())
    saasherder.populate_desired_state(ri)
    # if saas_file_name is defined, the integration
    # is being called from multiple running instances
    actions = ob.realize_data(dry_run, oc_map, ri,
                              caller=saas_file_name,
                              wait_for_namespace=True,
                              no_dry_run_skip_compare=(
                                  not saasherder.compare),
                              take_over=saasherder.take_over)
    if ri.has_error_registered():
        sys.exit(1)

    # send human readable notifications to slack
    # we only do this if:
    # - this is not a dry run
    # - there is a single saas file deployed
    # - output is 'events'
    # - no errors were registered
    if not dry_run and len(saasherder.saas_files) == 1:
        saas_file = saasherder.saas_files[0]
        slack_info = saas_file.get('slack')
        if slack_info and actions and slack_info.get('output') == 'events':
            slack = init_slack(slack_info, QONTRACT_INTEGRATION)
            for action in actions:
                message = \
                    f"[{action['cluster']}] " + \
                    f"{action['kind']} {action['name']} {action['action']}"
                slack.chat_post_message(message)
def run(dry_run, thread_pool_size=10, io_dir='throughput/',
        saas_file_name=None, env_name=None, gitlab_project_id=None,
        defer=None):
    """Deploy saas-file targets, publish promotion results and
    optionally post deployment events to slack.

    :param dry_run: when True, compute and compare but do not apply
    :param thread_pool_size: threads for state fetching / realization
    :param io_dir: directory used when following job logs
    :param saas_file_name: optional filter to a single saas file
    :param env_name: optional filter to a single environment
    :param gitlab_project_id: project used for promotion MRs
    :param defer: callback registrar for deferred cleanup (presumably
        injected by a decorator - confirm against caller)
    """
    # all saas files are needed to resolve promotion subscribers,
    # even when this run deploys only a filtered subset
    all_saas_files = queries.get_saas_files()
    saas_files = queries.get_saas_files(saas_file_name, env_name)
    if not saas_files:
        logging.error('no saas files found')
        sys.exit(ExitCodes.ERROR)
    instance = queries.get_gitlab_instance()
    # only init Jenkins clients for instances actually referenced
    desired_jenkins_instances = [s['instance']['name'] for s in saas_files]
    jenkins_map = jenkins_base.get_jenkins_map(
        desired_instances=desired_jenkins_instances)
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    try:
        gl = GitLabApi(instance, settings=settings)
    except Exception:
        # allow execution without access to gitlab
        # as long as there are no access attempts.
        gl = None
    saasherder = SaasHerder(saas_files,
                            thread_pool_size=thread_pool_size,
                            gitlab=gl,
                            integration=QONTRACT_INTEGRATION,
                            integration_version=QONTRACT_INTEGRATION_VERSION,
                            settings=settings,
                            jenkins_map=jenkins_map,
                            accounts=accounts)
    if len(saasherder.namespaces) == 0:
        logging.warning('no targets found')
        sys.exit(ExitCodes.SUCCESS)
    ri, oc_map = ob.fetch_current_state(
        namespaces=saasherder.namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        init_api_resources=True)
    # ensure OC clients are cleaned up when the run ends
    defer(lambda: oc_map.cleanup())
    saasherder.populate_desired_state(ri)
    # validate that this deployment is valid
    # based on promotion information in targets
    if not saasherder.validate_promotions():
        logging.error('invalid promotions')
        sys.exit(ExitCodes.ERROR)
    # if saas_file_name is defined, the integration
    # is being called from multiple running instances
    actions = ob.realize_data(dry_run, oc_map, ri,
                              caller=saas_file_name,
                              wait_for_namespace=True,
                              no_dry_run_skip_compare=(
                                  not saasherder.compare),
                              take_over=saasherder.take_over)

    if not dry_run:
        if saasherder.publish_job_logs:
            try:
                ob.follow_logs(oc_map, actions, io_dir)
            except Exception as e:
                logging.error(str(e))
                ri.register_error()
        try:
            ob.validate_data(oc_map, actions)
        except Exception as e:
            logging.error(str(e))
            ri.register_error()

    # publish results of this deployment
    # based on promotion information in targets
    success = not ri.has_error_registered()
    # only publish promotions for deployment jobs (a single saas file)
    if not dry_run and len(saasherder.saas_files) == 1:
        mr_cli = mr_client_gateway.init(gitlab_project_id=gitlab_project_id)
        saasherder.publish_promotions(success, all_saas_files, mr_cli)

    if not success:
        sys.exit(ExitCodes.ERROR)

    # send human readable notifications to slack
    # we only do this if:
    # - this is not a dry run
    # - there is a single saas file deployed
    # - output is 'events'
    # - no errors were registered
    if not dry_run and len(saasherder.saas_files) == 1:
        saas_file = saasherder.saas_files[0]
        slack_info = saas_file.get('slack')
        if slack_info and actions and slack_info.get('output') == 'events':
            slack = init_slack(slack_info, QONTRACT_INTEGRATION,
                               init_usergroups=False)
            for action in actions:
                message = \
                    f"[{action['cluster']}] " + \
                    f"{action['kind']} {action['name']} {action['action']}"
                slack.chat_post_message(message)