def run(dry_run, gitlab_project_id=None, gitlab_merge_request_id=None):
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    gl = GitLabApi(instance, project_id=gitlab_project_id, settings=settings)
    labels = gl.get_merge_request_labels(gitlab_merge_request_id)
    output = "yes" if SKIP_CI in labels else "no"
    print(output)
def init(gitlab_project_id=None, sqs_or_gitlab=None):
    """
    Creates the Merge Request client of a given type.

    :param gitlab_project_id: used when the client type is 'gitlab'
    :param sqs_or_gitlab: 'gitlab' or 'sqs'
    :return: an instance of the selected MR client.
    """
    if sqs_or_gitlab is None:
        settings = queries.get_app_interface_settings()
        client_type = settings.get('mergeRequestGateway', 'gitlab')
    else:
        client_type = sqs_or_gitlab

    if client_type == 'gitlab':
        if gitlab_project_id is None:
            raise MRClientGatewayError('Missing "gitlab_project_id".')
        instance = queries.get_gitlab_instance()
        settings = queries.get_app_interface_settings()
        saas_files = queries.get_saas_files_minimal(v1=True, v2=True)
        return GitLabApi(instance, project_id=gitlab_project_id,
                         settings=settings, saas_files=saas_files)
    elif client_type == 'sqs':
        accounts = queries.get_aws_accounts()
        settings = queries.get_app_interface_settings()
        return SQSGateway(accounts, settings=settings)
    else:
        raise MRClientGatewayError(f'Invalid client type: {client_type}')
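A minimal usage sketch, not part of the source: assuming this function is exposed as mr_client_gateway.init (as it is called elsewhere in this collection), a caller either selects the client type explicitly or defers to the 'mergeRequestGateway' setting. The project id below is illustrative.

def example_init_mr_clients():
    # Explicit 'gitlab' type requires a project id (illustrative value).
    gitlab_client = init(gitlab_project_id=1234, sqs_or_gitlab='gitlab')
    # 'sqs' type needs no project id; it reads AWS accounts via queries.
    sqs_client = init(sqs_or_gitlab='sqs')
    # Passing no type falls back to settings['mergeRequestGateway'].
    default_client = init(gitlab_project_id=1234)
    return gitlab_client, sqs_client, default_client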
def run(dry_run):
    default_days_interval = 15
    default_limit = 8
    default_enable_closing = False
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    repos = queries.get_repos_gitlab_housekeeping(server=instance['url'])
    for repo in repos:
        hk = repo['housekeeping']
        project_url = repo['url']
        days_interval = hk.get('days_interval') or default_days_interval
        enable_closing = hk.get('enable_closing') or default_enable_closing
        limit = hk.get('limit') or default_limit
        gl = GitLabApi(instance, project_url=project_url, settings=settings)
        handle_stale_items(dry_run, gl, days_interval, enable_closing,
                           'issue')
        handle_stale_items(dry_run, gl, days_interval, enable_closing,
                           'merge-request')
        rebase = hk.get('rebase')
        try:
            merge_merge_requests(dry_run, gl, limit, rebase, insist=True)
        except Exception:
            merge_merge_requests(dry_run, gl, limit, rebase)
        if rebase:
            rebase_merge_requests(dry_run, gl, limit)
def run(dry_run):
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    code_components = queries.get_code_components()
    app_int_repos = [c['url'] for c in code_components]
    saas_bundle_repos = [c['url'] for c in code_components
                         if c['resource'] == 'bundle']
    gl = GitLabApi(instance, settings=settings)

    project_requests = instance['projectRequests'] or []
    error = False
    for pr in project_requests:
        group = pr['group']
        group_id, existing_projects = gl.get_group_id_and_projects(group)
        requested_projects = pr['projects']
        projects_to_create = [p for p in requested_projects
                              if p not in existing_projects]
        for p in projects_to_create:
            project_url = gl.get_project_url(group, p)
            if project_url not in app_int_repos:
                logging.error(f'{project_url} missing from all codeComponents')
                error = True
                continue
            logging.info(['create_project', group, p])
            if not dry_run:
                gl.create_project(group_id, p)
            if project_url in saas_bundle_repos:
                logging.info(['initiate_saas_bundle_repo', group, p])
                if not dry_run:
                    gl.initiate_saas_bundle_repo(project_url)

    sys.exit(error)
def run(dry_run, gitlab_project_id=None, gitlab_merge_request_id=None):
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    gl = GitLabApi(instance, project_id=gitlab_project_id, settings=settings)
    labels = gl.get_merge_request_labels(gitlab_merge_request_id)
    output = 'yes' if 'skip-ci' in labels else 'no'
    print(output)
def run(dry_run, thread_pool_size=10):
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    repos = queries.get_repos_gitlab_owner(server=instance['url'])
    threaded.run(act, repos, thread_pool_size,
                 dry_run=dry_run,
                 instance=instance,
                 settings=settings)
def run(dry_run, thread_pool_size=10):
    saas_files = queries.get_saas_files()
    if not saas_files:
        logging.error('no saas files found')
        sys.exit(1)

    # Remove saas-file targets that are disabled
    for saas_file in saas_files[:]:
        resource_templates = saas_file['resourceTemplates']
        for rt in resource_templates[:]:
            targets = rt['targets']
            for target in targets[:]:
                if target['disable']:
                    targets.remove(target)

    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    gl = GitLabApi(instance, settings=settings)
    jenkins_map = jenkins_base.get_jenkins_map()

    saasherder = SaasHerder(
        saas_files,
        thread_pool_size=thread_pool_size,
        gitlab=gl,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        settings=settings,
        accounts=accounts)

    trigger_specs = saasherder.get_moving_commits_diff(dry_run)
    already_triggered = []
    error = False
    for job_spec in trigger_specs:
        saas_file_name = job_spec['saas_file_name']
        env_name = job_spec['env_name']
        instance_name = job_spec['instance_name']
        job_name = get_openshift_saas_deploy_job_name(
            saas_file_name, env_name, settings)
        if job_name not in already_triggered:
            logging.info(['trigger_job', instance_name, job_name])
            if dry_run:
                already_triggered.append(job_name)

        if not dry_run:
            jenkins = jenkins_map[instance_name]
            try:
                if job_name not in already_triggered:
                    jenkins.trigger_job(job_name)
                    already_triggered.append(job_name)
                saasherder.update_moving_commit(job_spec)
            except Exception:
                error = True
                logging.error(
                    f"could not trigger job {job_name} in {instance_name}.")

    if error:
        sys.exit(1)
def run(dry_run, thread_pool_size=10):
    saas_files = queries.get_saas_files()
    if not saas_files:
        logging.error('no saas files found')
        sys.exit(1)

    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    gl = GitLabApi(instance, settings=settings)
    jenkins_map = jenkins_base.get_jenkins_map()

    saasherder = SaasHerder(
        saas_files,
        thread_pool_size=thread_pool_size,
        gitlab=gl,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        settings=settings,
        accounts=accounts)

    trigger_specs = saasherder.get_configs_diff()
    already_triggered = []

    error = True  # enter loop
    while error:
        error = False
        for job_spec in trigger_specs:
            saas_file_name = job_spec['saas_file_name']
            env_name = job_spec['env_name']
            instance_name = job_spec['instance_name']
            job_name = get_openshift_saas_deploy_job_name(
                saas_file_name, env_name, settings)
            if job_name not in already_triggered:
                logging.info(['trigger_job', instance_name, job_name])
                if dry_run:
                    already_triggered.append(job_name)

            if not dry_run:
                jenkins = jenkins_map[instance_name]
                try:
                    if job_name not in already_triggered:
                        jenkins.trigger_job(job_name)
                        already_triggered.append(job_name)
                    saasherder.update_config(job_spec)
                except Exception as e:
                    error = True
                    logging.error(
                        f"could not trigger job {job_name} "
                        f"in {instance_name}. details: {str(e)}"
                    )

        if error:
            time.sleep(10)  # add to constants module once created
def run(dry_run, thread_pool_size=10, saas_file_name=None, env_name=None,
        defer=None):
    saas_files = queries.get_saas_files(saas_file_name, env_name)
    if not saas_files:
        logging.error('no saas files found')
        sys.exit(1)

    instance = queries.get_gitlab_instance()
    desired_jenkins_instances = [s['instance']['name'] for s in saas_files]
    jenkins_map = jenkins_base.get_jenkins_map(
        desired_instances=desired_jenkins_instances)
    settings = queries.get_app_interface_settings()
    try:
        gl = GitLabApi(instance, settings=settings)
    except Exception:
        # allow execution without access to gitlab
        # as long as there are no access attempts.
        gl = None

    saasherder = SaasHerder(
        saas_files,
        thread_pool_size=thread_pool_size,
        gitlab=gl,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        settings=settings,
        jenkins_map=jenkins_map)
    if not saasherder.valid:
        sys.exit(1)
    if len(saasherder.namespaces) == 0:
        logging.warning('no targets found')
        sys.exit(0)

    ri, oc_map = ob.fetch_current_state(
        namespaces=saasherder.namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION)
    defer(lambda: oc_map.cleanup())
    saasherder.populate_desired_state(ri)
    # if saas_file_name is defined, the integration
    # is being called from multiple running instances
    ob.realize_data(dry_run, oc_map, ri,
                    caller=saas_file_name,
                    wait_for_namespace=True,
                    no_dry_run_skip_compare=True,
                    take_over=saasherder.take_over)

    if ri.has_error_registered():
        sys.exit(1)
def run(dry_run, wait_for_pipeline):
    default_days_interval = 15
    default_limit = 8
    default_enable_closing = False
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    repos = queries.get_repos_gitlab_housekeeping(server=instance["url"])
    for repo in repos:
        hk = repo["housekeeping"]
        project_url = repo["url"]
        days_interval = hk.get("days_interval") or default_days_interval
        enable_closing = hk.get("enable_closing") or default_enable_closing
        limit = hk.get("limit") or default_limit
        pipeline_timeout = hk.get("pipeline_timeout")
        gl = GitLabApi(instance, project_url=project_url, settings=settings)
        handle_stale_items(dry_run, gl, days_interval, enable_closing, "issue")
        handle_stale_items(dry_run, gl, days_interval, enable_closing,
                           "merge-request")
        rebase = hk.get("rebase")
        try:
            merge_merge_requests(
                dry_run,
                gl,
                limit,
                rebase,
                pipeline_timeout,
                insist=True,
                wait_for_pipeline=wait_for_pipeline,
                gl_instance=instance,
                gl_settings=settings,
            )
        except Exception:
            merge_merge_requests(
                dry_run,
                gl,
                limit,
                rebase,
                pipeline_timeout,
                wait_for_pipeline=wait_for_pipeline,
                gl_instance=instance,
                gl_settings=settings,
            )
        if rebase:
            rebase_merge_requests(
                dry_run,
                gl,
                limit,
                pipeline_timeout=pipeline_timeout,
                wait_for_pipeline=wait_for_pipeline,
                gl_instance=instance,
                gl_settings=settings,
            )
def __new__(cls, url, *args, **kwargs):
    parsed_url = urlparse(url)
    settings = queries.get_app_interface_settings()
    if 'github' in parsed_url.hostname:
        instance = queries.get_github_instance()
        return GithubApi(instance, repo_url=url, settings=settings)
    if 'gitlab' in parsed_url.hostname:
        instance = queries.get_gitlab_instance()
        return GitLabApi(instance, project_url=url, settings=settings)
def __init__(self, project_id, mr_id, maintainers_group):
    self.exit_code = self.OK
    self.maintainers_group = maintainers_group
    self.instance = queries.get_gitlab_instance()
    self.settings = queries.get_app_interface_settings()
    self.gl_cli = GitLabApi(self.instance, project_id=project_id,
                            settings=self.settings)
    self.mr = self.gl_cli.get_merge_request(mr_id)
def run(dry_run, gitlab_project_id):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_queue_aws_accounts()
    sqs_cli = SQSGateway(accounts, settings=settings)
    instance = queries.get_gitlab_instance()
    saas_files = queries.get_saas_files_minimal(v1=True, v2=True)
    gitlab_cli = GitLabApi(instance, project_id=gitlab_project_id,
                           settings=settings, saas_files=saas_files)

    errors_occurred = False
    while True:
        messages = sqs_cli.receive_messages()
        logging.info("received %s messages", len(messages))

        if not messages:
            # sqs_cli.receive_messages delivers messages in chunks
            # until the queue is empty... when that happens,
            # we end this integration run
            break

        # not all integrations are going to resend their MR messages
        # therefore we need to be careful not to delete any messages
        # before they have been properly handled
        for m in messages:
            receipt_handle, body = m[0], m[1]
            logging.info("received message %s with body %s",
                         receipt_handle[:6], json.dumps(body))

            if not dry_run:
                try:
                    merge_request = mr.init_from_sqs_message(body)
                    merge_request.submit_to_gitlab(gitlab_cli=gitlab_cli)
                    sqs_cli.delete_message(str(receipt_handle))
                except mr.UnknownMergeRequestType as ex:
                    # Received an unknown MR type.
                    # This could be a producer being on a newer version
                    # of qontract-reconcile than the consumer.
                    # Therefore we don't delete it from the queue for
                    # potential future processing.
                    # TODO - monitor age of messages in queue
                    logging.warning(ex)
                    errors_occurred = True
                except mr.MergeRequestProcessingError as processing_error:
                    logging.error(processing_error)
                    errors_occurred = True

    if errors_occurred:
        sys.exit(1)
def __new__(cls, url: str, *args, **kwargs):
    parsed_url = urlparse(url)
    settings = queries.get_app_interface_settings()
    if parsed_url.hostname:
        if "github" in parsed_url.hostname:
            instance = queries.get_github_instance()
            return GithubApi(instance, repo_url=url, settings=settings)
        if "gitlab" in parsed_url.hostname:
            instance = queries.get_gitlab_instance()
            return GitLabApi(instance, project_url=url, settings=settings)
    raise ValueError(f"Unable to handle URL: {url}")
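A hedged usage sketch, assuming the __new__ above belongs to a small factory class (named VCSApi here purely for illustration): constructing it with a repository URL yields a GithubApi or GitLabApi client depending on the hostname, and any other host raises ValueError. The URLs are illustrative.

def example_get_repo_api():
    # github.com host -> GithubApi
    gh = VCSApi("https://github.com/app-sre/qontract-reconcile")
    # gitlab host -> GitLabApi
    gl = VCSApi("https://gitlab.example.com/service/app-interface")
    return gh, gl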
def run(dry_run, gitlab_project_id=None, gitlab_merge_request_id=None):
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    gl = GitLabApi(instance, project_id=gitlab_project_id, settings=settings)
    project_labels = gl.get_project_labels()
    labels = gl.get_merge_request_labels(gitlab_merge_request_id)
    changed_paths = \
        gl.get_merge_request_changed_paths(gitlab_merge_request_id)
    guessed_labels = guess_labels(project_labels, changed_paths)
    labels_to_add = [l for l in guessed_labels if l not in labels]
    if labels_to_add:
        logging.info(['add_labels', labels_to_add])
        gl.add_labels_to_merge_request(gitlab_merge_request_id, labels_to_add)
def run(dry_run):
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    gl = GitLabApi(instance, settings=settings)
    current_state = get_current_state(instance, gl)
    desired_state = get_desired_state(instance, gl)
    diffs = calculate_diff(current_state, desired_state)

    for diff in diffs:
        logging.info(list(diff.values()))

        if not dry_run:
            act(diff, gl)
def run(dry_run=False, days_interval=15, enable_closing=False, limit=1):
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    repos = queries.get_repos_gitlab_housekeeping(server=instance['url'])
    for repo in repos:
        project_url = repo['url']
        gl = GitLabApi(instance, project_url=project_url, settings=settings)
        handle_stale_items(dry_run, gl, days_interval, enable_closing,
                           'issue')
        handle_stale_items(dry_run, gl, days_interval, enable_closing,
                           'merge-request')
        merge_merge_requests(dry_run, gl, limit)
        if repo['enable_rebase']:
            rebase_merge_requests(dry_run, gl, limit)
def run(dry_run=False, thread_pool_size=10):
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    gl = GitLabApi(instance, settings=settings)
    repos = queries.get_repos(server=gl.server)
    app_sre = gl.get_app_sre_group_users()
    results = threaded.run(get_members_to_add, repos, thread_pool_size,
                           gl=gl, app_sre=app_sre)
    members_to_add = [item for sublist in results for item in sublist]
    for m in members_to_add:
        logging.info(['add_maintainer', m["repo"], m["user"].username])
        if not dry_run:
            gl.add_project_member(m["repo"], m["user"])
def run(dry_run):
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    gl = GitLabApi(instance, settings=settings)
    previous_urls = queries.get_jenkins_instances_previous_urls()
    repos = queries.get_repos(server=gl.server)
    for repo in repos:
        hooks = gl.get_project_hooks(repo)
        for hook in hooks:
            hook_url = hook.url
            for previous_url in previous_urls:
                if hook_url.startswith(previous_url):
                    logging.info(['delete_hook', repo, hook_url])
                    if not dry_run:
                        hook.delete()
def run(dry_run, gitlab_project_id=None, thread_pool_size=10):
    users = init_users()
    user_specs = threaded.run(init_user_spec, users, thread_pool_size)
    users_to_delete = [(username, paths)
                       for username, delete, paths in user_specs
                       if delete]

    if not dry_run:
        instance = queries.get_gitlab_instance()
        settings = queries.get_app_interface_settings()
        gl = GitLabApi(instance, project_id=gitlab_project_id,
                       settings=settings)

    for username, paths in users_to_delete:
        logging.info(['delete_user', username])

        if not dry_run:
            gl.create_delete_user_mr(username, paths)
def init(gitlab_project_id=None, override_pr_gateway_type=None):
    pr_gateway_type = override_pr_gateway_type or get_pr_gateway_type()

    if pr_gateway_type == 'gitlab':
        instance = queries.get_gitlab_instance()
        settings = queries.get_app_interface_settings()
        if gitlab_project_id is None:
            raise PullRequestGatewayError('missing gitlab project id')
        return GitLabApi(instance, project_id=gitlab_project_id,
                         settings=settings)
    elif pr_gateway_type == 'sqs':
        accounts = queries.get_aws_accounts()
        settings = queries.get_app_interface_settings()
        return SQSGateway(accounts, settings=settings)
    else:
        raise PullRequestGatewayError(
            'invalid pull request gateway: {}'.format(pr_gateway_type))
def run(dry_run):
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    gl = GitLabApi(instance, settings=settings)
    secret_reader = SecretReader(settings=settings)

    # Jira
    repos = queries.get_repos_gitlab_jira(server=gl.server)
    for repo in repos:
        skip = False
        repo_url = repo['url']
        project = gl.get_project(repo_url=repo_url)
        services = project.services
        current_jira = services.get('jira')

        desired_jira = repo['jira']
        desired_jira_url = desired_jira['serverUrl']
        desired_jira_credentials = \
            secret_reader.read_all(desired_jira['token'])

        if current_jira.active:
            properties = current_jira.properties
            desired_jira_username = desired_jira_credentials['username']
            if properties['url'] == desired_jira_url \
                    and properties['username'] == desired_jira_username:
                skip = True

        if skip:
            continue

        logging.info(['update_jira', repo_url, desired_jira_url])
        if not dry_run:
            new_data = {
                'active': True,
                'url': desired_jira_url,
                'username': desired_jira_credentials['username'],
                'password': desired_jira_credentials['password'],
                'commit_events': True,
                'merge_requests_events': True,
                'comment_on_event_enabled': False
            }
            services.update('jira', new_data=new_data)
def run(dry_run=False, thread_pool_size=10, saas_file_name=None,
        env_name=None, defer=None):
    saas_files = queries.get_saas_files(saas_file_name, env_name)
    if not saas_files:
        logging.error('no saas files found')
        sys.exit(1)

    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    try:
        gl = GitLabApi(instance, settings=settings)
    except Exception:
        # allow execution without access to gitlab
        # as long as there are no access attempts.
        gl = None

    saasherder = SaasHerder(
        saas_files,
        thread_pool_size=thread_pool_size,
        gitlab=gl,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        settings=settings)
    if not saasherder.valid:
        sys.exit(1)

    ri, oc_map = ob.fetch_current_state(
        namespaces=saasherder.namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION)
    defer(lambda: oc_map.cleanup())
    saasherder.populate_desired_state(ri)
    # if saas_file_name is defined, the integration
    # is being called from multiple running instances
    ob.realize_data(dry_run, oc_map, ri, caller=saas_file_name)

    if ri.has_error_registered():
        sys.exit(1)
def run(dry_run, gitlab_project_id=None,
        gitlab_merge_request_id=None) -> None:
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    gl = GitLabApi(instance, project_id=gitlab_project_id, settings=settings)
    project_labels = gl.get_project_labels()
    labels = gl.get_merge_request_labels(gitlab_merge_request_id)
    changed_paths = gl.get_merge_request_changed_paths(
        gitlab_merge_request_id)
    guessed_labels = guess_labels(project_labels, changed_paths)
    labels_to_add = [b for b in guessed_labels if b not in labels]
    labels_to_create = [b for b in labels_to_add if b not in project_labels]

    # This integration does not check dry-run mode, as it is always
    # invoked with the dry_run flag set to true.
    if labels_to_create:
        logging.info(["create_labels", labels_to_create])
        for label in labels_to_create:
            gl.create_label(label, LABEL_COLOR)

    if labels_to_add:
        logging.info(["add_labels", labels_to_add])
        gl.add_labels_to_merge_request(gitlab_merge_request_id, labels_to_add)
def run(dry_run):
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    gl = GitLabApi(instance, settings=settings)
    secret_reader = SecretReader(settings=settings)

    # Jira
    repos = queries.get_repos_gitlab_jira(server=gl.server)
    for repo in repos:
        skip = False
        repo_url = repo["url"]
        services = get_repo_services(gl, repo_url)
        current_jira = services.get("jira")

        desired_jira = repo["jira"]
        desired_jira_url = desired_jira["serverUrl"]
        desired_jira_credentials = secret_reader.read_all(desired_jira["token"])

        if current_jira.active:
            properties = current_jira.properties
            desired_jira_username = desired_jira_credentials["username"]
            if (properties["url"] == desired_jira_url
                    and properties["username"] == desired_jira_username):
                skip = True

        if skip:
            continue

        logging.info(["update_jira", repo_url, desired_jira_url])
        if not dry_run:
            new_data = {
                "active": True,
                "url": desired_jira_url,
                "username": desired_jira_credentials["username"],
                "password": desired_jira_credentials["password"],
                "commit_events": True,
                "merge_requests_events": True,
                "comment_on_event_enabled": False,
            }
            services.update("jira", new_data=new_data)
def run(dry_run=False, thread_pool_size=10, defer=None):
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    aws_accounts = queries.get_aws_accounts()
    gl = GitLabApi(instance, settings=settings)
    saas_files = queries.get_saas_files()
    saasherder = SaasHerder(
        saas_files,
        gitlab=gl,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        settings=settings)
    ri, oc_map = ob.fetch_current_state(
        namespaces=saasherder.namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION)
    defer(lambda: oc_map.cleanup())
    saasherder.populate_desired_state(ri)
    enable_deletion = False if ri.has_error_registered() else True
    ob.realize_data(dry_run, oc_map, ri, enable_deletion=enable_deletion)
    saasherder.slack_notify(dry_run, aws_accounts, ri)
def run(dry_run, gitlab_project_id):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    sqs_cli = SQSGateway(accounts, settings=settings)
    instance = queries.get_gitlab_instance()
    saas_files = queries.get_saas_files_minimal()
    gitlab_cli = GitLabApi(instance, project_id=gitlab_project_id,
                           settings=settings, saas_files=saas_files)

    while True:
        messages = sqs_cli.receive_messages()
        logging.info('received %s messages', len(messages))

        if not messages:
            break

        for message in messages:
            # Let's first delete all the messages we received,
            # otherwise they will come back in 30s.
            receipt_handle = message[0]
            sqs_cli.delete_message(str(receipt_handle))

        for message in messages:
            # Time to process the messages. Any failure here is not
            # critical, even though we already deleted the messages,
            # since the producers will keep re-sending the message
            # until the MR gets merged to app-interface
            receipt_handle, body = message[0], message[1]
            logging.info('received message %s with body %s',
                         receipt_handle[:6], json.dumps(body))

            if not dry_run:
                merge_request = mr.init_from_sqs_message(body)
                merge_request.submit_to_gitlab(gitlab_cli=gitlab_cli)
def setup(thread_pool_size, internal, use_jump_host, integration,
          integration_version, v1, v2):
    """Setup required resources for triggering integrations

    Args:
        thread_pool_size (int): Thread pool size to use
        internal (bool): Should run for internal/external/all clusters
        use_jump_host (bool): Should use jump host to reach clusters
        integration (string): Name of calling integration
        integration_version (string): Version of calling integration
        v1 (bool): Should trigger for v1 SaaS files
        v2 (bool): Should trigger for v2 SaaS files

    Returns:
        saasherder (SaasHerder): a SaasHerder instance
        jenkins_map (dict): Instance names with JenkinsApi instances
        oc_map (OC_Map): a dictionary of OC clients per cluster
        settings (dict): App-interface settings
        error (bool): True if one happened, False otherwise
    """
    saas_files = queries.get_saas_files(v1=v1, v2=v2)
    if not saas_files:
        logging.error("no saas files found")
        return None, None, None, None, True
    saas_files = [sf for sf in saas_files if is_in_shard(sf["name"])]

    # Remove saas-file targets that are disabled
    for saas_file in saas_files[:]:
        resource_templates = saas_file["resourceTemplates"]
        for rt in resource_templates[:]:
            targets = rt["targets"]
            for target in targets[:]:
                if target["disable"]:
                    targets.remove(target)

    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    gl = GitLabApi(instance, settings=settings)
    jenkins_map = jenkins_base.get_jenkins_map()
    pipelines_providers = queries.get_pipelines_providers()
    tkn_provider_namespaces = [
        pp["namespace"] for pp in pipelines_providers
        if pp["provider"] == "tekton"
    ]

    oc_map = OC_Map(
        namespaces=tkn_provider_namespaces,
        integration=integration,
        settings=settings,
        internal=internal,
        use_jump_host=use_jump_host,
        thread_pool_size=thread_pool_size,
    )

    saasherder = SaasHerder(
        saas_files,
        thread_pool_size=thread_pool_size,
        gitlab=gl,
        integration=integration,
        integration_version=integration_version,
        settings=settings,
        jenkins_map=jenkins_map,
        accounts=accounts,
    )

    return saasherder, jenkins_map, oc_map, settings, False
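A hedged usage sketch based on the docstring above: callers unpack the five-element return value and stop early when error is True. The integration name and version below are placeholders, not values taken from the source.

def example_setup_caller():
    saasherder, jenkins_map, oc_map, settings, error = setup(
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        integration="example-integration",
        integration_version="0.1.0",
        v1=True,
        v2=True,
    )
    if error:
        # setup() already logged the reason; abort the run.
        sys.exit(1)
    return saasherder, jenkins_map, oc_map, settings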
def get_gitlab_api():
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    return GitLabApi(instance, settings=settings)
def run(
    dry_run,
    thread_pool_size=10,
    io_dir="throughput/",
    saas_file_name=None,
    env_name=None,
    gitlab_project_id=None,
    defer=None,
):
    all_saas_files = queries.get_saas_files(v1=True, v2=True)
    saas_files = queries.get_saas_files(saas_file_name, env_name,
                                        v1=True, v2=True)
    app_interface_settings = queries.get_app_interface_settings()
    if not saas_files:
        logging.error("no saas files found")
        sys.exit(ExitCodes.ERROR)

    # notify different outputs (publish results, slack notifications)
    # we only do this if:
    # - this is not a dry run
    # - there is a single saas file deployed
    notify = not dry_run and len(saas_files) == 1
    if notify:
        saas_file = saas_files[0]
        slack_info = saas_file.get("slack")
        if slack_info:
            slack = slackapi_from_slack_workspace(
                slack_info,
                app_interface_settings,
                QONTRACT_INTEGRATION,
                init_usergroups=False,
            )
            # support built-in start and end slack notifications
            # only in v2 saas files
            if saas_file["apiVersion"] == "v2":
                ri = ResourceInventory()
                console_url = compose_console_url(saas_file, saas_file_name,
                                                  env_name)
                # deployment result notification
                defer(
                    lambda: slack_notify(
                        saas_file_name,
                        env_name,
                        slack,
                        ri,
                        console_url,
                        in_progress=False,
                    )
                )
                # deployment start notification
                slack_notifications = slack_info.get("notifications")
                if slack_notifications and slack_notifications.get("start"):
                    slack_notify(
                        saas_file_name,
                        env_name,
                        slack,
                        ri,
                        console_url,
                        in_progress=True,
                    )
        else:
            slack = None

    instance = queries.get_gitlab_instance()
    # instance exists in v1 saas files only
    desired_jenkins_instances = [
        s["instance"]["name"] for s in saas_files if s.get("instance")
    ]
    jenkins_map = jenkins_base.get_jenkins_map(
        desired_instances=desired_jenkins_instances
    )
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    try:
        gl = GitLabApi(instance, settings=settings)
    except Exception:
        # allow execution without access to gitlab
        # as long as there are no access attempts.
        gl = None

    saasherder = SaasHerder(
        saas_files,
        thread_pool_size=thread_pool_size,
        gitlab=gl,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        settings=settings,
        jenkins_map=jenkins_map,
        accounts=accounts,
    )
    if len(saasherder.namespaces) == 0:
        logging.warning("no targets found")
        sys.exit(ExitCodes.SUCCESS)

    ri, oc_map = ob.fetch_current_state(
        namespaces=saasherder.namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        init_api_resources=True,
        cluster_admin=saasherder.cluster_admin,
    )
    defer(oc_map.cleanup)
    saasherder.populate_desired_state(ri)

    # validate that this deployment is valid
    # based on promotion information in targets
    if not saasherder.validate_promotions():
        logging.error("invalid promotions")
        ri.register_error()
        sys.exit(ExitCodes.ERROR)

    # if saas_file_name is defined, the integration
    # is being called from multiple running instances
    actions = ob.realize_data(
        dry_run,
        oc_map,
        ri,
        thread_pool_size,
        caller=saas_file_name,
        wait_for_namespace=True,
        no_dry_run_skip_compare=(not saasherder.compare),
        take_over=saasherder.take_over,
    )

    if not dry_run:
        if saasherder.publish_job_logs:
            try:
                ob.follow_logs(oc_map, actions, io_dir)
            except Exception as e:
                logging.error(str(e))
                ri.register_error()
        try:
            ob.validate_data(oc_map, actions)
        except Exception as e:
            logging.error(str(e))
            ri.register_error()

    # publish results of this deployment
    # based on promotion information in targets
    success = not ri.has_error_registered()
    # only publish promotions for deployment jobs (a single saas file)
    if notify:
        # Auto-promote next stages only if there are changes in the
        # promoting stage. This prevents triggering promotions on job re-runs
        auto_promote = len(actions) > 0
        mr_cli = mr_client_gateway.init(gitlab_project_id=gitlab_project_id)
        saasherder.publish_promotions(success, all_saas_files, mr_cli,
                                      auto_promote)

    if not success:
        sys.exit(ExitCodes.ERROR)

    # send human readable notifications to slack
    # we only do this if:
    # - this is not a dry run
    # - there is a single saas file deployed
    # - output is 'events'
    # - no errors were registered
    if notify and slack and actions and slack_info.get("output") == "events":
        for action in actions:
            message = (
                f"[{action['cluster']}] " +
                f"{action['kind']} {action['name']} {action['action']}"
            )
            slack.chat_post_message(message)