def run(dry_run):
    """Reconcile Sentry user/team state on every configured Sentry instance.

    :param dry_run: when True, only log actions instead of applying them
    """
    settings = queries.get_app_interface_settings()
    gqlapi = gql.get_api()
    github = init_github()
    secret_reader = SecretReader(settings=settings)

    # Reconcile against all sentry instances.
    instances = gqlapi.query(SENTRY_INSTANCES_QUERY)["instances"]
    # Resolve all secrets up front so a bad credential fails early.
    tokens = {}
    for inst in instances:
        tokens[inst["name"]] = secret_reader.read(inst["automationToken"])
    skip_users = {}
    for inst in instances:
        skip_users[inst["name"]] = secret_reader.read(inst["adminUser"])

    for inst in instances:
        name = inst["name"]
        client = SentryClient(inst["consoleUrl"], tokens[name])
        # the admin/automation user must never be reconciled away
        current_state = fetch_current_state(client, [skip_users[name]])
        desired_state = fetch_desired_state(gqlapi, inst, github)
        SentryReconciler(client, dry_run).reconcile(current_state,
                                                    desired_state)
def get_config(default=False):
    """Build the GitHub org configuration map from app-interface.

    :param default: when True, include only org(s) marked as default and
        raise KeyError unless exactly one default org exists
    :return: dict of the form {"github": {org_name: {"token", "managed_teams"}}}
    """
    gqlapi = gql.get_api()
    orgs = gqlapi.query(ORGS_QUERY)["orgs"]
    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)

    config = {"github": {}}
    found_defaults = []
    for org in orgs:
        name = org["name"]
        if org.get("default"):
            found_defaults.append(name)
        elif default:
            # only the default org is wanted; skip all others
            continue
        config["github"][name] = {
            "token": secret_reader.read(org["token"]),
            "managed_teams": org["managedTeams"],
        }

    if default:
        if not found_defaults:
            raise KeyError("default github org config not found")
        if len(found_defaults) > 1:
            raise KeyError("multiple default github org configs found: "
                           f"{found_defaults}")
    return config
def init_ocm_client(self, cluster_info):
    """Initialize an OCM client for the cluster's referenced OCM instance.

    Skips clusters on which this integration is disabled and OCM
    instances that were already initialized. An instance without an
    offline token is recorded as False in the map.

    :param cluster_info: Graphql cluster query result
    :type cluster_info: dict
    """
    if self.cluster_disabled(cluster_info):
        return

    ocm_info = cluster_info['ocm']
    ocm_name = ocm_info['name']
    # pointer from each cluster to its referenced OCM instance
    self.clusters_map[cluster_info['name']] = ocm_name
    if self.ocm_map.get(ocm_name):
        return

    offline_token = ocm_info.get('offlineToken')
    if offline_token is None:
        # no credentials - mark the instance as unusable
        self.ocm_map[ocm_name] = False
        return

    token = SecretReader(settings=self.settings).read(offline_token)
    self.ocm_map[ocm_name] = OCM(
        ocm_info['url'],
        ocm_info.get('accessTokenClientId'),
        ocm_info.get('accessTokenUrl'),
        token,
    )
def __init__(self, jh, settings=None):
    """Store jump host connection details and materialize the SSH identity.

    :param jh: jump host description ("hostname", "user", "port", "identity")
    :param settings: settings passed through to SecretReader
    """
    self.hostname = jh["hostname"]
    self.user = jh["user"]
    # default to the standard SSH port when none is configured
    port = jh["port"]
    self.port = port if port is not None else 22
    reader = SecretReader(settings=settings)
    self.identity = reader.read(jh["identity"])
    self.init_identity_file()
def __init__(self, instance, project_id=None, ssl_verify=True,
             settings=None, project_url=None, saas_files=None):
    """Authenticate against a GitLab instance and optionally bind a project.

    :param instance: gitlab instance data ('url', 'token', 'sslVerify')
    :param project_id: project id to bind to (takes precedence)
    :param ssl_verify: TLS verification fallback, used only when the
        instance does not define 'sslVerify'. Bug fix: previously this
        parameter was unconditionally overwritten and silently ignored.
    :param settings: settings passed through to SecretReader
    :param project_url: project URL used to resolve the project when
        project_id is not given
    :param saas_files: saas files to attach to this client

    NOTE(review): if both project_id and project_url are None,
    self.project is left unset (unchanged from the original behavior).
    """
    self.server = instance['url']
    secret_reader = SecretReader(settings=settings)
    token = secret_reader.read(instance['token'])
    # The instance-level setting wins; fall back to the ssl_verify
    # argument (default True) when the instance does not specify it.
    if instance['sslVerify'] is not None:
        ssl_verify = instance['sslVerify']
    self.gl = gitlab.Gitlab(self.server, private_token=token,
                            ssl_verify=ssl_verify)
    self.gl.auth()
    self.user = self.gl.user
    if project_id is None:
        # When project_id is not provided, we try to get the project
        # using the project_url
        if project_url is not None:
            parsed_project_url = urlparse(project_url)
            name_with_namespace = parsed_project_url.path.strip('/')
            self.project = self.gl.projects.get(name_with_namespace)
    else:
        self.project = self.gl.projects.get(project_id)
    self.saas_files = saas_files
def __init__(self, instance, repo_url, settings, timeout=30):
    """Create a GitHub client bound to the repository behind repo_url.

    :param instance: instance data containing the 'token' secret reference
    :param repo_url: full URL of the repository
    :param settings: settings passed through to SecretReader
    :param timeout: HTTP timeout for the GitHub client, in seconds
    """
    # the repo identifier is the URL path without leading/trailing slashes
    repo_path = urlparse(repo_url).path.strip('/')
    token = SecretReader(settings=settings).read(instance['token'])
    client = github.Github(token, base_url=GH_BASE_URL, timeout=timeout)
    self.repo = client.get_repo(repo_path)
def __init__(self, jh, settings=None):
    """Record jump host connection parameters and write the identity file.

    :param jh: jump host description ('hostname', 'user', 'port', 'identity')
    :param settings: settings passed through to SecretReader
    """
    self.hostname = jh['hostname']
    self.user = jh['user']
    # fall back to the standard SSH port when unset
    self.port = jh['port'] if jh['port'] is not None else 22
    self.identity = SecretReader(settings=settings).read(jh['identity'])
    self.init_identity_file()
class JiraClient:
    """Wrapper around Jira client"""

    def __init__(self, jira_board, settings=None):
        """Connect to the Jira server configured for the given board."""
        self.secret_reader = SecretReader(settings=settings)
        self.project = jira_board['name']
        server_info = jira_board['server']
        self.server = server_info['serverUrl']
        token_auth = self.secret_reader.read(server_info['token'])
        self.jira = JIRA(self.server, token_auth=token_auth)

    def get_issues(self, fields=None):
        """Return every issue of the project, fetched in pages of 100.

        :param fields: optional iterable of field names to restrict results
        """
        page_size = 100
        extra = {}
        if fields:
            extra['fields'] = ','.join(fields)
        jql = 'project={}'.format(self.project)
        collected = []
        offset = 0
        while True:
            page = self.jira.search_issues(jql, offset, page_size, **extra)
            collected.extend(page)
            # a short page means we have reached the end
            if len(page) < page_size:
                return collected
            offset += page_size
def run(dry_run):
    """Reconcile Sentry state against every configured Sentry instance.

    :param dry_run: when True, only log actions instead of applying them
    """
    settings = queries.get_app_interface_settings()
    gqlapi = gql.get_api()
    github = init_github()
    secret_reader = SecretReader(settings=settings)

    # Reconcile against all sentry instances
    for instance in gqlapi.query(SENTRY_INSTANCES_QUERY)['instances']:
        token = secret_reader.read(instance['automationToken'])
        client = SentryClient(instance['consoleUrl'], token)
        # the admin user is excluded from reconciliation
        skip_user = secret_reader.read(instance['adminUser'])
        current = fetch_current_state(client, [skip_user])
        desired = fetch_desired_state(gqlapi, instance, github)
        SentryReconciler(client, dry_run).reconcile(current, desired)
def __init__(self, token, ssl_verify=True, settings=None):
    """Load Jenkins connection details from the TOML secret behind token.

    :param token: secret reference resolving to a TOML document with a
        [jenkins] section (url, user, password)
    :param ssl_verify: whether to verify TLS certificates
    :param settings: settings passed through to SecretReader
    """
    raw_config = SecretReader(settings=settings).read(token)
    jenkins_cfg = toml.loads(raw_config)['jenkins']
    self.url = jenkins_cfg['url']
    self.user = jenkins_cfg['user']
    self.password = jenkins_cfg['password']
    self.ssl_verify = ssl_verify
    self.should_restart = False
    self.settings = settings
def __init__(self, workspace_name, token, settings=None,
             init_usergroups=True, **chat_kwargs):
    """Authenticate a Slack client for the given workspace.

    :param workspace_name: Slack workspace name
    :param token: secret reference resolving to the Slack token
    :param settings: settings passed through to SecretReader
    :param init_usergroups: fetch all usergroups on construction
    :param chat_kwargs: extra kwargs used when posting messages
    """
    self.workspace_name = workspace_name
    slack_token = SecretReader(settings=settings).read(token)
    self.sc = SlackClient(slack_token)
    self.results = {}
    self.chat_kwargs = chat_kwargs
    if init_usergroups:
        self._initiate_usergroups()
class JiraClient:
    """Wrapper around Jira client."""

    def __init__(self, jira_board: Mapping[str, Any],
                 settings: Optional[Mapping] = None):
        """Connect to the Jira server configured for the given board."""
        self.secret_reader = SecretReader(settings=settings)
        self.project = jira_board["name"]
        server_info = jira_board["server"]
        self.server = server_info["serverUrl"]
        token_auth = self.secret_reader.read(server_info["token"])
        self.jira = JIRA(self.server, token_auth=token_auth)

    def get_issues(self, fields: Optional[Mapping] = None) -> GottenIssue:
        """Return every issue of the project, fetched in pages of 100.

        :param fields: optional field names to restrict the results to
        """
        page_size = 100
        jql = "project={}".format(self.project)
        extra: dict[str, Any] = {}
        if fields:
            extra["fields"] = ",".join(fields)
        collected: GottenIssue = []
        offset = 0
        while True:
            page = self.jira.search_issues(jql, offset, page_size, **extra)
            collected.extend(page)
            # a short page means we have reached the end
            if len(page) < page_size:
                return collected
            offset += page_size

    def create_issue(
        self,
        summary: str,
        body: str,
        labels: Optional[Iterable[str]] = None,
        links: Iterable[str] = (),
    ) -> Issue:
        """Create an issue in our project with the given labels."""
        new_issue = self.jira.create_issue(
            project=self.project,
            summary=summary,
            description=body,
            labels=labels,
            issuetype={"name": "Task"},
        )
        # link the new issue back to each causing issue
        for link in links:
            self.jira.create_issue_link(type="is caused by",
                                        inwardIssue=new_issue.key,
                                        outwardIssue=link)
        return new_issue
def run(dry_run):
    """Accept pending GitHub repo invitations for known code components.

    :param dry_run: when True, invitations are reported but not accepted
    :return: the set of accepted invitation URLs
    """
    gqlapi = gql.get_api()
    result = gqlapi.query(REPOS_QUERY)
    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    token = secret_reader.read(settings["githubRepoInvites"]["credentials"])
    api = raw_github_api.RawGithubApi(token)
    code_components = _parse_code_components(result["apps_v1"])
    return _accept_invitations(api, code_components, dry_run)
def bot_login(ctx, cluster_name):
    """Print an `oc login` command using the cluster's automation token.

    Exits with status 1 when the cluster is unknown.

    :param ctx: CLI context (unused here)
    :param cluster_name: name of the cluster to log in to
    """
    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    matches = [c for c in queries.get_clusters()
               if c['name'] == cluster_name]
    if not matches:
        print(f"{cluster_name} not found.")
        sys.exit(1)

    cluster = matches[0]
    server = cluster['serverUrl']
    token = secret_reader.read(cluster['automationToken'])
    print(f"oc login --server {server} --token {token}")
def get_encrypted_credentials(credentials_name, user, settings):
    """Read the named credentials secret and GPG-encrypt it for a user.

    :param credentials_name: name of the credentials entry in settings
    :param user: user dict providing 'public_gpg_key'
    :param settings: app-interface settings containing 'credentials'
    :return: the encrypted credentials, or None when the name does not
        resolve to exactly one entry
    """
    matches = [c for c in settings["credentials"]
               if c["name"] == credentials_name]
    if len(matches) != 1:
        return None
    secret_reader = SecretReader(settings=settings)
    plaintext = secret_reader.read(matches[0]["secret"])
    return gpg_encrypt(plaintext, user["public_gpg_key"])
def get_encrypted_credentials(credentials_name, user, settings):
    """Look up credentials by name and encrypt them with the user's GPG key.

    :param credentials_name: name of the credentials entry in settings
    :param user: user dict providing 'public_gpg_key'
    :param settings: app-interface settings containing 'credentials'
    :return: encrypted credentials, or None unless exactly one entry matches
    """
    found = [item for item in settings['credentials']
             if item['name'] == credentials_name]
    if len(found) != 1:
        return None
    plaintext = SecretReader(settings=settings).read(found[0]['secret'])
    return gpg_encrypt(plaintext, user['public_gpg_key'])
def get_config(desired_org_name=None):
    """Build the GitHub org configuration map, optionally for one org only.

    :param desired_org_name: when given, include only the org with this name
    :return: dict of the form {'github': {org_name: {'token', 'managed_teams'}}}
    """
    gqlapi = gql.get_api()
    orgs = gqlapi.query(ORGS_QUERY)['orgs']
    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)

    config = {'github': {}}
    for org in orgs:
        name = org['name']
        if desired_org_name and name != desired_org_name:
            continue
        config['github'][name] = {
            'token': secret_reader.read(org['token']),
            'managed_teams': org['managedTeams'],
        }
    return config
def run(dry_run):
    """Reconcile teammates on every configured SendGrid account.

    Exits with ExitCodes.ERROR as soon as any account fails to reconcile.

    :param dry_run: when True, only log actions instead of applying them
    """
    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    users = queries.get_roles(aws=False, saas_files=False, sendgrid=True)
    desired_state_all = fetch_desired_state(users)

    for account in queries.get_sendgrid_accounts():
        api_key = secret_reader.read(account['token'])
        client = sendgrid.SendGridAPIClient(api_key=api_key).client
        current_state = fetch_current_state(client)
        desired_state = desired_state_all.get(account['name'], [])
        if act(dry_run, client, desired_state, current_state):
            sys.exit(ExitCodes.ERROR)
def promquery(cluster, query):
    """Run a PromQL query

    Queries the cluster's Prometheus HTTP API with basic auth and
    pretty-prints the JSON response.

    :param cluster: cluster name used to build the Prometheus URL
    :param query: the PromQL expression to evaluate
    :raises requests.HTTPError: on a non-2xx response
    """
    config_data = config.get_config()
    auth = {
        'path': config_data['promql-auth']['secret_path'],
        'field': 'token',
    }
    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    # secret value is expected to be "user:password" for HTTP basic auth
    prom_auth_creds = secret_reader.read(auth)
    prom_auth = requests.auth.HTTPBasicAuth(*prom_auth_creds.split(':'))

    url = f"https://prometheus.{cluster}.devshift.net/api/v1/query"
    # timeout added: without it requests can block indefinitely on a
    # hung or unreachable Prometheus endpoint
    response = requests.get(url, params={'query': query}, auth=prom_auth,
                            timeout=60)
    response.raise_for_status()

    print(json.dumps(response.json(), indent=4))
def init_ocm_client(self, cluster_info, init_provision_shards, init_addons):
    """Initialize an OCM client for the cluster's referenced OCM instance.

    Skips clusters on which this integration is disabled and OCM
    instances that were already initialized. An instance without an
    offline token is recorded as False in the map.

    :param cluster_info: Graphql cluster query result
    :param init_provision_shards: should initiate provision shards
    :param init_addons: should initiate addons
    :type cluster_info: dict
    """
    if self.cluster_disabled(cluster_info):
        return

    ocm_info = cluster_info["ocm"]
    ocm_name = ocm_info["name"]
    # pointer from each cluster to its referenced OCM instance
    self.clusters_map[cluster_info["name"]] = ocm_name
    if self.ocm_map.get(ocm_name):
        return

    offline_token = ocm_info.get("offlineToken")
    if offline_token is None:
        # no credentials - mark the instance as unusable
        self.ocm_map[ocm_name] = False
        return

    secret_reader = SecretReader(settings=self.settings)
    self.ocm_map[ocm_name] = OCM(
        ocm_info["name"],
        ocm_info["url"],
        ocm_info.get("accessTokenClientId"),
        ocm_info.get("accessTokenUrl"),
        secret_reader.read(offline_token),
        init_provision_shards=init_provision_shards,
        init_addons=init_addons,
        blocked_versions=ocm_info.get("blockedVersions"),
    )
def run(dry_run):
    """Accept GitHub repo invitations that match known code components.

    An invitation is accepted when its repo URL is a known code component
    URL, or when it lives under a known org prefix.

    :param dry_run: when True, invitations are logged but not accepted
    :return: the set of invitation URLs that matched
    """
    gqlapi = gql.get_api()
    result = gqlapi.query(REPOS_QUERY)
    config = get_config()['github-repo-invites']
    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    secret = {'path': config['secret_path'],
              'field': config['secret_field']}
    token = secret_reader.read(secret)
    g = raw_github_api.RawGithubApi(token)

    # collect all known repo urls and their org prefixes
    urls = set()
    known_orgs = set()
    for app in result['apps_v1']:
        if app['codeComponents'] is None:
            continue
        for component in app['codeComponents']:
            url = component['url']
            urls.add(url)
            known_orgs.add(url[:url.rindex('/')])

    invitations = set()
    for invite in g.repo_invitations():
        url = os.path.dirname(invite['html_url'])
        if url in urls or any(url.startswith(org) for org in known_orgs):
            logging.info(['accept', url])
            invitations.add(url)
            if not dry_run:
                g.accept_repo_invitation(invite['id'])
        else:
            logging.debug(['skipping', url])
    return invitations
def init_oc_client(self, cluster_info):
    """Create and register an OC client for the given cluster.

    Skips clusters that are already initialized, disabled for this
    integration, or filtered out by the internal/external flag. When the
    cluster has no automation token or is unreachable, an OCLogMsg
    placeholder is registered instead of a client.

    :param cluster_info: Graphql cluster query result (dict)
    """
    cluster = cluster_info['name']
    if self.oc_map.get(cluster):
        # already initialized
        return None
    if self.cluster_disabled(cluster_info):
        return None
    if self.internal is not None:
        # integration is executed with `--internal` or `--external`
        # filter out non matching clusters
        if self.internal and not cluster_info['internal']:
            return
        if not self.internal and cluster_info['internal']:
            return
    automation_token = cluster_info.get('automationToken')
    if automation_token is None:
        # record the failure so callers can report it later
        self.set_oc(
            cluster,
            OCLogMsg(log_level=logging.ERROR,
                     message=f"[{cluster}]"
                     " has no automation token"))
    else:
        server_url = cluster_info['serverUrl']
        secret_reader = SecretReader(settings=self.settings)
        token = secret_reader.read(automation_token)
        if self.use_jump_host:
            jump_host = cluster_info.get('jumpHost')
        else:
            jump_host = None
        try:
            oc_client = OC(server_url, token, jump_host,
                           settings=self.settings,
                           init_projects=self.init_projects,
                           init_api_resources=self.init_api_resources)
            self.set_oc(cluster, oc_client)
        except StatusCodeError as e:
            # unreachable cluster: register an error placeholder
            self.set_oc(
                cluster,
                OCLogMsg(log_level=logging.ERROR,
                         message=f"[{cluster}]"
                         f" is unreachable: {e}"))
def get_quay_api_store():
    """
    Returns a dictionary with a key for each Quay organization
    managed in app-interface. Each key contains an initiated QuayApi
    instance and the org's managed teams.
    """
    quay_orgs = queries.get_quay_orgs()
    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)

    store = {}
    for org in quay_orgs:
        name = org['name']
        token = secret_reader.read(org['automationToken'])
        store[name] = {
            'api': QuayApi(token, name, base_url=org.get('serverUrl')),
            'teams': org.get('managedTeams'),
        }
    return store
def run(dry_run):
    """Verify each GitHub org's 2FA requirement matches the desired setting.

    Exits with status 1 when any org's current 2FA enforcement differs
    from what app-interface declares.

    :param dry_run: unused; this integration only validates
    """
    base_url = os.environ.get('GITHUB_API', 'https://api.github.com')
    orgs = queries.get_github_orgs()
    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)

    error = False
    for org in orgs:
        name = org['name']
        gh = Github(secret_reader.read(org['token']), base_url=base_url)
        gh_org = gh.get_organization(name)
        # missing desired setting is treated as "2FA not required"
        desired_2fa = org['two_factor_authentication'] or False
        if gh_org.two_factor_requirement_enabled != desired_2fa:
            logging.error(f"2FA mismatch for {name}")
            error = True

    if error:
        sys.exit(1)
def __init__(self, workspace_name: str,
             token: Mapping[str, str],
             api_config: Optional[SlackApiConfig] = None,
             settings: Optional[Mapping[str, Any]] = None,
             init_usergroups=True,
             channel: Optional[str] = None,
             **chat_kwargs) -> None:
    """Authenticate a Slack WebClient for the given workspace.

    :param workspace_name: Slack workspace name (ex. coreos)
    :param token: data to pass to SecretReader.read() to get the token
    :param api_config: Slack API configuration; a default SlackApiConfig
        is used when not given
    :param settings: settings to pass to SecretReader
    :param init_usergroups: whether to fetch all Slack usergroups now
    :param channel: the Slack channel to post messages to, only used
        when posting messages to a channel
    :param chat_kwargs: any other kwargs that can be used to post Slack
        channel messages
    """
    self.workspace_name = workspace_name
    self.config = api_config if api_config else SlackApiConfig()

    slack_token = SecretReader(settings=settings).read(token)
    self._sc = WebClient(token=slack_token, timeout=self.config.timeout)
    self._configure_client_retry()

    self._results: Dict[str, Any] = {}
    self.channel = channel
    self.chat_kwargs = chat_kwargs

    if init_usergroups:
        self._initiate_usergroups()
def get_quay_api_store():
    """
    Returns a dictionary keyed by OrgKey for each Quay organization
    managed in app-interface. Each entry holds an initiated QuayApi
    instance plus push credentials, teams, managed repos and an
    optional mirror OrgKey.
    """
    quay_orgs = queries.get_quay_orgs()
    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)

    store = {}
    for org_data in quay_orgs:
        org_name = org_data['name']
        org_key = OrgKey(org_data['instance']['name'], org_name)
        base_url = org_data['instance']['url']
        token = secret_reader.read(org_data['automationToken'])

        mirror = None
        if org_data.get('mirror'):
            mirror = OrgKey(org_data['mirror']['instance']['name'],
                            org_data['mirror']['name'])

        push_token = None
        if org_data.get('pushCredentials'):
            push_token = secret_reader.read_all(org_data['pushCredentials'])

        store[org_key] = {
            'url': base_url,
            'api': QuayApi(token, org_name, base_url=base_url),
            'push_token': push_token,
            'teams': org_data.get('managedTeams'),
            'managedRepos': org_data.get('managedRepos'),
            'mirror': mirror,
        }
    return store
def get_quay_api_store(managed_repos=False):
    """
    Returns a dictionary keyed by OrgKey for each Quay organization
    managed in app-interface. Each entry holds an initiated QuayApi
    instance and the org's managed teams.

    :param managed_repos: when True, include only orgs with managed repos
    """
    quay_orgs = queries.get_quay_orgs()
    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)

    store = {}
    for org_data in quay_orgs:
        if managed_repos and not org_data['managedRepos']:
            continue
        org_name = org_data['name']
        instance = org_data['instance']
        org_key = OrgKey(instance['name'], org_name)
        token = secret_reader.read(org_data['automationToken'])
        store[org_key] = {
            'api': QuayApi(token, org_name, base_url=instance['url']),
            'teams': org_data.get('managedTeams'),
        }
    return store
def fetch_current_state(unleash_instance):
    """Fetch the current feature toggle state from an Unleash instance.

    :param unleash_instance: instance data providing 'url' and 'token'
    :return: the feature toggles reported by the instance's API
    """
    settings = queries.get_app_interface_settings()
    admin_access_token = SecretReader(settings=settings).read(
        unleash_instance['token'])
    api_url = f"{unleash_instance['url']}/api"
    return get_feature_toggles(api_url, admin_access_token)
def __init__(self, token, settings=None):
    """Resolve the PagerDuty API key and load the user list.

    :param token: secret reference resolving to the PagerDuty API key
    :param settings: settings passed through to SecretReader
    """
    reader = SecretReader(settings=settings)
    # pypd configures authentication via a module-level api key
    pypd.api_key = reader.read(token)
    self.init_users()
class SaasHerder(): """Wrapper around SaaS deployment actions.""" def __init__(self, saas_files, thread_pool_size, gitlab, integration, integration_version, settings, jenkins_map=None, accounts=None, validate=False): self.saas_files = saas_files if validate: self._validate_saas_files() if not self.valid: return self.thread_pool_size = thread_pool_size self.gitlab = gitlab self.integration = integration self.integration_version = integration_version self.settings = settings self.secret_reader = SecretReader(settings=settings) self.namespaces = self._collect_namespaces() self.jenkins_map = jenkins_map # each namespace is in fact a target, # so we can use it to calculate. divisor = len(self.namespaces) or 1 self.available_thread_pool_size = \ threaded.estimate_available_thread_pool_size( self.thread_pool_size, divisor) # if called by a single saas file,it may # specify that it manages resources exclusively. self.take_over = self._get_saas_file_attribute('takeover') self.compare = self._get_saas_file_attribute('compare') self.publish_job_logs = self._get_saas_file_attribute('publishJobLogs') if accounts: self._initiate_state(accounts) def _get_saas_file_attribute(self, attribute): return len(self.saas_files) == 1 and self.saas_files[0].get(attribute) def _validate_saas_files(self): self.valid = True saas_file_name_path_map = {} saas_file_promotion_publish_channels = [] for saas_file in self.saas_files: saas_file_name = saas_file['name'] saas_file_path = saas_file['path'] saas_file_name_path_map.setdefault(saas_file_name, []) saas_file_name_path_map[saas_file_name].append(saas_file_path) saas_file_owners = [ u['org_username'] for r in saas_file['roles'] for u in r['users'] ] if not saas_file_owners: msg = 'saas file {} has no owners: {}' logging.error(msg.format(saas_file_name, saas_file_path)) self.valid = False for resource_template in saas_file['resourceTemplates']: resource_template_name = resource_template['name'] for target in resource_template['targets']: # 
promotion publish channels promotion = target.get('promotion') if promotion: publish = promotion.get('publish') if publish: saas_file_promotion_publish_channels.extend( publish) # validate target parameters target_parameters = target['parameters'] if not target_parameters: continue target_parameters = json.loads(target_parameters) target_namespace = target['namespace'] namespace_name = target_namespace['name'] cluster_name = target_namespace['cluster']['name'] environment = target_namespace['environment'] environment_name = environment['name'] environment_parameters = environment['parameters'] if not environment_parameters: continue environment_parameters = \ json.loads(environment_parameters) msg = \ f'[{saas_file_name}/{resource_template_name}] ' + \ 'parameter found in target ' + \ f'{cluster_name}/{namespace_name} ' + \ f'should be reused from env {environment_name}' for t_key, t_value in target_parameters.items(): if not isinstance(t_value, str): continue for e_key, e_value in environment_parameters.items(): if not isinstance(e_value, str): continue if '.' not in e_value: continue if e_value not in t_value: continue if t_key == e_key and t_value == e_value: details = \ f'consider removing {t_key}' else: replacement = t_value.replace( e_value, '${' + e_key + '}') details = \ f'target: \"{t_key}: {t_value}\". ' + \ f'env: \"{e_key}: {e_value}\". 
' + \ f'consider \"{t_key}: {replacement}\"' logging.warning(f'{msg}: {details}') # saas file name duplicates duplicates = { saas_file_name: saas_file_paths for saas_file_name, saas_file_paths in saas_file_name_path_map.items() if len(saas_file_paths) > 1 } if duplicates: self.valid = False msg = 'saas file name {} is not unique: {}' for saas_file_name, saas_file_paths in duplicates.items(): logging.error(msg.format(saas_file_name, saas_file_paths)) # promotion publish channel duplicates duplicates = [ p for p in saas_file_promotion_publish_channels if saas_file_promotion_publish_channels.count(p) > 1 ] if duplicates: self.valid = False msg = 'saas file promotion publish channel is not unique: {}' for duplicate in duplicates: logging.error(msg.format(duplicate)) def _collect_namespaces(self): # namespaces may appear more then once in the result namespaces = [] for saas_file in self.saas_files: managed_resource_types = saas_file['managedResourceTypes'] resource_templates = saas_file['resourceTemplates'] for rt in resource_templates: targets = rt['targets'] for target in targets: namespace = target['namespace'] if target.get('disable'): logging.debug( f"[{saas_file['name']}/{rt['name']}] target " + f"{namespace['cluster']['name']}/" + f"{namespace['name']} is disabled.") continue # managedResourceTypes is defined per saas_file # add it to each namespace in the current saas_file namespace['managedResourceTypes'] = managed_resource_types namespaces.append(namespace) return namespaces def _initiate_state(self, accounts): self.state = State(integration=self.integration, accounts=accounts, settings=self.settings) @staticmethod def _collect_parameters(container): parameters = container.get('parameters') or {} if isinstance(parameters, str): parameters = json.loads(parameters) # adjust Python's True/False for k, v in parameters.items(): if v is True: parameters[k] = 'true' elif v is False: parameters[k] = 'false' elif any([isinstance(v, t) for t in [dict, list, tuple]]): 
parameters[k] = json.dumps(v) return parameters @staticmethod def _get_file_contents_github(repo, path, commit_sha): try: f = repo.get_contents(path, commit_sha) return f.decoded_content except GithubException as e: # slightly copied with love from # https://github.com/PyGithub/PyGithub/issues/661 errors = e.data['errors'] # example errors dict that we are looking for # { # 'message': '<text>', # 'errors': [{ # 'resource': 'Blob', # 'field': 'data', # 'code': 'too_large' # }], # 'documentation_url': '<url>' # } for error in errors: if error['code'] == 'too_large': # get large files tree = repo.get_git_tree(commit_sha, recursive='/' in path).tree for x in tree: if x.path != path.lstrip('/'): continue blob = repo.get_git_blob(x.sha) return base64.b64decode(blob.content).decode("utf8") raise e @retry() def _get_file_contents(self, options): url = options['url'] path = options['path'] ref = options['ref'] github = options['github'] html_url = f"{url}/blob/{ref}{path}" commit_sha = self._get_commit_sha(options) content = None if 'github' in url: repo_name = url.rstrip("/").replace('https://github.com/', '') repo = github.get_repo(repo_name) content = self._get_file_contents_github(repo, path, commit_sha) elif 'gitlab' in url: if not self.gitlab: raise Exception('gitlab is not initialized') project = self.gitlab.get_project(url) f = project.files.get(file_path=path.lstrip('/'), ref=commit_sha) content = f.decode() return yaml.safe_load(content), html_url, commit_sha @retry() def _get_directory_contents(self, options): url = options['url'] path = options['path'] ref = options['ref'] github = options['github'] html_url = f"{url}/tree/{ref}{path}" commit_sha = self._get_commit_sha(options) resources = [] if 'github' in url: repo_name = url.rstrip("/").replace('https://github.com/', '') repo = github.get_repo(repo_name) for f in repo.get_contents(path, commit_sha): file_path = os.path.join(path, f.name) file_contents_decoded = \ self._get_file_contents_github( repo, 
file_path, commit_sha) resource = yaml.safe_load(file_contents_decoded) resources.append(resource) elif 'gitlab' in url: if not self.gitlab: raise Exception('gitlab is not initialized') project = self.gitlab.get_project(url) for f in project.repository_tree(path=path.lstrip('/'), ref=commit_sha, all=True): file_contents = \ project.files.get(file_path=f['path'], ref=commit_sha) resource = yaml.safe_load(file_contents.decode()) resources.append(resource) return resources, html_url, commit_sha @retry() def _get_commit_sha(self, options): url = options['url'] ref = options['ref'] github = options['github'] hash_length = options.get('hash_length') commit_sha = '' if 'github' in url: repo_name = url.rstrip("/").replace('https://github.com/', '') repo = github.get_repo(repo_name) commit = repo.get_commit(sha=ref) commit_sha = commit.sha elif 'gitlab' in url: if not self.gitlab: raise Exception('gitlab is not initialized') project = self.gitlab.get_project(url) commits = project.commits.list(ref_name=ref) commit_sha = commits[0].id if hash_length: return commit_sha[:hash_length] return commit_sha @staticmethod def _get_cluster_and_namespace(target): cluster = target['namespace']['cluster']['name'] namespace = target['namespace']['name'] return cluster, namespace @staticmethod def _additional_resource_process(resources, html_url): for resource in resources: # add a definition annotation to each PrometheusRule rule if resource['kind'] == 'PrometheusRule': try: groups = resource['spec']['groups'] for group in groups: rules = group['rules'] for rule in rules: annotations = rule.get('annotations') if not annotations: continue rule['annotations']['html_url'] = html_url except Exception: logging.warning('could not add html_url annotation to' + resource['name']) @staticmethod def _parameter_value_needed(parameter_name, consolidated_parameters, template): """Is a parameter named in the template but unspecified? NOTE: This is currently "parameter *named* and absent" -- i.e. 
we don't care about `required: true`. This is for backward compatibility. :param parameter_name: The name (key) of the parameter. :param consolidated_parameters: Dict of parameters already specified/ calculated. :param template: The template file in dict form. :return bool: True if the named parameter is named in the template, but not already present in consolidated_parameters. """ if parameter_name in consolidated_parameters: return False for template_parameter in template.get("parameters", {}): if template_parameter["name"] == parameter_name: return True return False def _process_template(self, options): saas_file_name = options['saas_file_name'] resource_template_name = options['resource_template_name'] image_auth = options['image_auth'] url = options['url'] path = options['path'] provider = options['provider'] target = options['target'] github = options['github'] target_ref = target['ref'] target_promotion = target.get('promotion') or {} resources = None html_url = None commit_sha = None if provider == 'openshift-template': hash_length = options['hash_length'] parameters = options['parameters'] environment = target['namespace']['environment'] environment_parameters = self._collect_parameters(environment) target_parameters = self._collect_parameters(target) consolidated_parameters = {} consolidated_parameters.update(environment_parameters) consolidated_parameters.update(parameters) consolidated_parameters.update(target_parameters) for replace_key, replace_value in consolidated_parameters.items(): if not isinstance(replace_value, str): continue replace_pattern = '${' + replace_key + '}' for k, v in consolidated_parameters.items(): if not isinstance(v, str): continue if replace_pattern in v: consolidated_parameters[k] = \ v.replace(replace_pattern, replace_value) get_file_contents_options = { 'url': url, 'path': path, 'ref': target_ref, 'github': github } try: template, html_url, commit_sha = \ self._get_file_contents(get_file_contents_options) except Exception as 
e: logging.error(f"[{url}/{path}:{target_ref}] " + f"error fetching template: {str(e)}") return None, None, None # add IMAGE_TAG only if it is unspecified image_tag = consolidated_parameters.get('IMAGE_TAG') if not image_tag: sha_substring = commit_sha[:hash_length] # IMAGE_TAG takes one of two forms: # - If saas file attribute 'use_channel_in_image_tag' is true, # it is {CHANNEL}-{SHA} # - Otherwise it is just {SHA} if self._get_saas_file_attribute("use_channel_in_image_tag"): try: channel = consolidated_parameters["CHANNEL"] except KeyError: logging.error( f"[{saas_file_name}/{resource_template_name}] " + f"{html_url}: CHANNEL is required when " + "'use_channel_in_image_tag' is true.") return None, None, None image_tag = f"{channel}-{sha_substring}" else: image_tag = sha_substring consolidated_parameters['IMAGE_TAG'] = image_tag # This relies on IMAGE_TAG already being calculated. need_repo_digest = self._parameter_value_needed( "REPO_DIGEST", consolidated_parameters, template) need_image_digest = self._parameter_value_needed( "IMAGE_DIGEST", consolidated_parameters, template) if need_repo_digest or need_image_digest: try: logging.debug("Generating REPO_DIGEST.") registry_image = consolidated_parameters["REGISTRY_IMG"] except KeyError as e: logging.error( f"[{saas_file_name}/{resource_template_name}] " + f"{html_url}: error generating REPO_DIGEST. " + "Is REGISTRY_IMG missing? 
" + f"{str(e)}") return None, None, None try: image_uri = f"{registry_image}:{image_tag}" img = Image(image_uri, **image_auth) if need_repo_digest: consolidated_parameters["REPO_DIGEST"] = img.url_digest if need_image_digest: consolidated_parameters["IMAGE_DIGEST"] = img.digest except (rqexc.ConnectionError, rqexc.HTTPError) as e: logging.error( f"[{saas_file_name}/{resource_template_name}] " + f"{html_url}: error generating REPO_DIGEST for " + f"{image_uri}: {str(e)}") return None, None, None oc = OC('server', 'token', local=True) try: resources = oc.process(template, consolidated_parameters) except StatusCodeError as e: logging.error( f"[{saas_file_name}/{resource_template_name}] " + f"{html_url}: error processing template: {str(e)}") elif provider == 'directory': get_directory_contents_options = { 'url': url, 'path': path, 'ref': target_ref, 'github': github } try: resources, html_url, commit_sha = \ self._get_directory_contents( get_directory_contents_options) except Exception as e: logging.error(f"[{url}/{path}:{target_ref}] " + f"error fetching directory: {str(e)}") return None, None, None else: logging.error(f"[{saas_file_name}/{resource_template_name}] " + f"unknown provider: {provider}") target_promotion['commit_sha'] = commit_sha return resources, html_url, target_promotion @staticmethod def _collect_images(resource): images = set() # resources with pod templates try: template = resource["spec"]["template"] for c in template["spec"]["containers"]: images.add(c["image"]) except KeyError: pass # init containers try: template = resource["spec"]["template"] for c in template["spec"]["initContainers"]: images.add(c["image"]) except KeyError: pass # CronJob try: template = resource["spec"]["jobTemplate"]["spec"]["template"] for c in template["spec"]["containers"]: images.add(c["image"]) except KeyError: pass # CatalogSource templates try: images.add(resource["spec"]["image"]) except KeyError: pass return images @staticmethod def _check_image(image, 
image_patterns, image_auth, error_prefix): error = False if image_patterns and \ not any(image.startswith(p) for p in image_patterns): error = True logging.error( f"{error_prefix} Image is not in imagePatterns: {image}") try: valid = Image(image, **image_auth) if not valid: error = True logging.error(f"{error_prefix} Image does not exist: {image}") except Exception as e: error = True logging.error(f"{error_prefix} Image is invalid: {image}. " + f"details: {str(e)}") return error def _check_images(self, options): saas_file_name = options['saas_file_name'] resource_template_name = options['resource_template_name'] html_url = options['html_url'] resources = options['resources'] image_auth = options['image_auth'] image_patterns = options['image_patterns'] error_prefix = \ f"[{saas_file_name}/{resource_template_name}] {html_url}:" images_list = threaded.run(self._collect_images, resources, self.available_thread_pool_size) images = {item for sublist in images_list for item in sublist} if not images: return False # no errors errors = threaded.run(self._check_image, images, self.available_thread_pool_size, image_patterns=image_patterns, image_auth=image_auth, error_prefix=error_prefix) error = True in errors return error def _initiate_github(self, saas_file): auth = saas_file.get('authentication') or {} auth_code = auth.get('code') or {} if auth_code: token = self.secret_reader.read(auth_code) else: # use the app-sre token by default default_org_name = 'app-sre' config = get_config(desired_org_name=default_org_name) token = config['github'][default_org_name]['token'] base_url = os.environ.get('GITHUB_API', 'https://api.github.com') # This is a threaded world. 
Let's define a big # connections pool to live in that world # (this avoids the warning "Connection pool is # full, discarding connection: api.github.com") pool_size = 100 return Github(token, base_url=base_url, pool_size=pool_size) def _initiate_image_auth(self, saas_file): """ This function initiates a dict required for image authentication. This dict will be used as kwargs for sertoolbox's Image. The image authentication secret specified in the saas file must contain the 'user' and 'token' keys, and may optionally contain a 'url' key specifying the image registry url to be passed to check if an image should be checked using these credentials. The function returns the keys extracted from the secret in the structure expected by sretoolbox's Image: 'user' --> 'username' 'token' --> 'password' 'url' --> 'auth_server' (optional) """ auth = saas_file.get('authentication') if not auth: return {} auth_image_secret = auth.get('image') if not auth_image_secret: return {} creds = self.secret_reader.read_all(auth_image_secret) required_keys = ['user', 'token'] ok = all(k in creds.keys() for k in required_keys) if not ok: logging.warning( "the specified image authentication secret " + f"found in path {auth_image_secret['path']} " + f"does not contain all required keys: {required_keys}") return {} image_auth = {'username': creds['user'], 'password': creds['token']} url = creds.get('url') if url: image_auth['auth_server']: url return image_auth def populate_desired_state(self, ri): results = threaded.run(self.init_populate_desired_state_specs, self.saas_files, self.thread_pool_size) desired_state_specs = \ [item for sublist in results for item in sublist] promotions = threaded.run(self.populate_desired_state_saas_file, desired_state_specs, self.thread_pool_size, ri=ri) self.promotions = promotions def init_populate_desired_state_specs(self, saas_file): specs = [] saas_file_name = saas_file['name'] github = self._initiate_github(saas_file) image_auth = 
self._initiate_image_auth(saas_file) instance_name = saas_file['instance']['name'] managed_resource_types = saas_file['managedResourceTypes'] image_patterns = saas_file['imagePatterns'] resource_templates = saas_file['resourceTemplates'] saas_file_parameters = self._collect_parameters(saas_file) # iterate over resource templates (multiple per saas_file) for rt in resource_templates: rt_name = rt['name'] url = rt['url'] path = rt['path'] provider = rt.get('provider') or 'openshift-template' hash_length = rt.get('hash_length') or self.settings['hashLength'] parameters = self._collect_parameters(rt) consolidated_parameters = {} consolidated_parameters.update(saas_file_parameters) consolidated_parameters.update(parameters) # iterate over targets (each target is a namespace) for target in rt['targets']: if target.get('disable'): # a warning is logged during SaasHerder initiation continue cluster, namespace = \ self._get_cluster_and_namespace(target) process_template_options = { 'saas_file_name': saas_file_name, 'resource_template_name': rt_name, 'image_auth': image_auth, 'url': url, 'path': path, 'provider': provider, 'hash_length': hash_length, 'target': target, 'parameters': consolidated_parameters, 'github': github } check_images_options_base = { 'saas_file_name': saas_file_name, 'resource_template_name': rt_name, 'image_auth': image_auth, 'image_patterns': image_patterns } spec = { 'saas_file_name': saas_file_name, 'cluster': cluster, 'namespace': namespace, 'managed_resource_types': managed_resource_types, 'process_template_options': process_template_options, 'check_images_options_base': check_images_options_base, 'instance_name': instance_name, 'upstream': target.get('upstream') } specs.append(spec) return specs def populate_desired_state_saas_file(self, spec, ri): saas_file_name = spec['saas_file_name'] cluster = spec['cluster'] namespace = spec['namespace'] managed_resource_types = spec['managed_resource_types'] process_template_options = 
spec['process_template_options'] check_images_options_base = spec['check_images_options_base'] instance_name = spec['instance_name'] upstream = spec['upstream'] resources, html_url, promotion = \ self._process_template(process_template_options) if resources is None: ri.register_error() return # filter resources resources = [ resource for resource in resources if isinstance(resource, dict) and resource['kind'] in managed_resource_types ] # additional processing of resources self._additional_resource_process(resources, html_url) # check images skip_check_images = upstream and self.jenkins_map and \ self.jenkins_map[instance_name].is_job_running(upstream) if skip_check_images: logging.warning("skipping check_image since " + f"upstream job {upstream} is running") else: check_images_options = { 'html_url': html_url, 'resources': resources } check_images_options.update(check_images_options_base) image_error = self._check_images(check_images_options) if image_error: ri.register_error() return # add desired resources for resource in resources: resource_kind = resource['kind'] resource_name = resource['metadata']['name'] oc_resource = OR(resource, self.integration, self.integration_version, caller_name=saas_file_name, error_details=html_url) ri.add_desired(cluster, namespace, resource_kind, resource_name, oc_resource) return promotion def get_moving_commits_diff(self, dry_run): results = threaded.run(self.get_moving_commits_diff_saas_file, self.saas_files, self.thread_pool_size, dry_run=dry_run) return [item for sublist in results for item in sublist] def get_moving_commits_diff_saas_file(self, saas_file, dry_run): saas_file_name = saas_file['name'] instace_name = saas_file['instance']['name'] github = self._initiate_github(saas_file) trigger_specs = [] for rt in saas_file['resourceTemplates']: rt_name = rt['name'] url = rt['url'] for target in rt['targets']: # don't trigger if there is a linked upstream job if target.get('upstream'): continue ref = target['ref'] 
get_commit_sha_options = { 'url': url, 'ref': ref, 'github': github } desired_commit_sha = \ self._get_commit_sha(get_commit_sha_options) # don't trigger on refs which are commit shas if ref == desired_commit_sha: continue namespace = target['namespace'] cluster_name = namespace['cluster']['name'] namespace_name = namespace['name'] env_name = namespace['environment']['name'] key = f"{saas_file_name}/{rt_name}/{cluster_name}/" + \ f"{namespace_name}/{env_name}/{ref}" current_commit_sha = self.state.get(key, None) # skip if there is no change in commit sha if current_commit_sha == desired_commit_sha: continue # don't trigger if this is the first time # this target is being deployed. # that will be taken care of by # openshift-saas-deploy-trigger-configs if current_commit_sha is None: # store the value to take over from now on if not dry_run: self.state.add(key, value=desired_commit_sha) continue # we finally found something we want to trigger on! job_spec = { 'saas_file_name': saas_file_name, 'env_name': env_name, 'instance_name': instace_name, 'rt_name': rt_name, 'cluster_name': cluster_name, 'namespace_name': namespace_name, 'ref': ref, 'commit_sha': desired_commit_sha } trigger_specs.append(job_spec) return trigger_specs def update_moving_commit(self, job_spec): saas_file_name = job_spec['saas_file_name'] env_name = job_spec['env_name'] rt_name = job_spec['rt_name'] cluster_name = job_spec['cluster_name'] namespace_name = job_spec['namespace_name'] ref = job_spec['ref'] commit_sha = job_spec['commit_sha'] key = f"{saas_file_name}/{rt_name}/{cluster_name}/" + \ f"{namespace_name}/{env_name}/{ref}" self.state.add(key, value=commit_sha, force=True) def get_configs_diff(self): results = threaded.run(self.get_configs_diff_saas_file, self.saas_files, self.thread_pool_size) return [item for sublist in results for item in sublist] def get_configs_diff_saas_file(self, saas_file): saas_file_name = saas_file['name'] saas_file_parameters = saas_file.get('parameters') 
saas_file_managed_resource_types = saas_file['managedResourceTypes'] instace_name = saas_file['instance']['name'] trigger_specs = [] for rt in saas_file['resourceTemplates']: rt_name = rt['name'] url = rt['url'] path = rt['path'] rt_parameters = rt.get('parameters') for desired_target_config in rt['targets']: namespace = desired_target_config['namespace'] cluster_name = namespace['cluster']['name'] namespace_name = namespace['name'] env_name = namespace['environment']['name'] desired_target_config['namespace'] = \ self.sanitize_namespace(namespace) # add parent parameters to target config desired_target_config['saas_file_parameters'] = \ saas_file_parameters # add managed resource types to target config desired_target_config['saas_file_managed_resource_types'] = \ saas_file_managed_resource_types desired_target_config['url'] = url desired_target_config['path'] = path desired_target_config['rt_parameters'] = rt_parameters # get current target config from state key = f"{saas_file_name}/{rt_name}/{cluster_name}/" + \ f"{namespace_name}/{env_name}" current_target_config = self.state.get(key, None) # skip if there is no change in target configuration if current_target_config == desired_target_config: continue job_spec = { 'saas_file_name': saas_file_name, 'env_name': env_name, 'instance_name': instace_name, 'rt_name': rt_name, 'cluster_name': cluster_name, 'namespace_name': namespace_name, 'target_config': desired_target_config } trigger_specs.append(job_spec) return trigger_specs @staticmethod def sanitize_namespace(namespace): """Only keep fields that should trigger a new job.""" new_job_fields = { 'namespace': ['name', 'cluster', 'app'], 'cluster': ['name', 'serverUrl'], 'app': ['name'] } namespace = { k: v for k, v in namespace.items() if k in new_job_fields['namespace'] } cluster = namespace['cluster'] namespace['cluster'] = { k: v for k, v in cluster.items() if k in new_job_fields['cluster'] } app = namespace['app'] namespace['app'] = { k: v for k, v in 
app.items() if k in new_job_fields['app'] } return namespace def update_config(self, job_spec): saas_file_name = job_spec['saas_file_name'] env_name = job_spec['env_name'] rt_name = job_spec['rt_name'] cluster_name = job_spec['cluster_name'] namespace_name = job_spec['namespace_name'] target_config = job_spec['target_config'] key = f"{saas_file_name}/{rt_name}/{cluster_name}/" + \ f"{namespace_name}/{env_name}" self.state.add(key, value=target_config, force=True) def validate_promotions(self): """ If there were promotion sections in the participating saas files validate that the conditions are met. """ for item in self.promotions: if item is None: continue # validate that the commit sha being promoted # was succesfully published to the subscribed channel(s) commit_sha = item['commit_sha'] subscribe = item.get('subscribe') if subscribe: for channel in subscribe: state_key = f"promotions/{channel}/{commit_sha}" value = self.state.get(state_key, None) success = value.get('success') if not success: logging.error( f'Commit {commit_sha} was not ' + f'published with success to channel {channel}') return False return True def publish_promotions(self, success, saas_files, mr_cli): """ If there were promotion sections in the participating saas files publish the results for future promotion validations. 
""" subscribe_saas_file_path_map = \ self._get_subscribe_saas_file_path_map(saas_files, auto_only=True) trigger_promotion = False for item in self.promotions: commit_sha = item['commit_sha'] publish = item.get('publish') if publish: all_subscribed_saas_file_paths = set() for channel in publish: # publish to state to pass promotion gate state_key = f"promotions/{channel}/{commit_sha}" value = {'success': success} self.state.add(state_key, value, force=True) logging.info( f'Commit {commit_sha} was published ' + f'with success {success} to channel {channel}') # collect data to trigger promotion subscribed_saas_file_paths = \ subscribe_saas_file_path_map.get(channel) if subscribed_saas_file_paths: all_subscribed_saas_file_paths.update( subscribed_saas_file_paths) item['saas_file_paths'] = list(all_subscribed_saas_file_paths) if all_subscribed_saas_file_paths: trigger_promotion = True if trigger_promotion: mr = AutoPromoter(self.promotions) mr.submit(cli=mr_cli) @staticmethod def _get_subscribe_saas_file_path_map(saas_files, auto_only=False): """ Returns a dict with subscribe channels as keys and a list of paths of saas files containing these channels. """ subscribe_saas_file_path_map = {} for saas_file in saas_files: saas_file_path = 'data' + saas_file['path'] for rt in saas_file['resourceTemplates']: for target in rt['targets']: target_promotion = target.get('promotion') if not target_promotion: continue target_auto = target_promotion.get('auto') if auto_only and not target_auto: continue subscribe = target_promotion.get('subscribe') if not subscribe: continue for channel in subscribe: subscribe_saas_file_path_map.setdefault(channel, set()) subscribe_saas_file_path_map[channel].add( saas_file_path) return subscribe_saas_file_path_map