def fetch_desired_state(clusters, vault_input_path, settings):
    """Build the desired OAuth client list for clusters using github-org-team auth.

    Returns a tuple of (desired_state, error) where error flags that at
    least one vault secret could not be read.
    """
    secret_reader = SecretReader(settings=settings)
    desired_state = []
    error = False
    for cluster_info in clusters:
        auth = cluster_info['auth']
        service = auth['service']
        if service != 'github-org-team':
            # currently not supported
            continue
        org = auth['org']
        team = auth['team']
        secret = {
            'path': f'{vault_input_path}/{QONTRACT_INTEGRATION}/'
                    f'{service}/{org}/{team}'
        }
        try:
            oauth_data = secret_reader.read_all(secret)
            client_id = oauth_data['client-id']
            client_secret = oauth_data['client-secret']
        except Exception:
            logging.error(f"unable to read secret in path {secret['path']}")
            error = True
            continue
        desired_state.append({
            'cluster': cluster_info['name'],
            'name': f'github-{org}',
            'client_id': client_id,
            'client_secret': client_secret,
            'teams': [f'{org}/{team}'],
        })
    return desired_state, error
def __init__(self, dry_run, thread_pool_size):
    """Store run options and load dashdotdb credentials from vault."""
    self.dry_run = dry_run
    self.thread_pool_size = thread_pool_size
    self.settings = queries.get_app_interface_settings()
    reader = SecretReader(settings=self.settings)
    creds = reader.read_all({'path': DASHDOTDB_SECRET})
    self.dashdotdb_url = creds['url']
    self.dashdotdb_user = creds['username']
    self.dashdotdb_pass = creds['password']
def get_smtp_config(self, path, settings):
    """Read the SMTP connection settings from the vault secret at *path*.

    Raises if any required key is missing from the secret.
    """
    required_keys = ('password', 'port', 'require_tls', 'server', 'username')
    secret_reader = SecretReader(settings=settings)
    data = secret_reader.read_all({'path': path})
    try:
        return {key: data[key] for key in required_keys}
    except KeyError as e:
        raise Exception(f'Missing expected SMTP config '
                        f'key in vault secret: {e}')
def run(dry_run):
    """Reconcile the Jira service integration of gitlab projects.

    For every gitlab repo with a jira definition, update the project's
    Jira service unless its url and username already match the desired
    configuration.
    """
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    gl = GitLabApi(instance, settings=settings)
    secret_reader = SecretReader(settings=settings)
    # Jira
    for repo in queries.get_repos_gitlab_jira(server=gl.server):
        repo_url = repo['url']
        project = gl.get_project(repo_url=repo_url)
        services = project.services
        current_jira = services.get('jira')
        desired_jira = repo['jira']
        desired_jira_url = desired_jira['serverUrl']
        desired_credentials = secret_reader.read_all(desired_jira['token'])
        if current_jira.active:
            props = current_jira.properties
            if (props['url'] == desired_jira_url
                    and props['username'] == desired_credentials['username']):
                # already configured as desired - nothing to do
                continue
        logging.info(['update_jira', repo_url, desired_jira_url])
        if not dry_run:
            services.update('jira', new_data={
                'active': True,
                'url': desired_jira_url,
                'username': desired_credentials['username'],
                'password': desired_credentials['password'],
                'commit_events': True,
                'merge_requests_events': True,
                'comment_on_event_enabled': False
            })
class JiraClient(object):
    """Wrapper around Jira client"""

    def __init__(self, jira_board, settings=None):
        self.secret_reader = SecretReader(settings=settings)
        self.project = jira_board['name']
        server_info = jira_board['server']
        self.server = server_info['serverUrl']
        basic_auth = self.get_basic_auth(server_info['token'])
        self.jira = JIRA(self.server, basic_auth=basic_auth)

    def get_basic_auth(self, token):
        """Return a (username, password) tuple read from the token secret."""
        secret = self.secret_reader.read_all(token)
        missing = [k for k in ('username', 'password') if k not in secret]
        if missing:
            raise KeyError('[{}] secret is missing required keys'.format(
                self.project))
        return (secret['username'], secret['password'])

    def get_issues(self, fields=None):
        """Fetch all issues of the project, paging in blocks of 100."""
        block_size = 100
        kwargs = {}
        if fields:
            kwargs['fields'] = ','.join(fields)
        jql = 'project={}'.format(self.project)
        all_issues = []
        block_num = 0
        while True:
            start = block_num * block_size
            page = self.jira.search_issues(jql, start, block_size, **kwargs)
            all_issues.extend(page)
            # a short page means we have drained the result set
            if len(page) < block_size:
                break
            block_num += 1
        return all_issues
class SaasHerder():
    """Wrapper around SaaS deployment actions."""

    def __init__(self, saas_files, thread_pool_size, gitlab, integration,
                 integration_version, settings, jenkins_map=None,
                 accounts=None, validate=False):
        self.saas_files = saas_files
        if validate:
            self._validate_saas_files()
            if not self.valid:
                return
        self.thread_pool_size = thread_pool_size
        self.gitlab = gitlab
        self.integration = integration
        self.integration_version = integration_version
        self.settings = settings
        self.secret_reader = SecretReader(settings=settings)
        self.namespaces = self._collect_namespaces()
        self.jenkins_map = jenkins_map
        # each namespace is in fact a target,
        # so we can use it to calculate.
        divisor = len(self.namespaces) or 1
        self.available_thread_pool_size = \
            threaded.estimate_available_thread_pool_size(
                self.thread_pool_size, divisor)
        # if called by a single saas file, it may
        # specify that it manages resources exclusively.
        self.take_over = self._get_saas_file_attribute('takeover')
        self.compare = self._get_saas_file_attribute('compare')
        self.publish_job_logs = self._get_saas_file_attribute('publishJobLogs')
        if accounts:
            self._initiate_state(accounts)

    def _get_saas_file_attribute(self, attribute):
        # attributes fetched this way are only honored when a single
        # saas file is being processed
        return len(self.saas_files) == 1 and self.saas_files[0].get(attribute)

    def _validate_saas_files(self):
        """Validate saas files: owners, unique names and reusable parameters.

        Sets self.valid accordingly; parameter reuse issues only warn.
        """
        self.valid = True
        saas_file_name_path_map = {}
        for saas_file in self.saas_files:
            saas_file_name = saas_file['name']
            saas_file_path = saas_file['path']
            saas_file_name_path_map.setdefault(saas_file_name, [])
            saas_file_name_path_map[saas_file_name].append(saas_file_path)
            saas_file_owners = [u['org_username']
                                for r in saas_file['roles']
                                for u in r['users']]
            if not saas_file_owners:
                msg = 'saas file {} has no owners: {}'
                logging.error(msg.format(saas_file_name, saas_file_path))
                self.valid = False
            for resource_template in saas_file['resourceTemplates']:
                resource_template_name = resource_template['name']
                for target in resource_template['targets']:
                    target_parameters = target['parameters']
                    if not target_parameters:
                        continue
                    target_parameters = json.loads(target_parameters)
                    target_namespace = target['namespace']
                    namespace_name = target_namespace['name']
                    cluster_name = target_namespace['cluster']['name']
                    environment = target_namespace['environment']
                    environment_name = environment['name']
                    environment_parameters = environment['parameters']
                    if not environment_parameters:
                        continue
                    environment_parameters = \
                        json.loads(environment_parameters)
                    msg = \
                        f'[{saas_file_name}/{resource_template_name}] ' + \
                        f'parameter found in target ' + \
                        f'{cluster_name}/{namespace_name} ' + \
                        f'should be reused from env {environment_name}'
                    for t_key, t_value in target_parameters.items():
                        if not isinstance(t_value, str):
                            continue
                        for e_key, e_value in environment_parameters.items():
                            if not isinstance(e_value, str):
                                continue
                            if '.' not in e_value:
                                continue
                            if e_value not in t_value:
                                continue
                            if t_key == e_key and t_value == e_value:
                                details = \
                                    f'consider removing {t_key}'
                            else:
                                replacement = t_value.replace(
                                    e_value, '${' + e_key + '}')
                                details = \
                                    f'target: \"{t_key}: {t_value}\". ' + \
                                    f'env: \"{e_key}: {e_value}\". ' + \
                                    f'consider \"{t_key}: {replacement}\"'
                            logging.warning(f'{msg}: {details}')

        duplicates = {saas_file_name: saas_file_paths
                      for saas_file_name, saas_file_paths
                      in saas_file_name_path_map.items()
                      if len(saas_file_paths) > 1}
        if duplicates:
            self.valid = False
            msg = 'saas file name {} is not unique: {}'
            for saas_file_name, saas_file_paths in duplicates.items():
                logging.error(msg.format(saas_file_name, saas_file_paths))

    def _collect_namespaces(self):
        # namespaces may appear more then once in the result
        namespaces = []
        for saas_file in self.saas_files:
            managed_resource_types = saas_file['managedResourceTypes']
            resource_templates = saas_file['resourceTemplates']
            for rt in resource_templates:
                targets = rt['targets']
                for target in targets:
                    namespace = target['namespace']
                    if target.get('disable'):
                        logging.debug(
                            f"[{saas_file['name']}/{rt['name']}] target " +
                            f"{namespace['cluster']['name']}/" +
                            f"{namespace['name']} is disabled.")
                        continue
                    # managedResourceTypes is defined per saas_file
                    # add it to each namespace in the current saas_file
                    namespace['managedResourceTypes'] = managed_resource_types
                    namespaces.append(namespace)
        return namespaces

    def _initiate_state(self, accounts):
        self.state = State(integration=self.integration,
                           accounts=accounts,
                           settings=self.settings)

    @staticmethod
    def _collect_parameters(container):
        """Return container's parameters as a dict of strings."""
        parameters = container.get('parameters') or {}
        if isinstance(parameters, str):
            parameters = json.loads(parameters)
        # adjust Python's True/False
        for k, v in parameters.items():
            if v is True:
                parameters[k] = 'true'
            elif v is False:
                parameters[k] = 'false'
            elif any([isinstance(v, t) for t in [dict, list, tuple]]):
                parameters[k] = json.dumps(v)
        return parameters

    @retry()
    def _get_file_contents(self, options):
        """Fetch a single file from github/gitlab and yaml-load it."""
        url = options['url']
        path = options['path']
        ref = options['ref']
        github = options['github']
        html_url = os.path.join(url, 'blob', ref, path)
        content = None
        if 'github' in url:
            repo_name = url.rstrip("/").replace('https://github.com/', '')
            repo = github.get_repo(repo_name)
            f = repo.get_contents(path, ref)
            content = f.decoded_content
        elif 'gitlab' in url:
            if not self.gitlab:
                raise Exception('gitlab is not initialized')
            project = self.gitlab.get_project(url)
            f = project.files.get(file_path=path.lstrip('/'), ref=ref)
            content = f.decode()
        return yaml.safe_load(content), html_url

    @retry()
    def _get_directory_contents(self, options):
        """Fetch and yaml-load every file under a github/gitlab directory."""
        url = options['url']
        path = options['path']
        ref = options['ref']
        github = options['github']
        html_url = os.path.join(url, 'tree', ref, path)
        resources = []
        if 'github' in url:
            repo_name = url.rstrip("/").replace('https://github.com/', '')
            repo = github.get_repo(repo_name)
            for f in repo.get_contents(path, ref):
                file_path = os.path.join(path, f.name)
                file_contents = repo.get_contents(file_path, ref)
                resource = yaml.safe_load(file_contents.decoded_content)
                resources.append(resource)
        elif 'gitlab' in url:
            if not self.gitlab:
                raise Exception('gitlab is not initialized')
            project = self.gitlab.get_project(url)
            for f in project.repository_tree(path=path.lstrip('/'),
                                             ref=ref, all=True):
                file_contents = \
                    project.files.get(file_path=f['path'], ref=ref)
                resource = yaml.safe_load(file_contents.decode())
                resources.append(resource)
        return resources, html_url

    @retry()
    def _get_commit_sha(self, options):
        """Resolve a ref to a commit sha, optionally truncated."""
        url = options['url']
        ref = options['ref']
        github = options['github']
        hash_length = options.get('hash_length')
        commit_sha = ''
        if 'github' in url:
            repo_name = url.rstrip("/").replace('https://github.com/', '')
            repo = github.get_repo(repo_name)
            commit = repo.get_commit(sha=ref)
            commit_sha = commit.sha
        elif 'gitlab' in url:
            if not self.gitlab:
                raise Exception('gitlab is not initialized')
            project = self.gitlab.get_project(url)
            commits = project.commits.list(ref_name=ref)
            commit_sha = commits[0].id
        if hash_length:
            return commit_sha[:hash_length]
        return commit_sha

    @staticmethod
    def _get_cluster_and_namespace(target):
        cluster = target['namespace']['cluster']['name']
        namespace = target['namespace']['name']
        return cluster, namespace

    def _process_template(self, options):
        """Render a target into resources (openshift-template or directory).

        Returns (resources, html_url); resources is None on error.
        """
        saas_file_name = options['saas_file_name']
        resource_template_name = options['resource_template_name']
        url = options['url']
        path = options['path']
        provider = options['provider']
        target = options['target']
        github = options['github']
        target_ref = target['ref']
        resources = None
        html_url = None
        if provider == 'openshift-template':
            hash_length = options['hash_length']
            parameters = options['parameters']
            environment = target['namespace']['environment']
            environment_parameters = self._collect_parameters(environment)
            target_parameters = self._collect_parameters(target)
            # precedence: environment < saas file/template < target
            consolidated_parameters = {}
            consolidated_parameters.update(environment_parameters)
            consolidated_parameters.update(parameters)
            consolidated_parameters.update(target_parameters)
            # substitute ${KEY} references between parameters
            for replace_key, replace_value in consolidated_parameters.items():
                if not isinstance(replace_value, str):
                    continue
                replace_pattern = '${' + replace_key + '}'
                for k, v in consolidated_parameters.items():
                    if not isinstance(v, str):
                        continue
                    if replace_pattern in v:
                        consolidated_parameters[k] = \
                            v.replace(replace_pattern, replace_value)
            get_file_contents_options = {
                'url': url,
                'path': path,
                'ref': target_ref,
                'github': github
            }
            try:
                template, html_url = \
                    self._get_file_contents(get_file_contents_options)
            except Exception as e:
                logging.error(f"[{url}/{path}:{target_ref}] " +
                              f"error fetching template: {str(e)}")
                return None, None
            if "IMAGE_TAG" not in consolidated_parameters:
                template_parameters = template.get('parameters')
                if template_parameters is not None:
                    for template_parameter in template_parameters:
                        if template_parameter['name'] == 'IMAGE_TAG':
                            # add IMAGE_TAG only if it is required
                            get_commit_sha_options = {
                                'url': url,
                                'ref': target_ref,
                                'hash_length': hash_length,
                                'github': github
                            }
                            image_tag = self._get_commit_sha(
                                get_commit_sha_options)
                            consolidated_parameters['IMAGE_TAG'] = image_tag
            oc = OC('server', 'token', local=True)
            try:
                resources = oc.process(template, consolidated_parameters)
            except StatusCodeError as e:
                logging.error(
                    f"[{saas_file_name}/{resource_template_name}] " +
                    f"{html_url}: error processing template: {str(e)}")
        elif provider == 'directory':
            get_directory_contents_options = {
                'url': url,
                'path': path,
                'ref': target_ref,
                'github': github
            }
            try:
                resources, html_url = \
                    self._get_directory_contents(
                        get_directory_contents_options)
            except Exception as e:
                logging.error(f"[{url}/{path}:{target_ref}] " +
                              f"error fetching directory: {str(e)}")
                return None, None
        else:
            logging.error(f"[{saas_file_name}/{resource_template_name}] " +
                          f"unknown provider: {provider}")
        return resources, html_url

    def _collect_images(self, resource):
        """Collect all container image references from a resource."""
        images = set()
        # resources with pod templates
        try:
            template = resource["spec"]["template"]
            for c in template["spec"]["containers"]:
                images.add(c["image"])
        except KeyError:
            pass
        # init containers
        try:
            template = resource["spec"]["template"]
            for c in template["spec"]["initContainers"]:
                images.add(c["image"])
        except KeyError:
            pass
        # CronJob
        try:
            template = resource["spec"]["jobTemplate"]["spec"]["template"]
            for c in template["spec"]["containers"]:
                images.add(c["image"])
        except KeyError:
            pass
        # CatalogSource templates
        try:
            images.add(resource["spec"]["image"])
        except KeyError:
            pass
        return images

    @staticmethod
    def _check_image(image, image_patterns, image_auth, error_prefix):
        """Return True (error) if image is not allowed or does not exist."""
        error = False
        if image_patterns and \
                not any(image.startswith(p) for p in image_patterns):
            error = True
            logging.error(
                f"{error_prefix} Image is not in imagePatterns: {image}")
        try:
            valid = Image(image, **image_auth)
            if not valid:
                error = True
                logging.error(f"{error_prefix} Image does not exist: {image}")
        except Exception as e:
            error = True
            logging.error(f"{error_prefix} Image is invalid: {image}. " +
                          f"details: {str(e)}")
        return error

    def _check_images(self, options):
        """Check all images in the rendered resources; return True on error."""
        saas_file_name = options['saas_file_name']
        resource_template_name = options['resource_template_name']
        html_url = options['html_url']
        resources = options['resources']
        image_auth = options['image_auth']
        image_patterns = options['image_patterns']
        error_prefix = \
            f"[{saas_file_name}/{resource_template_name}] {html_url}:"
        images_list = threaded.run(self._collect_images, resources,
                                   self.available_thread_pool_size)
        images = set([item for sublist in images_list for item in sublist])
        if not images:
            return False  # no errors
        errors = threaded.run(self._check_image, images,
                              self.available_thread_pool_size,
                              image_patterns=image_patterns,
                              image_auth=image_auth,
                              error_prefix=error_prefix)
        error = True in errors
        return error

    def _initiate_github(self, saas_file):
        """Return a Github client using the saas file's token or the default."""
        auth = saas_file.get('authentication') or {}
        auth_code = auth.get('code') or {}
        if auth_code:
            token = self.secret_reader.read(auth_code)
        else:
            # use the app-sre token by default
            default_org_name = 'app-sre'
            config = get_config(desired_org_name=default_org_name)
            token = config['github'][default_org_name]['token']
        base_url = os.environ.get('GITHUB_API', 'https://api.github.com')
        return Github(token, base_url=base_url)

    def _initiate_image_auth(self, saas_file):
        """
        This function initiates a dict required for image authentication.
        This dict will be used as kwargs for sretoolbox's Image.
        The image authentication secret specified in the saas file must
        contain the 'user' and 'token' keys, and may optionally contain
        a 'url' key specifying the image registry url to be passed to check
        if an image should be checked using these credentials.
        The function returns the keys extracted from the secret in the
        structure expected by sretoolbox's Image:
        'user' --> 'username'
        'token' --> 'password'
        'url' --> 'auth_server' (optional)
        """
        auth = saas_file.get('authentication')
        if not auth:
            return {}
        auth_image_secret = auth.get('image')
        if not auth_image_secret:
            return {}
        creds = self.secret_reader.read_all(auth_image_secret)
        required_keys = ['user', 'token']
        ok = all(k in creds.keys() for k in required_keys)
        if not ok:
            logging.warning(
                "the specified image authentication secret " +
                f"found in path {auth_image_secret['path']} " +
                f"does not contain all required keys: {required_keys}")
            return {}
        image_auth = {'username': creds['user'],
                      'password': creds['token']}
        url = creds.get('url')
        if url:
            # bug fix: this was `image_auth['auth_server']: url`, a
            # no-op annotation statement, so auth_server was never set
            image_auth['auth_server'] = url
        return image_auth

    def populate_desired_state(self, ri):
        """Populate the ResourceInventory with the desired state of all targets."""
        results = threaded.run(self.init_populate_desired_state_specs,
                               self.saas_files,
                               self.thread_pool_size)
        desired_state_specs = \
            [item for sublist in results for item in sublist]
        threaded.run(self.populate_desired_state_saas_file,
                     desired_state_specs,
                     self.thread_pool_size,
                     ri=ri)

    def init_populate_desired_state_specs(self, saas_file):
        """Build one processing spec per (resource template, target) pair."""
        specs = []
        saas_file_name = saas_file['name']
        github = self._initiate_github(saas_file)
        image_auth = self._initiate_image_auth(saas_file)
        instance_name = saas_file['instance']['name']
        managed_resource_types = saas_file['managedResourceTypes']
        image_patterns = saas_file['imagePatterns']
        resource_templates = saas_file['resourceTemplates']
        saas_file_parameters = self._collect_parameters(saas_file)
        # iterate over resource templates (multiple per saas_file)
        for rt in resource_templates:
            rt_name = rt['name']
            url = rt['url']
            path = rt['path']
            provider = rt.get('provider') or 'openshift-template'
            hash_length = rt.get('hash_length') or self.settings['hashLength']
            parameters = self._collect_parameters(rt)
            consolidated_parameters = {}
            consolidated_parameters.update(saas_file_parameters)
            consolidated_parameters.update(parameters)
            # iterate over targets (each target is a namespace)
            for target in rt['targets']:
                if target.get('disable'):
                    # a warning is logged during SaasHerder initiation
                    continue
                cluster, namespace = \
                    self._get_cluster_and_namespace(target)
                process_template_options = {
                    'saas_file_name': saas_file_name,
                    'resource_template_name': rt_name,
                    'url': url,
                    'path': path,
                    'provider': provider,
                    'hash_length': hash_length,
                    'target': target,
                    'parameters': consolidated_parameters,
                    'github': github
                }
                check_images_options_base = {
                    'saas_file_name': saas_file_name,
                    'resource_template_name': rt_name,
                    'image_auth': image_auth,
                    'image_patterns': image_patterns
                }
                spec = {
                    'saas_file_name': saas_file_name,
                    'cluster': cluster,
                    'namespace': namespace,
                    'managed_resource_types': managed_resource_types,
                    'process_template_options': process_template_options,
                    'check_images_options_base': check_images_options_base,
                    'instance_name': instance_name,
                    'upstream': target.get('upstream')
                }
                specs.append(spec)
        return specs

    def populate_desired_state_saas_file(self, spec, ri):
        """Render one spec and register its resources (and errors) in ri."""
        saas_file_name = spec['saas_file_name']
        cluster = spec['cluster']
        namespace = spec['namespace']
        managed_resource_types = spec['managed_resource_types']
        process_template_options = spec['process_template_options']
        check_images_options_base = spec['check_images_options_base']
        instance_name = spec['instance_name']
        upstream = spec['upstream']
        resources, html_url = \
            self._process_template(process_template_options)
        if resources is None:
            ri.register_error()
            return
        # filter resources
        resources = [resource for resource in resources
                     if isinstance(resource, dict)
                     and resource['kind'] in managed_resource_types]
        # check images
        skip_check_images = upstream and self.jenkins_map and \
            self.jenkins_map[instance_name].is_job_running(upstream)
        if skip_check_images:
            logging.warning(f"skipping check_image since " +
                            f"upstream job {upstream} is running")
        else:
            check_images_options = {
                'html_url': html_url,
                'resources': resources
            }
            check_images_options.update(check_images_options_base)
            image_error = self._check_images(check_images_options)
            if image_error:
                ri.register_error()
                return
        # add desired resources
        for resource in resources:
            resource_kind = resource['kind']
            resource_name = resource['metadata']['name']
            oc_resource = OR(resource,
                             self.integration,
                             self.integration_version,
                             caller_name=saas_file_name,
                             error_details=html_url)
            ri.add_desired(cluster, namespace, resource_kind,
                           resource_name, oc_resource)

    def get_moving_commits_diff(self, dry_run):
        results = threaded.run(self.get_moving_commits_diff_saas_file,
                               self.saas_files,
                               self.thread_pool_size,
                               dry_run=dry_run)
        return [item for sublist in results for item in sublist]

    def get_moving_commits_diff_saas_file(self, saas_file, dry_run):
        """Return trigger specs for targets whose moving ref gained commits."""
        saas_file_name = saas_file['name']
        instance_name = saas_file['instance']['name']
        github = self._initiate_github(saas_file)
        trigger_specs = []
        for rt in saas_file['resourceTemplates']:
            rt_name = rt['name']
            url = rt['url']
            for target in rt['targets']:
                # don't trigger if there is a linked upstream job
                if target.get('upstream'):
                    continue
                ref = target['ref']
                get_commit_sha_options = {
                    'url': url,
                    'ref': ref,
                    'github': github
                }
                desired_commit_sha = \
                    self._get_commit_sha(get_commit_sha_options)
                # don't trigger on refs which are commit shas
                if ref == desired_commit_sha:
                    continue
                namespace = target['namespace']
                cluster_name = namespace['cluster']['name']
                namespace_name = namespace['name']
                env_name = namespace['environment']['name']
                key = f"{saas_file_name}/{rt_name}/{cluster_name}/" + \
                    f"{namespace_name}/{env_name}/{ref}"
                current_commit_sha = self.state.get(key, None)
                # skip if there is no change in commit sha
                if current_commit_sha == desired_commit_sha:
                    continue
                # don't trigger if this is the first time
                # this target is being deployed.
                # that will be taken care of by
                # openshift-saas-deploy-trigger-configs
                if current_commit_sha is None:
                    # store the value to take over from now on
                    if not dry_run:
                        self.state.add(key, value=desired_commit_sha)
                    continue
                # we finally found something we want to trigger on!
                job_spec = {
                    'saas_file_name': saas_file_name,
                    'env_name': env_name,
                    'instance_name': instance_name,
                    'rt_name': rt_name,
                    'cluster_name': cluster_name,
                    'namespace_name': namespace_name,
                    'ref': ref,
                    'commit_sha': desired_commit_sha
                }
                trigger_specs.append(job_spec)
        return trigger_specs

    def update_moving_commit(self, job_spec):
        """Persist the new commit sha of a triggered moving-ref target."""
        saas_file_name = job_spec['saas_file_name']
        env_name = job_spec['env_name']
        rt_name = job_spec['rt_name']
        cluster_name = job_spec['cluster_name']
        namespace_name = job_spec['namespace_name']
        ref = job_spec['ref']
        commit_sha = job_spec['commit_sha']
        key = f"{saas_file_name}/{rt_name}/{cluster_name}/" + \
            f"{namespace_name}/{env_name}/{ref}"
        self.state.add(key, value=commit_sha, force=True)

    def get_configs_diff(self):
        results = threaded.run(self.get_configs_diff_saas_file,
                               self.saas_files, self.thread_pool_size)
        return [item for sublist in results for item in sublist]

    def get_configs_diff_saas_file(self, saas_file):
        """Return trigger specs for targets whose configuration changed."""
        saas_file_name = saas_file['name']
        saas_file_parameters = saas_file.get('parameters')
        saas_file_managed_resource_types = saas_file['managedResourceTypes']
        instance_name = saas_file['instance']['name']
        trigger_specs = []
        for rt in saas_file['resourceTemplates']:
            rt_name = rt['name']
            url = rt['url']
            path = rt['path']
            rt_parameters = rt.get('parameters')
            for desired_target_config in rt['targets']:
                namespace = desired_target_config['namespace']
                cluster_name = namespace['cluster']['name']
                namespace_name = namespace['name']
                env_name = namespace['environment']['name']
                desired_target_config['namespace'] = \
                    self.sanitize_namespace(namespace)
                # add parent parameters to target config
                desired_target_config['saas_file_parameters'] = \
                    saas_file_parameters
                # add managed resource types to target config
                desired_target_config['saas_file_managed_resource_types'] = \
                    saas_file_managed_resource_types
                desired_target_config['url'] = url
                desired_target_config['path'] = path
                desired_target_config['rt_parameters'] = rt_parameters
                # get current target config from state
                key = f"{saas_file_name}/{rt_name}/{cluster_name}/" + \
                    f"{namespace_name}/{env_name}"
                current_target_config = self.state.get(key, None)
                # skip if there is no change in target configuration
                if current_target_config == desired_target_config:
                    continue
                job_spec = {
                    'saas_file_name': saas_file_name,
                    'env_name': env_name,
                    'instance_name': instance_name,
                    'rt_name': rt_name,
                    'cluster_name': cluster_name,
                    'namespace_name': namespace_name,
                    'target_config': desired_target_config
                }
                trigger_specs.append(job_spec)
        return trigger_specs

    @staticmethod
    def sanitize_namespace(namespace):
        """Only keep fields that should trigger a new job."""
        new_job_fields = {
            'namespace': ['name', 'cluster', 'app'],
            'cluster': ['name', 'serverUrl'],
            'app': ['name']
        }
        namespace = {k: v for k, v in namespace.items()
                     if k in new_job_fields['namespace']}
        cluster = namespace['cluster']
        namespace['cluster'] = {k: v for k, v in cluster.items()
                                if k in new_job_fields['cluster']}
        app = namespace['app']
        namespace['app'] = {k: v for k, v in app.items()
                            if k in new_job_fields['app']}
        return namespace

    def update_config(self, job_spec):
        """Persist the new target configuration of a triggered target."""
        saas_file_name = job_spec['saas_file_name']
        env_name = job_spec['env_name']
        rt_name = job_spec['rt_name']
        cluster_name = job_spec['cluster_name']
        namespace_name = job_spec['namespace_name']
        target_config = job_spec['target_config']
        key = f"{saas_file_name}/{rt_name}/{cluster_name}/" + \
            f"{namespace_name}/{env_name}"
        self.state.add(key, value=target_config, force=True)
class QuayMirror:
    """Mirror external images into quay repositories using skopeo."""

    QUAY_ORG_CATALOG_QUERY = """
    {
      quay_orgs: quay_orgs_v1 {
        name
        pushCredentials {
          path
          field
        }
      }
    }
    """

    QUAY_REPOS_QUERY = """
    {
      apps: apps_v1 {
        quayRepos {
          org {
            name
            serverUrl
          }
          items {
            name
            public
            mirror {
              url
              pullCredentials {
                path
                field
              }
              tags
              tagsExclude
            }
          }
        }
      }
    }
    """

    def __init__(self, dry_run=False):
        self.dry_run = dry_run
        self.gqlapi = gql.get_api()
        settings = queries.get_app_interface_settings()
        self.secret_reader = SecretReader(settings=settings)
        self.skopeo_cli = Skopeo(dry_run)
        self.push_creds = self._get_push_creds()

    def run(self):
        """Execute all pending mirror copies, logging skopeo failures."""
        sync_tasks = self.process_sync_tasks()
        for org, data in sync_tasks.items():
            for item in data:
                try:
                    self.skopeo_cli.copy(src_image=item['mirror_url'],
                                         src_creds=item['mirror_creds'],
                                         dst_image=item['image_url'],
                                         dest_creds=self.push_creds[org])
                except SkopeoCmdError as details:
                    _LOG.error('[%s]', details)

    def process_repos_query(self):
        """Return {org: [repo items]} for all repos that define a mirror."""
        result = self.gqlapi.query(self.QUAY_REPOS_QUERY)
        summary = defaultdict(list)
        for app in result['apps']:
            quay_repos = app.get('quayRepos')
            if quay_repos is None:
                continue
            for quay_repo in quay_repos:
                org = quay_repo['org']['name']
                server_url = quay_repo['org'].get('serverUrl') or 'quay.io'
                for item in quay_repo['items']:
                    if item['mirror'] is None:
                        continue
                    mirror_image = Image(item['mirror']['url'])
                    # mirroring docker.io library images to a public repo
                    # is not allowed
                    if (mirror_image.registry == 'docker.io'
                            and mirror_image.repository == 'library'
                            and item['public']):
                        _LOG.error("Image %s can't be mirrored to a public "
                                   "quay repository.", mirror_image)
                        continue
                    summary[org].append({'name': item["name"],
                                         'mirror': item['mirror'],
                                         'server_url': server_url})
        return summary

    def sync_tag(self, tags, tags_exclude, candidate):
        """Decide whether a tag should be synced, based on include/exclude
        regex lists. An include list takes precedence over an exclude list."""
        if tags is not None:
            for tag in tags:
                if re.match(tag, candidate):
                    return True
            # When tags is defined, we don't look at
            # tags_exclude
            return False

        if tags_exclude is not None:
            for tag_exclude in tags_exclude:
                if re.match(tag_exclude, candidate):
                    return False
            return True

        # Both tags and tags_exclude are None, so
        # tag must be synced
        return True

    def process_sync_tasks(self):
        """Compare mirrors with quay repos and return the copies to perform."""
        eight_hours = 28800  # 60 * 60 * 8
        is_deep_sync = self._is_deep_sync(interval=eight_hours)
        summary = self.process_repos_query()
        sync_tasks = defaultdict(list)
        for org, data in summary.items():
            for item in data:
                push_creds = self.push_creds[org].split(':')
                image = Image(f'{item["server_url"]}/{org}/{item["name"]}',
                              username=push_creds[0],
                              password=push_creds[1])
                mirror_url = item['mirror']['url']
                username = None
                password = None
                mirror_creds = None
                if item['mirror']['pullCredentials'] is not None:
                    pull_credentials = item['mirror']['pullCredentials']
                    raw_data = self.secret_reader.read_all(pull_credentials)
                    username = raw_data["user"]
                    password = raw_data["token"]
                    mirror_creds = f'{username}:{password}'
                image_mirror = Image(mirror_url,
                                     username=username,
                                     password=password)
                tags = item['mirror'].get('tags')
                tags_exclude = item['mirror'].get('tagsExclude')
                for tag in image_mirror:
                    if not self.sync_tag(tags=tags,
                                         tags_exclude=tags_exclude,
                                         candidate=tag):
                        continue
                    upstream = image_mirror[tag]
                    downstream = image[tag]
                    if tag not in image:
                        # typo fix: message said "out off sync"
                        _LOG.debug('Image %s and mirror %s are out of sync',
                                   downstream, upstream)
                        sync_tasks[org].append(
                            {'mirror_url': str(upstream),
                             'mirror_creds': mirror_creds,
                             'image_url': str(downstream)})
                        continue
                    # Deep (slow) check only in non dry-run mode
                    if self.dry_run:
                        _LOG.debug('Image %s and mirror %s are in sync',
                                   downstream, upstream)
                        continue
                    # Deep (slow) check only from time to time
                    if not is_deep_sync:
                        _LOG.debug('Image %s and mirror %s are in sync',
                                   downstream, upstream)
                        continue
                    try:
                        if downstream == upstream:
                            _LOG.debug('Image %s and mirror %s are in sync',
                                       downstream, upstream)
                            continue
                    except ImageComparisonError as details:
                        _LOG.error('[%s]', details)
                        continue
                    _LOG.debug('Image %s and mirror %s are out of sync',
                               downstream, upstream)
                    sync_tasks[org].append({'mirror_url': str(upstream),
                                            'mirror_creds': mirror_creds,
                                            'image_url': str(downstream)})
        return sync_tasks

    def _is_deep_sync(self, interval):
        """Return True if `interval` seconds passed since the last deep sync,
        recording the new timestamp in a temp control file."""
        control_file_name = 'qontract-reconcile-quay-mirror.timestamp'
        control_file_path = os.path.join(tempfile.gettempdir(),
                                         control_file_name)
        try:
            with open(control_file_path, 'r') as file_obj:
                last_deep_sync = float(file_obj.read())
        except FileNotFoundError:
            self._record_timestamp(control_file_path)
            return True
        next_deep_sync = last_deep_sync + interval
        if time.time() >= next_deep_sync:
            self._record_timestamp(control_file_path)
            return True
        return False

    @staticmethod
    def _record_timestamp(path):
        with open(path, 'w') as file_object:
            file_object.write(str(time.time()))

    def _get_push_creds(self):
        """Return {org: 'user:token'} push credentials from vault."""
        result = self.gqlapi.query(self.QUAY_ORG_CATALOG_QUERY)
        creds = {}
        for org_data in result['quay_orgs']:
            push_secret = org_data['pushCredentials']
            if push_secret is None:
                continue
            raw_data = self.secret_reader.read_all(push_secret)
            org = org_data['name']
            creds[org] = f'{raw_data["user"]}:{raw_data["token"]}'
        return creds
def get_apps_data(date, month_delta=1):
    """Collect per-app reporting data for the period ending at `date`.

    For every app in app-interface, gathers:
      - 'promotions': saas-deploy/promote-to-prod build counts per repo
      - 'merge_activity': build-master build counts per code repo
      - 'container_vulnerabilities': imagemanifestvuln metrics per
        cluster/namespace/severity, scraped from dashdotdb

    :param date: end of the reporting window (datetime)
    :param month_delta: window length in months (default 1)
    :return: list of app dicts, each augmented with the keys above
    """
    apps = queries.get_apps()
    jjb, _ = init_jjb()
    saas_jobs = jjb.get_all_jobs(job_types=['saas-deploy', 'promote-to-prod'])
    build_master_jobs = jjb.get_all_jobs(job_types=['build-master'])
    jenkins_map = jenkins_base.get_jenkins_map()
    # only builds newer than this UTC epoch are counted
    time_limit = date - relativedelta(months=month_delta)
    timestamp_limit = \
        int(time_limit.replace(tzinfo=timezone.utc).timestamp())
    saas_build_history = \
        get_build_history(jenkins_map, saas_jobs, timestamp_limit)
    build_master_build_history = \
        get_build_history(jenkins_map, build_master_jobs, timestamp_limit)

    # dashdotdb credentials come from vault; metrics is the raw
    # Prometheus text exposition format returned by the service
    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    secret_content = secret_reader.read_all({'path': DASHDOTDB_SECRET})
    dashdotdb_url = secret_content['url']
    dashdotdb_user = secret_content['username']
    dashdotdb_pass = secret_content['password']
    # NOTE(review): no timeout and no status check on this request —
    # a dashdotdb outage would hang or yield unparseable text; confirm
    # whether that is acceptable here.
    metrics = requests.get(f'{dashdotdb_url}/api/v1/metrics',
                           auth=(dashdotdb_user, dashdotdb_pass)).text
    namespaces = queries.get_namespaces()

    for app in apps:
        # apps without code components have nothing to report on
        if not app['codeComponents']:
            continue

        app_name = app['name']

        logging.info(f"collecting promotions for {app_name}")
        app['promotions'] = {}
        saas_repos = [
            c['url'] for c in app['codeComponents']
            if c['resource'] == 'saasrepo'
        ]
        for sr in saas_repos:
            sr_history = saas_build_history.get(sr)
            if not sr_history:
                continue
            successes = [h for h in sr_history if h == 'SUCCESS']
            # (total builds, successful builds)
            app['promotions'][sr] = (len(sr_history), len(successes))

        logging.info(f"collecting merge activity for {app_name}")
        app['merge_activity'] = {}
        code_repos = [
            c['url'] for c in app['codeComponents']
            if c['resource'] == 'upstream'
        ]
        for cr in code_repos:
            cr_history = build_master_build_history.get(cr)
            if not cr_history:
                continue
            successes = [h for h in cr_history if h == 'SUCCESS']
            app['merge_activity'][cr] = (len(cr_history), len(successes))

        logging.info(f"collecting vulnerabilities information for {app_name}")
        # namespaces owned by this app — used to match metric labels
        app_namespaces = []
        for namespace in namespaces:
            if namespace['app']['name'] != app['name']:
                continue
            app_namespaces.append(namespace)
        app_metrics = {}
        for family in text_string_to_metric_families(metrics):
            for sample in family.samples:
                if sample.name != 'imagemanifestvuln_total':
                    continue
                for app_namespace in app_namespaces:
                    cluster = sample.labels['cluster']
                    if app_namespace['cluster']['name'] != cluster:
                        continue
                    namespace = sample.labels['namespace']
                    if app_namespace['name'] != namespace:
                        continue
                    severity = sample.labels['severity']
                    if cluster not in app_metrics:
                        app_metrics[cluster] = {}
                    if namespace not in app_metrics[cluster]:
                        app_metrics[cluster][namespace] = {}
                    # first sample wins per (cluster, namespace, severity)
                    if severity not in app_metrics[cluster][namespace]:
                        value = int(sample.value)
                        app_metrics[cluster][namespace][severity] = value
        app['container_vulnerabilities'] = app_metrics

    return apps
class AWSApi(object):
    """Wrapper around AWS SDK"""

    def __init__(self, thread_pool_size, accounts, settings=None,
                 init_ecr_auth_tokens=False):
        """Initialize boto3 sessions and IAM user lists for `accounts`.

        :param thread_pool_size: threads used for per-account fan-out
        :param accounts: app-interface AWS account dicts (must contain
                         'name' and 'automationToken')
        :param settings: app-interface settings for the secret reader
        :param init_ecr_auth_tokens: also fetch ECR auth tokens up front
        """
        self.thread_pool_size = thread_pool_size
        self.secret_reader = SecretReader(settings=settings)
        self.init_sessions_and_resources(accounts)
        if init_ecr_auth_tokens:
            self.init_ecr_auth_tokens(accounts)
        self.init_users()
        # guards concurrent writes to self.resources (see set_resouces)
        self._lock = Lock()
        self.resource_types = \
            ['s3', 'sqs', 'dynamodb', 'rds', 'rds_snapshots']

        # store the app-interface accounts in a dictionary indexed by name
        self.accounts = {acc['name']: acc for acc in accounts}

    def init_sessions_and_resources(self, accounts):
        """Create one boto3 Session per account from its vault token.

        Populates self.sessions (account -> Session) and seeds
        self.resources with an empty dict per account.
        """
        results = threaded.run(self.get_tf_secrets, accounts,
                               self.thread_pool_size)
        self.sessions = {}
        self.resources = {}
        for account, secret in results:
            access_key = secret['aws_access_key_id']
            secret_key = secret['aws_secret_access_key']
            region_name = secret['region']
            session = boto3.Session(
                aws_access_key_id=access_key,
                aws_secret_access_key=secret_key,
                region_name=region_name,
            )
            self.sessions[account] = session
            self.resources[account] = {}

    def get_session(self, account):
        """Return the boto3 Session for `account` (KeyError if unknown)."""
        return self.sessions[account]

    def get_tf_secrets(self, account):
        """Read the account's automation token secret from vault.

        :return: (account_name, secret_dict) tuple
        """
        account_name = account['name']
        automation_token = account['automationToken']
        secret = self.secret_reader.read_all(automation_token)
        return (account_name, secret)

    def init_users(self):
        """Populate self.users: account -> list of IAM user names.

        NOTE(review): iam.list_users() is not paginated here, so only
        the first page of users is captured — confirm account sizes
        stay below the API page limit.
        """
        self.users = {}
        for account, s in self.sessions.items():
            iam = s.client('iam')
            users = [u['UserName'] for u in iam.list_users()['Users']]
            self.users[account] = users

    def simulate_deleted_users(self, io_dir):
        """Remove users listed in early-exit integration output files.

        Reads <io_dir>/<integration>.json dumps produced by the
        terraform integrations and drops those users from self.users,
        simulating their deletion ahead of time. No-op if io_dir or a
        given file does not exist.
        """
        src_integrations = ['terraform_resources', 'terraform_users']
        if not os.path.exists(io_dir):
            return
        for i in src_integrations:
            file_path = os.path.join(io_dir, i + '.json')
            if not os.path.exists(file_path):
                continue
            with open(file_path, 'r') as f:
                deleted_users = json.load(f)
            for deleted_user in deleted_users:
                delete_from_account = deleted_user['account']
                delete_user = deleted_user['user']
                # raises if the user is absent — presumably the dump is
                # always consistent with self.users; TODO confirm
                self.users[delete_from_account].remove(delete_user)
def map_resources(self):
    """Map all resource types in self.resource_types, threaded."""
    threaded.run(self.map_resource, self.resource_types,
                 self.thread_pool_size)

def map_resource(self, resource_type):
    """Dispatch mapping for a single resource type.

    'route53' is dispatchable here even though it is not part of
    self.resource_types — callers may request it explicitly.

    :raises InvalidResourceTypeError: for unknown types
    """
    if resource_type == 's3':
        self.map_s3_resources()
    elif resource_type == 'sqs':
        self.map_sqs_resources()
    elif resource_type == 'dynamodb':
        self.map_dynamodb_resources()
    elif resource_type == 'rds':
        self.map_rds_resources()
    elif resource_type == 'rds_snapshots':
        self.map_rds_snapshots()
    elif resource_type == 'route53':
        self.map_route53_resources()
    else:
        raise InvalidResourceTypeError(resource_type)

def map_s3_resources(self):
    """Record all S3 buckets per account, plus the ownerless subset."""
    for account, s in self.sessions.items():
        s3 = s.client('s3')
        buckets_list = s3.list_buckets()
        if 'Buckets' not in buckets_list:
            continue
        buckets = [b['Name'] for b in buckets_list['Buckets']]
        self.set_resouces(account, 's3', buckets)
        buckets_without_owner = \
            self.get_resources_without_owner(account, buckets)
        unfiltered_buckets = \
            self.custom_s3_filter(account, s3, buckets_without_owner)
        self.set_resouces(account, 's3_no_owner', unfiltered_buckets)

def map_sqs_resources(self):
    """Record all SQS queues per account, plus the ownerless subset."""
    for account, s in self.sessions.items():
        sqs = s.client('sqs')
        queues_list = sqs.list_queues()
        if 'QueueUrls' not in queues_list:
            continue
        queues = queues_list['QueueUrls']
        self.set_resouces(account, 'sqs', queues)
        queues_without_owner = \
            self.get_resources_without_owner(account, queues)
        unfiltered_queues = \
            self.custom_sqs_filter(account, sqs, queues_without_owner)
        self.set_resouces(account, 'sqs_no_owner', unfiltered_queues)

def map_dynamodb_resources(self):
    """Record all DynamoDB tables per account, plus the ownerless subset."""
    for account, s in self.sessions.items():
        dynamodb = s.client('dynamodb')
        tables = self.paginate(dynamodb, 'list_tables', 'TableNames')
        self.set_resouces(account, 'dynamodb', tables)
        tables_without_owner = \
            self.get_resources_without_owner(account, tables)
        unfiltered_tables = \
            self.custom_dynamodb_filter(
                account,
                s,
                dynamodb,
                tables_without_owner
            )
        self.set_resouces(account, 'dynamodb_no_owner', unfiltered_tables)

def map_rds_resources(self):
    """Record all RDS instances per account, plus the ownerless subset."""
    for account, s in self.sessions.items():
        rds = s.client('rds')
        results = \
            self.paginate(rds, 'describe_db_instances', 'DBInstances')
        instances = [t['DBInstanceIdentifier'] for t in results]
        self.set_resouces(account, 'rds', instances)
        instances_without_owner = \
            self.get_resources_without_owner(account, instances)
        unfiltered_instances = \
            self.custom_rds_filter(account, rds, instances_without_owner)
        self.set_resouces(account, 'rds_no_owner', unfiltered_instances)

def map_rds_snapshots(self):
    """Record RDS snapshots; ownerless = snapshots of unknown instances.

    Depends on map_rds_resources having populated the 'rds' key first,
    hence the wait_for_resource call.
    """
    self.wait_for_resource('rds')
    for account, s in self.sessions.items():
        rds = s.client('rds')
        results = \
            self.paginate(rds, 'describe_db_snapshots', 'DBSnapshots')
        snapshots = [t['DBSnapshotIdentifier'] for t in results]
        self.set_resouces(account, 'rds_snapshots', snapshots)
        snapshots_without_db = [
            t['DBSnapshotIdentifier'] for t in results
            if t['DBInstanceIdentifier'] not in
            self.resources[account]['rds']
        ]
        unfiltered_snapshots = \
            self.custom_rds_snapshot_filter(account, rds,
                                            snapshots_without_db)
        self.set_resouces(account, 'rds_snapshots_no_owner',
                          unfiltered_snapshots)

def map_route53_resources(self):
    """Record Route53 hosted zones (with their record sets) per account."""
    for account, s in self.sessions.items():
        client = s.client('route53')
        results = \
            self.paginate(client, 'list_hosted_zones', 'HostedZones')
        zones = [z for z in results]
        for zone in zones:
            results = \
                self.paginate(client, 'list_resource_record_sets',
                              'ResourceRecordSets',
                              {'HostedZoneId': zone['Id']})
            zone['records'] = results
        self.set_resouces(account, 'route53', zones)

def map_ecr_resources(self):
    """Record all ECR repositories per account."""
    for account, s in self.sessions.items():
        client = s.client('ecr')
        repositories = self.paginate(client=client,
                                     method='describe_repositories',
                                     key='repositories')
        self.set_resouces(account, 'ecr', repositories)

def paginate(self, client, method, key, params=None):
    """ paginate returns an aggregated list of the specified key
    from all pages returned by executing the client's specified method.

    :param client: boto3 client exposing get_paginator
    :param method: name of the paginated API method
    :param key: response key whose values are aggregated
    :param params: optional kwargs forwarded to paginate()
    """
    # default was a mutable `params={}`; use None-sentinel instead
    # (behavior identical — params is only read)
    if params is None:
        params = {}
    paginator = client.get_paginator(method)
    return [
        values
        for page in paginator.paginate(**params)
        for values in page.get(key, [])
    ]

def wait_for_resource(self, resource):
    """ wait_for_resource waits until the specified resource type
    is ready for all accounts.
    When we have more resource types then threads,
    this function will need to change to a dependency graph."""
    wait = True
    while wait:
        wait = False
        for account in self.sessions:
            # busy-wait: mapped resources appear via set_resouces
            if self.resources[account].get(resource) is None:
                wait = True
        if wait:
            time.sleep(2)

def set_resouces(self, account, key, value):
    """Thread-safe write into self.resources[account][key].

    NOTE: 'set_resouces' (sic) — misspelled name kept as-is, since all
    callers use it.
    """
    with self._lock:
        self.resources[account][key] = value

def get_resources_without_owner(self, account, resources):
    """Return the subset of `resources` with no matching IAM user prefix."""
    return [r for r in resources if not self.has_owner(account, r)]

def has_owner(self, account, resource):
    """Tell whether a resource name is prefixed by an IAM user name.

    For URL-like resources, the last path segment is also checked.
    """
    has_owner = False
    for u in self.users[account]:
        if resource.lower().startswith(u.lower()):
            has_owner = True
            break
        if '://' in resource:
            if resource.split('/')[-1].startswith(u.lower()):
                has_owner = True
                break
    return has_owner

def custom_s3_filter(self, account, s3, buckets):
    """Drop buckets that should be skipped by name or tags."""
    resource_type = 's3 bucket'  # renamed from `type` (builtin shadow)
    unfiltered_buckets = []
    for b in buckets:
        try:
            tags = s3.get_bucket_tagging(Bucket=b)
        except botocore.exceptions.ClientError:
            # bucket has no tagging configuration
            tags = {}
        if not self.should_filter(account, resource_type, b, tags, 'TagSet'):
            unfiltered_buckets.append(b)
    return unfiltered_buckets

def custom_sqs_filter(self, account, sqs, queues):
    """Drop queues that should be skipped by name or tags."""
    resource_type = 'sqs queue'
    unfiltered_queues = []
    for q in queues:
        tags = sqs.list_queue_tags(QueueUrl=q)
        if not self.should_filter(account, resource_type, q, tags, 'Tags'):
            unfiltered_queues.append(q)
    return unfiltered_queues

def custom_dynamodb_filter(self, account, session, dynamodb, tables):
    """Drop tables that should be skipped by name or tags."""
    resource_type = 'dynamodb table'
    dynamodb_resource = session.resource('dynamodb')
    unfiltered_tables = []
    for t in tables:
        table_arn = dynamodb_resource.Table(t).table_arn
        tags = dynamodb.list_tags_of_resource(ResourceArn=table_arn)
        if not self.should_filter(account, resource_type, t, tags, 'Tags'):
            unfiltered_tables.append(t)
    return unfiltered_tables

def custom_rds_filter(self, account, rds, instances):
    """Drop RDS instances that should be skipped by name or tags."""
    resource_type = 'rds instance'
    unfiltered_instances = []
    for i in instances:
        instance = rds.describe_db_instances(DBInstanceIdentifier=i)
        instance_arn = instance['DBInstances'][0]['DBInstanceArn']
        tags = rds.list_tags_for_resource(ResourceName=instance_arn)
        if not self.should_filter(account, resource_type, i,
                                  tags, 'TagList'):
            unfiltered_instances.append(i)
    return unfiltered_instances

def custom_rds_snapshot_filter(self, account, rds, snapshots):
    """Drop RDS snapshots that should be skipped by name or tags."""
    resource_type = 'rds snapshots'
    unfiltered_snapshots = []
    for s in snapshots:
        snapshot = rds.describe_db_snapshots(DBSnapshotIdentifier=s)
        snapshot_arn = snapshot['DBSnapshots'][0]['DBSnapshotArn']
        tags = rds.list_tags_for_resource(ResourceName=snapshot_arn)
        if not self.should_filter(account, resource_type, s,
                                  tags, 'TagList'):
            unfiltered_snapshots.append(s)
    return unfiltered_snapshots

def should_filter(self, account, resource_type,
                  resource_name, resource_tags, tags_key):
    """True when a resource must be excluded by special name or tags."""
    if self.resource_has_special_name(account, resource_type,
                                      resource_name):
        return True
    if tags_key in resource_tags:
        tags = resource_tags[tags_key]
        if self.resource_has_special_tags(account, resource_type,
                                          resource_name, tags):
            return True
    return False

def resource_has_special_name(self, account, type, resource):
    """True when the resource name contains a protected keyword
    (prod/stage/terraform markers); logs the reason at debug level."""
    skip_msg = '[{}] skipping {} '.format(account, type) + \
        '({} related) {}'
    ignore_names = {
        'production': ['prod'],
        'stage': ['stage', 'staging'],
        'terraform': ['terraform', '-tf-'],
    }
    for msg, tags in ignore_names.items():
        for tag in tags:
            if tag.lower() in resource.lower():
                logging.debug(skip_msg.format(msg, resource))
                return True
    return False

def resource_has_special_tags(self, account, type, resource, tags):
    """True when the resource carries a protected tag value
    (env markers, app-sre ownership, terraform-managed, hands-off)."""
    skip_msg = '[{}] skipping {} '.format(account, type) + \
        '({}={}) {}'
    ignore_tags = {
        'ENV': ['prod', 'stage', 'staging'],
        'environment': ['prod', 'stage', 'staging'],
        'owner': ['app-sre'],
        'managed_by_integration': ['terraform_resources',
                                   'terraform_users'],
        'aws_gc_hands_off': ['true'],
    }
    for tag, ignore_values in ignore_tags.items():
        for ignore_value in ignore_values:
            value = self.get_tag_value(tags, tag)
            if ignore_value.lower() in value.lower():
                logging.debug(skip_msg.format(tag, value, resource))
                return True
    return False

def get_tag_value(self, tags, tag):
    """Look up a tag value in either dict or AWS Key/Value-list form.

    Returns '' when the tag is absent or `tags` has an unknown shape.
    """
    if isinstance(tags, dict):
        return tags.get(tag, '')
    elif isinstance(tags, list):
        for t in tags:
            if t['Key'] == tag:
                return t['Value']
    return ''

def delete_resources_without_owner(self, dry_run):
    """Delete every mapped '<type>_no_owner' resource (unless dry_run)."""
    for account, s in self.sessions.items():
        for rt in self.resource_types:
            for r in self.resources[account].get(rt + '_no_owner', []):
                logging.info(['delete_resource', account, rt, r])
                if not dry_run:
                    self.delete_resource(s, rt, r)

def delete_resource(self, session, resource_type, resource_name):
    """Dispatch deletion of a single resource by type.

    :raises InvalidResourceTypeError: for unknown types
    """
    if resource_type == 's3':
        resource = session.resource(resource_type)
        self.delete_bucket(resource, resource_name)
    elif resource_type == 'sqs':
        client = session.client(resource_type)
        self.delete_queue(client, resource_name)
    elif resource_type == 'dynamodb':
        resource = session.resource(resource_type)
        self.delete_table(resource, resource_name)
    elif resource_type == 'rds':
        client = session.client(resource_type)
        self.delete_instance(client, resource_name)
    elif resource_type == 'rds_snapshots':
        client = session.client(resource_type)
        self.delete_snapshot(client, resource_name)
    else:
        raise InvalidResourceTypeError(resource_type)

def delete_bucket(self, s3, bucket_name):
    """Empty and delete an S3 bucket (objects must go first)."""
    bucket = s3.Bucket(bucket_name)
    for key in bucket.objects.all():
        key.delete()
    bucket.delete()

def delete_queue(self, sqs, queue_url):
    """Delete an SQS queue by URL."""
    sqs.delete_queue(QueueUrl=queue_url)

def delete_table(self, dynamodb, table_name):
    """Delete a DynamoDB table."""
    table = dynamodb.Table(table_name)
    table.delete()

def delete_instance(self, rds, instance_name):
    """Delete an RDS instance, skipping the final snapshot and backups."""
    rds.delete_db_instance(
        DBInstanceIdentifier=instance_name,
        SkipFinalSnapshot=True,
        DeleteAutomatedBackups=True
    )

def delete_snapshot(self, rds, snapshot_identifier):
    """Delete an RDS snapshot."""
    rds.delete_db_snapshot(
        DBSnapshotIdentifier=snapshot_identifier
    )

@staticmethod
def determine_key_type(iam, user):
    """Classify a user's access key by the owning integration.

    :return: 'unmanaged', 'user' (terraform_users) or
             'service_account' (terraform_resources)
    :raises InvalidResourceTypeError: unrecognized integration tag
    """
    tags = iam.list_user_tags(UserName=user)['Tags']
    managed_by_integration_tag = \
        [t['Value'] for t in tags
         if t['Key'] == 'managed_by_integration']
    # if this key belongs to a user without tags, i.e. not
    # managed by an integration, this key is probably created
    # manually. disable it to leave a trace
    if not managed_by_integration_tag:
        return 'unmanaged'
    # if this key belongs to a user created by the
    # 'terraform-users' integration, we just delete the key
    if managed_by_integration_tag[0] == 'terraform_users':
        return 'user'
    # if this key belongs to a user created by the
    # 'terraform-resources' integration, we remove
    # the key from terraform state and let it create
    # a new one on its own
    if managed_by_integration_tag[0] == 'terraform_resources':
        return 'service_account'

    huh = 'unrecognized managed_by_integration tag: {}'.format(
        managed_by_integration_tag[0])
    raise InvalidResourceTypeError(huh)

def delete_keys(self, dry_run, keys_to_delete, working_dirs,
                disable_service_account_keys):
    """Disable/delete the given access keys per account.

    Strategy depends on determine_key_type: unmanaged keys are
    disabled, terraform_users keys are deleted, terraform_resources
    keys are rotated via terraform state surgery.

    :return: True if any error occurred
    """
    error = False
    users_keys = self.get_users_keys()
    for account, s in self.sessions.items():
        iam = s.client('iam')
        keys = keys_to_delete.get(account, [])
        for key in keys:
            user_and_user_keys = [
                (user, user_keys) for user, user_keys
                in users_keys[account].items()
                if key in user_keys
            ]
            if not user_and_user_keys:
                continue
            # unpack single item from sequence
            # since only a single user can have a given key
            [user_and_user_keys] = user_and_user_keys
            user = user_and_user_keys[0]
            user_keys = user_and_user_keys[1]
            key_type = self.determine_key_type(iam, user)
            key_status = self.get_user_key_status(iam, user, key)
            if key_type == 'unmanaged' and key_status == 'Active':
                logging.info(['disable_key', account, user, key])
                if not dry_run:
                    iam.update_access_key(
                        UserName=user,
                        AccessKeyId=key,
                        Status='Inactive'
                    )
            elif key_type == 'user':
                logging.info(['delete_key', account, user, key])
                if not dry_run:
                    iam.delete_access_key(
                        UserName=user,
                        AccessKeyId=key
                    )
            elif key_type == 'service_account':
                # if key is disabled - delete it
                # this will happen after terraform-resources ran,
                # provisioned a new key, updated the output Secret,
                # recycled the pods and disabled the key.
                if key_status == 'Inactive':
                    logging.info(
                        ['delete_inactive_key', account, user, key])
                    if not dry_run:
                        iam.delete_access_key(
                            UserName=user,
                            AccessKeyId=key
                        )
                    continue

                # if key is active and it is the only one -
                # remove it from terraform state. terraform-resources
                # will provision a new one.
                # may be a race condition here. TODO: check it
                if len(user_keys) == 1:
                    logging.info(['remove_from_state', account, user, key])
                    if not dry_run:
                        terraform.state_rm_access_key(
                            working_dirs, account, user
                        )

                # if user has 2 keys and we remove the key from
                # terraform state, terraform-resources will not
                # be able to provision a new key - limbo.
                # this state should happen when terraform-resources
                # is running, provisioned a new key,
                # but did not disable the old key yet.
                if len(user_keys) == 2:
                    # if true, this is a call made by terraform-resources
                    # itself. disable the key and proceed. the key will be
                    # deleted in a following iteration of aws-iam-keys.
                    if disable_service_account_keys:
                        logging.info(['disable_key', account, user, key])
                        if not dry_run:
                            iam.update_access_key(
                                UserName=user,
                                AccessKeyId=key,
                                Status='Inactive'
                            )
                    else:
                        msg = \
                            'user {} has 2 keys, skipping to avoid error'
                        logging.error(msg.format(user))
                        error = True

    return error

def get_users_keys(self):
    """Return {account: {user: [access_key_id, ...]}} for all accounts."""
    users_keys = {}
    for account, s in self.sessions.items():
        iam = s.client('iam')
        users_keys[account] = {
            user: self.get_user_keys(iam, user)
            for user in self.users[account]
        }

    return users_keys

def get_user_keys(self, iam, user):
    """Return the list of access key ids for a single IAM user."""
    key_list = iam.list_access_keys(UserName=user)['AccessKeyMetadata']
    return [uk['AccessKeyId'] for uk in key_list]

@staticmethod
def get_user_key_status(iam, user, key):
    """Return 'Active'/'Inactive' for the given user's access key."""
    key_list = iam.list_access_keys(UserName=user)['AccessKeyMetadata']
    return [k['Status'] for k in key_list if k['AccessKeyId'] == key][0]

def get_support_cases(self):
    """Return support cases per premium-support account (best effort:
    per-account API failures are logged, not raised)."""
    all_support_cases = {}
    for account, s in self.sessions.items():
        if not self.accounts[account].get('premiumSupport'):
            continue
        try:
            support = s.client('support')
            support_cases = support.describe_cases(
                includeResolvedCases=True,
                includeCommunications=True)['cases']
            all_support_cases[account] = support_cases
        except Exception as e:
            msg = '[{}] error getting support cases. details: {}'
            logging.error(msg.format(account, str(e)))

    return all_support_cases

def init_ecr_auth_tokens(self, accounts):
    """Fetch ECR authorization tokens per account/region.

    Populates self.auth_tokens keyed by '<account>/<region>'.
    """
    accounts_with_ecr = [a for a in accounts if a.get('ecrs')]
    if not accounts_with_ecr:
        return

    auth_tokens = {}
    results = threaded.run(self.get_tf_secrets, accounts_with_ecr,
                           self.thread_pool_size)
    account_secrets = {account: secret for account, secret in results}
    for account in accounts_with_ecr:
        account_name = account['name']
        account_secret = account_secrets[account_name]
        access_key = account_secret['aws_access_key_id']
        secret_key = account_secret['aws_secret_access_key']

        ecrs = account['ecrs']
        for ecr in ecrs:
            region_name = ecr['region']
            session = boto3.Session(
                aws_access_key_id=access_key,
                aws_secret_access_key=secret_key,
                region_name=region_name,
            )
            client = session.client('ecr')
            token = client.get_authorization_token()
            auth_tokens[f"{account_name}/{region_name}"] = token

    self.auth_tokens = auth_tokens

def get_cluster_vpc_id(self, account, route_tables=False):
    """
    Returns a cluster VPC ID (and optionally its route table ids).

    :param account: a dictionary containing the following keys:
                    - name - name of the AWS account
                    - assume_role - role to assume to get access to the
                      cluster's AWS account
                    - assume_region - region in which to operate
                    - assume_cidr - CIDR block of the cluster to use to
                      find the matching VPC
    :param route_tables: also collect the VPC's route table ids
    :return: (vpc_id or None, route_table_ids or None)
    :raises KeyError: missing account keys or missing Role ARN
    """
    required_keys = \
        ['name', 'assume_role', 'assume_region', 'assume_cidr']
    ok = all(elem in account.keys() for elem in required_keys)
    if not ok:
        account_name = account.get('name')
        raise KeyError(
            '[{}] account is missing required keys'.format(account_name))

    session = self.get_session(account['name'])
    sts = session.client('sts')
    role_arn = account['assume_role']
    if not role_arn:
        raise KeyError('Could not find Role ARN. This is likely caused '
                       'due to a missing awsInfrastructureAccess section.')
    role_name = role_arn.split('/')[1]
    response = sts.assume_role(
        RoleArn=role_arn,
        RoleSessionName=role_name
    )
    credentials = response['Credentials']

    assumed_session = boto3.Session(
        aws_access_key_id=credentials['AccessKeyId'],
        aws_secret_access_key=credentials['SecretAccessKey'],
        aws_session_token=credentials['SessionToken'],
        region_name=account['assume_region'])

    assumed_ec2 = assumed_session.client('ec2')
    vpcs = assumed_ec2.describe_vpcs()
    vpc_id = None
    for vpc in vpcs.get('Vpcs'):
        # the cluster VPC is identified by its CIDR block
        if vpc['CidrBlock'] == account['assume_cidr']:
            vpc_id = vpc['VpcId']
            break
    route_table_ids = None
    if route_tables and vpc_id:
        route_tables = assumed_ec2.describe_route_tables(
            Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}])
        route_table_ids = [rt['RouteTableId']
                           for rt in route_tables['RouteTables']]

    return vpc_id, route_table_ids

def get_route53_zones(self):
    """
    Return a list of (str, dict) representing Route53 DNS zones per account

    :return: route53 dns zones per account
    :rtype: list of (str, dict)
    """
    return {
        account: self.resources.get(account, {}).get('route53', [])
        for account, _ in self.sessions.items()
    }

def create_route53_zone(self, account_name, zone_name):
    """
    Create a Route53 DNS zone

    :param account_name: the account name to operate on
    :param zone_name: name of the zone to create
    :type account_name: str
    :type zone_name: str
    """
    session = self.get_session(account_name)
    client = session.client('route53')

    try:
        # CallerReference must be unique per request; a timestamp is
        caller_ref = f"{datetime.now()}"
        client.create_hosted_zone(
            Name=zone_name,
            CallerReference=caller_ref,
            HostedZoneConfig={
                'Comment': 'Managed by App-Interface',
            },
        )
    except client.exceptions.InvalidDomainName:
        logging.error(f'[{account_name}] invalid domain name {zone_name}')
    except client.exceptions.HostedZoneAlreadyExists:
        logging.error(
            f'[{account_name}] hosted zone already exists: {zone_name}')
    except client.exceptions.TooManyHostedZones:
        logging.error(f'[{account_name}] too many hosted zones in account')
    except Exception as e:
        logging.error(f'[{account_name}] unhandled exception: {e}')

def delete_route53_zone(self, account_name, zone_id):
    """
    Delete a Route53 DNS zone

    :param account_name: the account name to operate on
    :param zone_id: aws zone id of the zone to delete
    :type account_name: str
    :type zone_id: str
    """
    session = self.get_session(account_name)
    client = session.client('route53')

    try:
        client.delete_hosted_zone(Id=zone_id)
    except client.exceptions.NoSuchHostedZone:
        logging.error(f'[{account_name}] Error trying to delete '
                      f'unknown DNS zone {zone_id}')
    except client.exceptions.HostedZoneNotEmpty:
        logging.error(f'[{account_name}] Cannot delete DNS zone that '
                      f'is not empty {zone_id}')
    except Exception as e:
        logging.error(f'[{account_name}] unhandled exception: {e}')

def delete_route53_record(self, account_name, zone_id, awsdata):
    """
    Delete a Route53 DNS zone record

    :param account_name: the account name to operate on
    :param zone_id: aws zone id of the zone to operate on
    :param awsdata: aws record data of the record to delete
    :type account_name: str
    :type zone_id: str
    :type awsdata: dict
    """
    session = self.get_session(account_name)
    client = session.client('route53')

    try:
        client.change_resource_record_sets(
            HostedZoneId=zone_id,
            ChangeBatch={
                'Changes': [{
                    'Action': 'DELETE',
                    'ResourceRecordSet': awsdata,
                }]
            })
    except client.exceptions.NoSuchHostedZone:
        logging.error(f'[{account_name}] Error trying to delete record: '
                      f'unknown DNS zone {zone_id}')
    except Exception as e:
        logging.error(f'[{account_name}] unhandled exception: {e}')

def upsert_route53_record(self, account_name, zone_id, recordset):
    """
    Upsert a Route53 DNS zone record

    :param account_name: the account name to operate on
    :param zone_id: aws zone id of the zone to operate on
    :param recordset: aws record data of the record to create or update
    :type account_name: str
    :type zone_id: str
    :type recordset: dict
    """
    session = self.get_session(account_name)
    client = session.client('route53')

    try:
        client.change_resource_record_sets(
            HostedZoneId=zone_id,
            ChangeBatch={
                'Changes': [{
                    'Action': 'UPSERT',
                    'ResourceRecordSet': recordset,
                }]
            })
    except client.exceptions.NoSuchHostedZone:
        logging.error(f'[{account_name}] Error trying to delete record: '
                      f'unknown DNS zone {zone_id}')
    except Exception as e:
        logging.error(f'[{account_name}] unhandled exception: {e}')