def fetch_current_state(namespaces=None, clusters=None, thread_pool_size=None,
                        integration=None, integration_version=None,
                        override_managed_types=None, internal=None,
                        use_jump_host=True, init_api_resources=False):
    """Collect the current cluster state into a ResourceInventory.

    Builds an OC_Map for the given namespaces or clusters, derives the
    state specs to fetch, and populates them concurrently.

    :return: tuple of (ResourceInventory, OC_Map)
    """
    inventory = ResourceInventory()
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(namespaces=namespaces,
                    clusters=clusters,
                    integration=integration,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size,
                    init_api_resources=init_api_resources)
    specs = init_specs_to_fetch(
        inventory,
        oc_map,
        namespaces=namespaces,
        clusters=clusters,
        override_managed_types=override_managed_types,
    )
    # populate the inventory concurrently, one spec per worker
    threaded.run(populate_current_state, specs, thread_pool_size,
                 ri=inventory,
                 integration=integration,
                 integration_version=integration_version)
    return inventory, oc_map
def run(thread_pool_size=10, defer=None):
    """E2E test entry point: test every cluster in the OC_Map.

    :param thread_pool_size: number of concurrent workers
    :param defer: callback that registers cleanup to run on exit
    """
    oc_map = tb.get_oc_map(QONTRACT_E2E_TEST)
    # pass the bound method directly; wrapping it in a lambda adds nothing
    defer(oc_map.cleanup)
    pattern = tb.get_namespaces_pattern()
    threaded.run(test_cluster, oc_map.clusters(), thread_pool_size,
                 oc_map=oc_map, pattern=pattern)
def __init__(self, clusters=None, namespaces=None, integration='',
             e2e_test='', settings=None, internal=None, use_jump_host=True,
             thread_pool_size=1, init_projects=False,
             init_api_resources=False):
    """Initialize OC clients for a set of clusters or namespaces.

    Exactly one of *clusters* or *namespaces* must be provided; passing
    both (or neither) raises KeyError, matching the original contract.
    """
    self.oc_map = {}
    self.calling_integration = integration
    self.calling_e2e_test = e2e_test
    self.settings = settings
    self.internal = internal
    self.use_jump_host = use_jump_host
    self.thread_pool_size = thread_pool_size
    self.init_projects = init_projects
    self.init_api_resources = init_api_resources
    self._lock = Lock()
    # guard clauses instead of an if/elif chain; both code paths
    # funnel into a single threaded.run call
    if clusters and namespaces:
        raise KeyError('expected only one of clusters or namespaces.')
    if namespaces:
        clusters = [ns_info['cluster'] for ns_info in namespaces]
    if not clusters:
        raise KeyError('expected one of clusters or namespaces.')
    threaded.run(self.init_oc_client, clusters, self.thread_pool_size)
def run(thread_pool_size=10, defer=None):
    """E2E test entry point: run test_cluster against every cluster.

    :param thread_pool_size: number of concurrent workers
    :param defer: callback that registers cleanup to run on exit
    """
    oc_map = tb.get_oc_map(QONTRACT_E2E_TEST)
    # pass the bound method directly; wrapping it in a lambda adds nothing
    defer(oc_map.cleanup)
    ns_under_test = tb.get_test_namespace_name()
    threaded.run(test_cluster, oc_map.clusters(), thread_pool_size,
                 oc_map=oc_map, ns_under_test=ns_under_test)
def populate_desired_state(self, ri):
    """Populate *ri* with the desired state of all saas files.

    First collects per-saas-file spec lists concurrently, flattens
    them, then populates each spec concurrently.
    """
    spec_lists = threaded.run(self.init_populate_desired_state_specs,
                              self.saas_files,
                              self.thread_pool_size)
    # flatten the list of lists with an explicit loop
    desired_state_specs = []
    for specs in spec_lists:
        desired_state_specs.extend(specs)
    threaded.run(self.populate_desired_state_saas_file,
                 desired_state_specs,
                 self.thread_pool_size,
                 ri=ri)
def run(dry_run, thread_pool_size=10):
    """Act on every GitLab-owned repo of the configured instance."""
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    repos = queries.get_repos_gitlab_owner(server=instance['url'])
    # fan out `act` over all repos
    threaded.run(act, repos, thread_pool_size,
                 dry_run=dry_run,
                 instance=instance,
                 settings=settings)
def fetch_data(namespaces, thread_pool_size, internal, use_jump_host):
    """Fetch current state for *namespaces* into a ResourceInventory.

    :return: tuple of (OC_Map, ResourceInventory)
    """
    inventory = ResourceInventory()
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(namespaces=namespaces,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host)
    specs = ob.init_specs_to_fetch(inventory, oc_map, namespaces=namespaces)
    threaded.run(fetch_states, specs, thread_pool_size, ri=inventory)
    return oc_map, inventory
def run(dry_run, thread_pool_size=10, io_dir='throughput/'):
    """Run the wrapped integration once per saas-file-owners diff."""
    diffs = read_saas_file_owners_diffs(io_dir)
    # nothing to do when there are no diffs
    if not diffs:
        return
    available_thread_pool_size = \
        threaded.estimate_available_thread_pool_size(
            thread_pool_size, len(diffs))
    # NOTE(review): exit codes from the workers are discarded here,
    # unlike the sibling variant that sys.exit(1)s on failure — confirm
    # this best-effort behavior is intentional.
    threaded.run(osd_run_wrapper, diffs, thread_pool_size,
                 dry_run=dry_run,
                 available_thread_pool_size=available_thread_pool_size)
def run(self):
    """Collect imagemanifestvulns from all clusters and post them."""
    clusters = queries.get_clusters()
    oc_map = OC_Map(clusters=clusters,
                    integration=QONTRACT_INTEGRATION,
                    settings=self.settings,
                    use_jump_host=True,
                    thread_pool_size=self.thread_pool_size)
    # stage 1: gather manifests per cluster
    manifests = threaded.run(self._get_imagemanifestvuln,
                             oc_map.clusters(),
                             self.thread_pool_size,
                             oc_map=oc_map)
    # stage 2: post each gathered manifest
    threaded.run(self._post, manifests, self.thread_pool_size)
def run(dry_run, gitlab_project_id=None, thread_pool_size=10,
        enable_deletion=False, send_mails=False):
    """Find users to delete (by company check) and act on them.

    In dry-run mode only logs the users that would be deleted. With
    --enable-deletion it opens a delete-user MR; with send_mails it
    sends a notification mail instead.
    """
    settings = queries.get_app_interface_settings()
    users = queries.get_users()
    g = init_github()
    companies = threaded.run(get_user_company, users, thread_pool_size,
                             github=g)
    users_to_delete = get_users_to_delete(companies)
    gw = None
    if not dry_run and enable_deletion:
        gw = prg.init(gitlab_project_id=gitlab_project_id)
    for user in users_to_delete:
        username = user['username']
        paths = user['paths']
        logging.info(['delete_user', username])
        # dry run only logs; guard clause instead of nesting
        if dry_run:
            continue
        if send_mails:
            send_email_notification(user, settings)
        elif enable_deletion:
            gw.create_delete_user_mr(username, paths)
        else:
            logging.warning('\'delete\' action is not enabled. '
                            'Please run the integration manually '
                            'with the \'--enable-deletion\' flag.')
def init_ecr_auth_tokens(self, accounts):
    """Fetch ECR authorization tokens for all accounts that have ECRs.

    Stores them on self.auth_tokens keyed by "<account>/<region>".
    Does nothing (and leaves self.auth_tokens unset) when no account
    has ECRs.
    """
    ecr_accounts = [a for a in accounts if a.get('ecrs')]
    if not ecr_accounts:
        return
    # get_tf_secrets yields (account_name, secret) pairs
    secrets_by_account = dict(
        threaded.run(self.get_tf_secrets, ecr_accounts,
                     self.thread_pool_size))
    tokens = {}
    for account in ecr_accounts:
        name = account['name']
        secret = secrets_by_account[name]
        for ecr in account['ecrs']:
            region = ecr['region']
            # one session per ECR region, using the account's TF creds
            session = boto3.Session(
                aws_access_key_id=secret['aws_access_key_id'],
                aws_secret_access_key=secret['aws_secret_access_key'],
                region_name=region,
            )
            token = session.client('ecr').get_authorization_token()
            tokens[f"{name}/{region}"] = token
    self.auth_tokens = tokens
def run(dry_run, gitlab_project_id=None, thread_pool_size=10):
    """Scan repo histories for leaked AWS keys and act on findings."""
    accounts = queries.get_aws_accounts()
    settings = queries.get_app_interface_settings()
    aws = AWSApi(thread_pool_size, accounts, settings=settings)
    existing_keys = aws.get_users_keys()
    # flatten {account: {user: [keys]}} into a single key list
    existing_keys_list = [
        key
        for user_key in existing_keys.values()
        for keys in user_key.values()
        for key in keys
    ]
    logging.info('found {} existing keys'.format(len(existing_keys_list)))
    app_int_github_repos = queries.get_repos(server='https://github.com')
    all_repos = get_all_repos_to_scan(app_int_github_repos)
    logging.info('about to scan {} repos'.format(len(all_repos)))
    scan_results = threaded.run(git_secrets.scan_history, all_repos,
                                thread_pool_size,
                                existing_keys=existing_keys_list)
    all_leaked_keys = [key for keys in scan_results for key in keys]
    deleted_keys = aws_sos.get_deleted_keys(accounts)
    # keep only leaked keys that still exist and were not already deleted
    keys_to_delete = [
        {'account': account, 'key': key}
        for key in all_leaked_keys
        for account, user_keys in existing_keys.items()
        if key in [uk for uks in user_keys.values() for uk in uks]
        and key not in deleted_keys[account]
    ]
    aws_sos.act(dry_run, gitlab_project_id, accounts, keys_to_delete)
def fetch_current_state(namespaces, thread_pool_size, internal,
                        use_jump_host):
    """Fetch current Secret resources for *namespaces*.

    :return: tuple of (ResourceInventory, OC_Map)
    """
    inventory = ResourceInventory()
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(namespaces=namespaces,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host)
    # only Secrets are managed by this integration
    specs = ob.init_specs_to_fetch(
        inventory,
        oc_map,
        namespaces=namespaces,
        override_managed_types=['Secret'],
    )
    threaded.run(populate_oc_resources, specs, thread_pool_size,
                 ri=inventory)
    return inventory, oc_map
def init_specs(self):
    """Run `terraform init` for every working dir and store the specs."""
    wd_specs = [{'name': name, 'wd': wd}
                for name, wd in self.working_dirs.items()]
    init_results = threaded.run(self.terraform_init, wd_specs,
                                self.thread_pool_size)
    # terraform_init yields (name, tf) pairs
    self.specs = [{'name': name, 'tf': tf} for name, tf in init_results]
def apply(self):
    """Run `terraform apply` concurrently over all specs.

    :return: True if any apply reported an error, else False
    """
    results = threaded.run(self.terraform_apply, self.specs,
                           self.thread_pool_size)
    # any() replaces the manual flag-accumulation loop
    return any(results)
def fetch_current_state(thread_pool_size, internal, use_jump_host):
    """Fetch current cluster users for all non-OCM clusters.

    :return: tuple of (OC_Map, current_state list)
    """
    gqlapi = gql.get_api()
    clusters = gqlapi.query(CLUSTERS_QUERY)['clusters']
    # OCM-managed clusters are handled elsewhere
    clusters = [c for c in clusters if c.get('ocm') is None]
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(clusters=clusters,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host)
    user_lists = threaded.run(get_cluster_users, oc_map.clusters(),
                              thread_pool_size, oc_map=oc_map)
    current_state = [user for users in user_lists for user in users]
    return oc_map, current_state
def _check_images(self, options):
    """Check all images referenced by a resource template.

    :param options: dict with saas_file_name, resource_template_name,
        html_url, resources, image_auth and image_patterns
    :return: True if any image check failed, else False
    """
    saas_file_name = options['saas_file_name']
    resource_template_name = options['resource_template_name']
    html_url = options['html_url']
    resources = options['resources']
    image_auth = options['image_auth']
    image_patterns = options['image_patterns']
    error_prefix = \
        f"[{saas_file_name}/{resource_template_name}] {html_url}:"
    images_list = threaded.run(self._collect_images, resources,
                               self.available_thread_pool_size)
    # set comprehension instead of set([...]): flatten and deduplicate
    images = {item for sublist in images_list for item in sublist}
    if not images:
        return False  # no errors
    errors = threaded.run(self._check_image, images,
                          self.available_thread_pool_size,
                          image_patterns=image_patterns,
                          image_auth=image_auth,
                          error_prefix=error_prefix)
    # any() instead of `True in errors`
    return any(errors)
def run(dry_run, thread_pool_size=10, internal=None, use_jump_host=True,
        defer=None):
    """Create any namespaces in the desired state that do not exist."""
    oc_map, desired_state = get_desired_state(internal, use_jump_host,
                                              thread_pool_size)
    defer(lambda: oc_map.cleanup())
    existence_checks = threaded.run(check_ns_exists, desired_state,
                                    thread_pool_size, oc_map=oc_map)
    # keep the specs flagged for creation
    for spec, create in existence_checks:
        if not create:
            continue
        logging.info(['create', spec['cluster'], spec['namespace']])
        if not dry_run:
            create_new_project(spec, oc_map)
def run(dry_run=False, thread_pool_size=10):
    """Add missing app-sre maintainers to GitLab repos."""
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    gl = GitLabApi(instance, settings=settings)
    repos = queries.get_repos(server=gl.server)
    app_sre = gl.get_app_sre_group_users()
    member_lists = threaded.run(get_members_to_add, repos,
                                thread_pool_size, gl=gl, app_sre=app_sre)
    members_to_add = [member for members in member_lists
                      for member in members]
    for member in members_to_add:
        logging.info(['add_maintainer', member["repo"],
                      member["user"].username])
        if not dry_run:
            gl.add_project_member(member["repo"], member["user"])
def fetch_current_state(thread_pool_size, internal, use_jump_host):
    """Fetch current users from every cluster.

    :return: tuple of (OC_Map, current_state list)
    """
    clusters = queries.get_clusters(minimal=True)
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(clusters=clusters,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)
    user_lists = threaded.run(get_cluster_users, oc_map.clusters(),
                              thread_pool_size, oc_map=oc_map)
    current_state = [user for users in user_lists for user in users]
    return oc_map, current_state
def fetch_current_state(thread_pool_size, internal, use_jump_host):
    """Fetch current group state from all non-OCM clusters.

    :return: tuple of (OC_Map, current_state list)
    """
    clusters = queries.get_clusters()
    # OCM-managed clusters are handled elsewhere
    clusters = [c for c in clusters if c.get('ocm') is None]
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(clusters=clusters,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host)
    groups_list = create_groups_list(clusters, oc_map)
    results = threaded.run(get_cluster_state, groups_list,
                           thread_pool_size, oc_map=oc_map)
    # removed dead `current_state = []` initialization — it was
    # unconditionally overwritten by the flatten below
    current_state = [item for sublist in results for item in sublist]
    return oc_map, current_state
def init_sessions_and_resources(self, accounts):
    """Create one boto3 session per account from its TF secret.

    Populates self.sessions and self.resources keyed by account name.
    """
    secret_pairs = threaded.run(self.get_tf_secrets, accounts,
                                self.thread_pool_size)
    self.sessions = {}
    self.resources = {}
    for account, secret in secret_pairs:
        session = boto3.Session(
            aws_access_key_id=secret['aws_access_key_id'],
            aws_secret_access_key=secret['aws_secret_access_key'],
            region_name=secret['region'],
        )
        self.sessions[account] = session
        self.resources[account] = {}
def plan(self, enable_deletion):
    """Run `terraform plan` concurrently over all specs.

    Collects deleted users on self.deleted_users as a side effect.

    :return: tuple of (deletions_detected, errors) booleans
    """
    results = threaded.run(self.terraform_plan, self.specs,
                           self.thread_pool_size,
                           enable_deletion=enable_deletion)
    self.deleted_users = []
    errors = False
    deletions_detected = False
    # terraform_plan yields (deletion_detected, deleted_users, error)
    for deletion_detected, deleted_users, error in results:
        if error:
            errors = True
        if deletion_detected:
            deletions_detected = True
            self.deleted_users.extend(deleted_users)
    return deletions_detected, errors
def run(dry_run, gitlab_project_id=None, thread_pool_size=10):
    """Open a delete-user MR for every user flagged for deletion."""
    users = init_users()
    user_specs = threaded.run(init_user_spec, users, thread_pool_size)
    # init_user_spec yields (username, delete, paths)
    users_to_delete = [(username, paths)
                       for username, delete, paths in user_specs
                       if delete]
    if not dry_run:
        mr_cli = mr_client_gateway.init(gitlab_project_id=gitlab_project_id,
                                        sqs_or_gitlab='gitlab')
    for username, paths in users_to_delete:
        logging.info(['delete_user', username])
        if dry_run:
            continue
        mr = CreateDeleteUser(username, paths)
        mr.submit(cli=mr_cli)
def run(dry_run, gitlab_project_id=None, thread_pool_size=10):
    """Create a delete-user MR via GitLabApi for each flagged user."""
    users = init_users()
    user_specs = threaded.run(init_user_spec, users, thread_pool_size)
    # init_user_spec yields (username, delete, paths)
    users_to_delete = [(username, paths)
                       for username, delete, paths in user_specs
                       if delete]
    if not dry_run:
        instance = queries.get_gitlab_instance()
        settings = queries.get_app_interface_settings()
        gl = GitLabApi(instance, project_id=gitlab_project_id,
                       settings=settings)
    for username, paths in users_to_delete:
        logging.info(['delete_user', username])
        if dry_run:
            continue
        gl.create_delete_user_mr(username, paths)
def run(dry_run, thread_pool_size=10, cluster_name=None):
    """Validate all PrometheusRule resources; exit(1) if any fails.

    :param dry_run: unused here; kept for the integration interface
    :param thread_pool_size: number of concurrent rule checks
    :param cluster_name: optional filter to a single cluster
    """
    orb.QONTRACT_INTEGRATION = QONTRACT_INTEGRATION
    orb.QONTRACT_INTEGRATION_VERSION = QONTRACT_INTEGRATION_VERSION
    gqlapi = gql.get_api()
    rules_paths = queries.get_prometheus_rules_paths()
    rules = []
    for n in gqlapi.query(orb.NAMESPACES_QUERY)['namespaces']:
        if cluster_name and n['cluster']['name'] != cluster_name:
            continue
        if not n['managedResourceTypes'] or \
                'PrometheusRule' not in n['managedResourceTypes']:
            continue
        openshift_resources = n.get('openshiftResources')
        if not openshift_resources:
            # fixed: the message was missing a space, producing
            # "namespace<name>" in the log output
            logging.warning("No openshiftResources defined for namespace "
                            f"{n['name']} in cluster {n['cluster']['name']}")
            continue
        for r in openshift_resources:
            if r['path'] not in rules_paths:
                continue
            openshift_resource = orb.fetch_openshift_resource(r, n)
            rules.append({
                'path': r['path'],
                'spec': openshift_resource.body['spec'],
                'namespace': n['name'],
                'cluster': n['cluster']['name']
            })
    failed = [
        r for r in threaded.run(check_rule, rules, thread_pool_size)
        if not r['check_result']
    ]
    if failed:
        for f in failed:
            logging.warning(f"Error in rule {f['path']} from namespace "
                            f"{f['namespace']} in cluster {f['cluster']}: "
                            f"{f['check_result']}")
        sys.exit(1)
def run(dry_run, thread_pool_size=10, io_dir='throughput/'):
    """Run the wrapped integration per diff; exit(1) on any failure."""
    diffs = read_saas_file_owners_diffs(io_dir)
    # nothing to do when there are no diffs
    if not diffs:
        return
    available_thread_pool_size = \
        threaded.estimate_available_thread_pool_size(
            thread_pool_size, len(diffs))
    exit_codes = threaded.run(
        osd_run_wrapper, diffs, thread_pool_size,
        dry_run=dry_run,
        available_thread_pool_size=available_thread_pool_size
    )
    # a non-zero exit code from any worker fails the whole run
    if any(ec for ec in exit_codes):
        sys.exit(1)
def get_moving_commits_diff(self, dry_run):
    """Collect moving-commit diffs across all saas files, flattened."""
    per_file_diffs = threaded.run(self.get_moving_commits_diff_saas_file,
                                  self.saas_files,
                                  self.thread_pool_size,
                                  dry_run=dry_run)
    # flatten the per-saas-file diff lists into one list
    flattened = []
    for diffs in per_file_diffs:
        flattened.extend(diffs)
    return flattened
def map_resources(self):
    """Map every resource type concurrently via self.map_resource."""
    threaded.run(func=self.map_resource,
                 iterable=self.resource_types,
                 thread_pool_size=self.thread_pool_size)
def get_configs_diff(self):
    """Collect config diffs across all saas files, flattened."""
    per_file_diffs = threaded.run(self.get_configs_diff_saas_file,
                                  self.saas_files,
                                  self.thread_pool_size)
    # flatten the per-saas-file diff lists into one list
    flattened = []
    for diffs in per_file_diffs:
        flattened.extend(diffs)
    return flattened