def get_saas_files(saas_file_name=None, env_name=None, app_name=None):
    """Return SaasFile resources defined in app-interface.

    Optional filters narrow the result:
    :param saas_file_name: keep only the saas file with this name
    :param env_name: keep only resource-template targets whose namespace
        environment has this name; saas files left without any targets
        are dropped
    :param app_name: keep only saas files belonging to this app
    :return: list of saas file dicts

    Passing an empty string for any filter yields an empty list.
    """
    gqlapi = gql.get_api()
    saas_files = gqlapi.query(SAAS_FILES_QUERY)['saas_files']
    if saas_file_name is None and env_name is None and app_name is None:
        return saas_files
    if saas_file_name == '' or env_name == '' or app_name == '':
        return []

    # Build a fresh list instead of removing from the query result while
    # iterating over copies: the original mutated the list returned by
    # gqlapi.query, which may be shared/cached across calls.
    filtered = []
    for saas_file in saas_files:
        if saas_file_name and saas_file['name'] != saas_file_name:
            continue
        if app_name and saas_file['app']['name'] != app_name:
            continue
        if env_name:
            # Prune non-matching targets from the nested structures
            # (iterating over copies since we remove in place), then drop
            # the saas file entirely if nothing is left.
            resource_templates = saas_file['resourceTemplates']
            for rt in resource_templates[:]:
                targets = rt['targets']
                for target in targets[:]:
                    environment = target['namespace']['environment']
                    if environment['name'] != env_name:
                        targets.remove(target)
                if not targets:
                    resource_templates.remove(rt)
            if not resource_templates:
                continue
        filtered.append(saas_file)
    return filtered
def fetch_desired_state(oc_map):
    """Build the desired (cluster, group, user) rolebindings from roles.

    Roles with an invalid expirationDate format raise ValueError; roles
    whose expiration has passed are skipped with a warning.  Access
    entries missing a cluster or group, clusters not present in oc_map,
    and users without a github_username are ignored.
    """
    gqlapi = gql.get_api()
    roles = gqlapi.query(ROLES_QUERY)['roles']
    desired_state = []
    for role in roles:
        expiration_date = role['expirationDate']
        if not openshift_rolebindings \
                .has_valid_expiration_date(expiration_date):
            raise ValueError(
                f'expirationDate field is not formatted as YYYY-MM-DD, '
                f'currently set as {expiration_date}'
            )
        if not openshift_rolebindings.role_still_valid(expiration_date):
            logging.warning(
                f'The maximum expiration date of {role["name"]} '
                f'has passed '
            )
            continue
        for access in role['access'] or []:
            cluster = access['cluster']
            group = access['group']
            if cluster is None or group is None:
                continue
            if oc_map and cluster['name'] not in oc_map.clusters():
                continue
            for user in role['users']:
                username = user['github_username']
                if username is None:
                    continue
                desired_state.append({
                    "cluster": cluster['name'],
                    "group": group,
                    "user": username
                })
    return desired_state
def fetch_desired_state():
    """Aggregate desired quay-membership state from app-interface roles.

    For each role with quay-membership permissions, collects the
    quay_username of every user and bot that has one and registers the
    member list under each processed permission.

    :return: AggregatedList mapping processed permissions to members
    """
    gqlapi = gql.get_api()
    result = gqlapi.query(QUAY_ORG_QUERY)
    state = AggregatedList()
    for role in result['roles']:
        permissions = [
            process_permission(p)
            for p in role['permissions']
            if p.get('service') == 'quay-membership'
        ]
        if not permissions:
            continue
        # Plain iteration instead of a closure redefined on every loop
        # pass; members without a quay_username are skipped.
        members = []
        for member in role['users'] + role['bots']:
            quay_username = member.get('quay_username')
            if quay_username:
                members.append(quay_username)
        for p in permissions:
            state.add(p, members)
    return state
def fetch_desired_state():
    """Aggregate desired quay repository state from app-interface apps.

    Exits with ExitCodes.ERROR if the same org/repo pair is defined more
    than once.

    :return: AggregatedList mapping {org, repo} params to repo settings
    """
    gqlapi = gql.get_api()
    result = gqlapi.query(QUAY_REPOS_QUERY)
    state = AggregatedList()
    for app in result['apps']:
        quay_repos = app.get('quayRepos')
        if quay_repos is None:
            continue
        for quay_repo in quay_repos:
            org_name = quay_repo['org']['name']
            for repo in quay_repo['items']:
                params = {'org': org_name, 'repo': repo['name']}
                # Avoiding duplicates: state.get raises KeyError when the
                # params are not registered yet, which is the normal path.
                try:
                    state.get(params)
                    # Bug fix: the first argument must be the format
                    # string itself, not a list containing it — otherwise
                    # the %s placeholders are never filled in.
                    logging.error('Repository %s/%s defined more than once',
                                  params['org'], params['repo'])
                    sys.exit(ExitCodes.ERROR)
                except KeyError:
                    pass
                item = {
                    'public': repo['public'],
                    'description': repo['description'].strip()
                }
                state.add(params, item)
    return state
def get_desired_state():
    """Build desired Jenkins (instance, role, user) entries from roles.

    Users are always included; bots are included only when they have an
    org_username.
    """
    gqlapi = gql.get_api()
    roles = gqlapi.query(ROLES_QUERY)['roles']
    desired_state = []
    for role in roles:
        jenkins_permissions = [
            p for p in role['permissions']
            if p['service'] == 'jenkins-role'
        ]
        for permission in jenkins_permissions:
            instance_name = permission['instance']['name']
            role_name = permission['role']
            for user in role['users']:
                desired_state.append({
                    "instance": instance_name,
                    "role": role_name,
                    "user": user['org_username']
                })
            for bot in role['bots']:
                if bot['org_username'] is None:
                    continue
                desired_state.append({
                    "instance": instance_name,
                    "role": role_name,
                    "user": bot['org_username']
                })
    return desired_state
def get_desired_state(internal, use_jump_host, thread_pool_size):
    """Return (oc_map, desired namespaces) for clusters in this shard.

    Only namespaces whose cluster/name shard key belongs to the current
    shard are considered; namespaces on unreachable clusters are dropped.
    """
    gqlapi = gql.get_api()
    all_namespaces = gqlapi.query(QUERY)['namespaces']
    namespaces = [
        ns for ns in all_namespaces
        if is_in_shard(f'{ns["cluster"]["name"]}/{ns["name"]}')
    ]
    ri = ResourceInventory()
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(namespaces=namespaces,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size,
                    init_projects=True)
    ob.init_specs_to_fetch(ri, oc_map,
                           namespaces=namespaces,
                           override_managed_types=['Namespace'])
    desired_state = []
    for cluster, namespace, _, _ in ri:
        if cluster in oc_map.clusters():
            desired_state.append(
                {"cluster": cluster, "namespace": namespace})
    return oc_map, desired_state
def get_desired_state(slack_map):
    """Compute the desired Slack usergroup state from app-interface roles.

    For every slack-usergroup permission on a role, resolves the member
    list (role users + pagerduty + repo owners) and channels, and merges
    entries that target the same workspace/usergroup pair.
    """
    gqlapi = gql.get_api()
    roles = gqlapi.query(ROLES_QUERY)['roles']
    all_users = queries.get_users()
    desired_state = []
    for role in roles:
        for permission in role['permissions']:
            if permission['service'] != 'slack-usergroup':
                continue
            workspace = permission['workspace']
            managed_usergroups = workspace['managedUsergroups']
            if managed_usergroups is None:
                continue
            workspace_name = workspace['name']
            usergroup = permission['handle']
            description = permission['description']
            if usergroup not in managed_usergroups:
                logging.warning(
                    '[{}] usergroup {} not in managed usergroups {}'.format(
                        workspace_name, usergroup, managed_usergroups))
                continue
            slack = slack_map[workspace_name]['slack']
            ugid = slack.get_usergroup_id(usergroup)
            user_names = [get_slack_username(u) for u in role['users']]
            user_names.extend(
                get_slack_usernames_from_pagerduty(
                    permission['pagerduty'], all_users, usergroup))
            user_names.extend(
                get_slack_usernames_from_owners(
                    permission['ownersFromRepos'], all_users, usergroup))
            users = slack.get_users_by_names(user_names)
            channel_names = [] if permission['channels'] is None \
                else permission['channels']
            channels = slack.get_channels_by_names(channel_names)
            # Duplicates are always merged into the first entry, so at
            # most one match can exist for a workspace/usergroup pair.
            existing = next(
                (i for i in desired_state
                 if i['workspace'] == workspace_name
                 and i['usergroup'] == usergroup),
                None)
            if existing is not None:
                existing['users'].update(users)
            else:
                desired_state.append({
                    "workspace": workspace_name,
                    "usergroup": usergroup,
                    "usergroup_id": ugid,
                    "users": users,
                    "channels": channels,
                    "description": description,
                })
    return desired_state
def setup(dry_run, print_only, thread_pool_size, internal,
          use_jump_host, account_name):
    """Prepare state for a terraform-resources run.

    Optionally restricts the run to a single AWS account, collects the
    namespaces with managed terraform resources, fetches current cluster
    state, and populates/dumps the terraform specs.

    :raises ValueError: if account_name does not match any AWS account
    :return: (ResourceInventory, oc_map, Terraform client, namespaces)
    """
    gqlapi = gql.get_api()
    accounts = queries.get_aws_accounts()
    if account_name:
        accounts = [a for a in accounts if a['name'] == account_name]
        if not accounts:
            raise ValueError(f"aws account {account_name} is not found")
    settings = queries.get_app_interface_settings()
    namespaces = gqlapi.query(TF_NAMESPACES_QUERY)['namespaces']
    tf_namespaces = [
        ns for ns in namespaces if ns.get('managedTerraformResources')
    ]
    ri, oc_map = fetch_current_state(dry_run, tf_namespaces,
                                     thread_pool_size, internal,
                                     use_jump_host)
    ts, working_dirs = init_working_dirs(accounts, thread_pool_size,
                                         oc_map=oc_map, settings=settings)
    tf = Terraform(QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION,
                   QONTRACT_TF_PREFIX, accounts, working_dirs,
                   thread_pool_size)
    existing_secrets = tf.get_terraform_output_secrets()
    ts.populate_resources(tf_namespaces, existing_secrets, account_name)
    ts.dump(print_only, existing_dirs=working_dirs)
    return ri, oc_map, tf, tf_namespaces
def run(dry_run):
    """Reconcile users/teams against every configured Sentry instance."""
    settings = queries.get_app_interface_settings()
    gqlapi = gql.get_api()
    github = init_github()
    secret_reader = SecretReader(settings=settings)
    # Reconcile against all sentry instances
    instances = gqlapi.query(SENTRY_INSTANCES_QUERY)["instances"]
    # Resolve all secrets up front, keyed by instance name
    # (tokens first, then admin users, preserving read order).
    tokens = {}
    for inst in instances:
        tokens[inst["name"]] = secret_reader.read(inst["automationToken"])
    skip_users = {}
    for inst in instances:
        skip_users[inst["name"]] = secret_reader.read(inst["adminUser"])
    for instance in instances:
        name = instance["name"]
        sentry_client = SentryClient(instance["consoleUrl"], tokens[name])
        current_state = fetch_current_state(sentry_client,
                                            [skip_users[name]])
        desired_state = fetch_desired_state(gqlapi, instance, github)
        reconciler = SentryReconciler(sentry_client, dry_run)
        reconciler.reconcile(current_state, desired_state)
def get_config(default=False):
    """Build the github org configuration from app-interface.

    :param default: when True, only default orgs are included and exactly
        one default org must exist (KeyError otherwise)
    :return: {"github": {org_name: {"token", "managed_teams"}}}
    """
    gqlapi = gql.get_api()
    orgs = gqlapi.query(ORGS_QUERY)["orgs"]
    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    config = {"github": {}}
    found_defaults = []
    for org in orgs:
        org_name = org["name"]
        if org.get("default"):
            found_defaults.append(org_name)
        elif default:
            # only default orgs are wanted; skip the rest
            continue
        config["github"][org_name] = {
            "token": secret_reader.read(org["token"]),
            "managed_teams": org["managedTeams"],
        }
    if default:
        if not found_defaults:
            raise KeyError("default github org config not found")
        if len(found_defaults) > 1:
            raise KeyError("multiple default github org configs found: "
                           f"{found_defaults}")
    return config
def fetch_desired_state():
    """Aggregate desired quay-membership state from app-interface roles.

    :return: AggregatedList mapping quay-membership permissions to the
        quay usernames of the role's users and bots
    """
    gqlapi = gql.get_api()
    result = gqlapi.query(QUAY_ORG_QUERY)
    state = AggregatedList()
    for role in result['roles']:
        # Comprehension instead of filter/lambda.
        permissions = [
            p for p in role['permissions']
            if p.get('service') == 'quay-membership'
        ]
        if not permissions:
            continue
        # Plain loops instead of a per-iteration closure and a
        # side-effecting list(map(...)) that built a throwaway list.
        members = []
        for member in role['users'] + role['bots']:
            quay_username = member.get('quay_username')
            if quay_username:
                members.append(quay_username)
        for p in permissions:
            state.add(p, members)
    return state
def __init__(self, dry_run=False):
    """Set up gql/secret clients, a skopeo wrapper and push credentials.

    :param dry_run: when True, propagated to the Skopeo CLI wrapper
    """
    self.dry_run = dry_run
    self.gqlapi = gql.get_api()
    app_interface_settings = queries.get_app_interface_settings()
    self.secret_reader = SecretReader(settings=app_interface_settings)
    self.skopeo_cli = Skopeo(dry_run)
    self.push_creds = self._get_push_creds()
def fetch_desired_state():
    """Aggregate desired quay-membership state from non-expired roles.

    :return: AggregatedList mapping processed quay-membership permissions
        to the quay usernames of the role's users and bots
    """
    gqlapi = gql.get_api()
    roles = expiration.filter(gqlapi.query(QUAY_ORG_QUERY)['roles'])
    state = AggregatedList()
    for role in roles:
        permissions = []
        for p in role['permissions']:
            if p.get('service') == 'quay-membership':
                permissions.append(process_permission(p))
        if not permissions:
            continue
        # Members without a quay_username are skipped.
        members = [
            u['quay_username']
            for u in role['users'] + role['bots']
            if u.get('quay_username')
        ]
        for permission in permissions:
            state.add(permission, members)
    return state
def validate_diffs(diffs):
    """Validate that each diff's cluster/group combination is managed.

    Logs every invalid combination and exits with status 1 if any exist.

    :param diffs: iterable of dicts with 'cluster' and 'group' keys
    """
    gqlapi = gql.get_api()
    clusters_query = gqlapi.query(GROUPS_QUERY)['clusters']
    desired_combos = [{
        "cluster": diff['cluster'],
        "group": diff['group']
    } for diff in diffs]
    # De-duplicate with a plain loop instead of a side-effecting list
    # comprehension (the original built a throwaway list of Nones).
    # Dicts are unhashable, so a set cannot be used here.
    desired_combos_unique = []
    for item in desired_combos:
        if item not in desired_combos_unique:
            desired_combos_unique.append(item)
    valid_combos = [{
        "cluster": cluster['name'],
        "group": group
    } for cluster in clusters_query
        for group in cluster['managedGroups'] or []]
    invalid_combos = [
        item for item in desired_combos_unique
        if item not in valid_combos
    ]
    if invalid_combos:
        for combo in invalid_combos:
            msg = ('invalid cluster/group combination: {}/{}'
                   ' (hint: should be added to managedGroups)').format(
                       combo['cluster'], combo['group'])
            logging.error(msg)
        sys.exit(1)
def get_tf_resource_info(namespace, identifier):
    """
    Extract the rds terraformResources entry matching the identifier.

    :param namespace: the namespace dictionary
    :param identifier: the identifier we are looking for
    :return: dict with cluster, output_resource_name, engine and
        engine_version for the first matching rds resource, or None
    """
    for tf_resource in namespace['terraformResources']:
        if 'identifier' not in tf_resource:
            continue
        if tf_resource['identifier'] != identifier:
            continue
        if tf_resource['provider'] != 'rds':
            continue
        defaults_ref = gql.get_api().get_resource(tf_resource['defaults'])
        defaults = yaml.safe_load(defaults_ref['content'])
        output_resource_name = tf_resource['output_resource_name']
        if output_resource_name is None:
            # fall back to the "<identifier>-<provider>" convention
            output_resource_name = (f'{tf_resource["identifier"]}-'
                                    f'{tf_resource["provider"]}')
        return {
            'cluster': namespace['cluster']['name'],
            'output_resource_name': output_resource_name,
            'engine': defaults.get('engine', 'postgres'),
            'engine_version': defaults.get('engine_version', 'latest'),
        }
def run(dry_run):
    """Check app dependencies against the settings dependency map.

    Logs an error for each app with missing dependencies (and exits 1 at
    the end) and a debug message for redundant ones.  Exits silently when
    no dependency map is configured.
    """
    settings = queries.get_app_interface_settings()
    dependency_map = settings.get('dependencies')
    if not dependency_map:
        sys.exit()
    gqlapi = gql.get_api()
    apps = gqlapi.query(APPS_QUERY)['apps']
    error = False
    for app in apps:
        app_name = app['name']
        app_deps = app.get('dependencies')
        current_deps = [d['name'] for d in app_deps] if app_deps else []
        desired_deps = get_desired_dependency_names(app, dependency_map)
        missing_deps = list(desired_deps.difference(current_deps))
        if missing_deps:
            error = True
            logging.error(
                f"App '{app_name}' has missing dependencies: "
                f"{missing_deps}")
        redundant_deps = list(set(current_deps).difference(desired_deps))
        if redundant_deps:
            logging.debug(
                f"App '{app_name}' has redundant dependencies: "
                f"{redundant_deps}")
    if error:
        sys.exit(1)
def get_namespaces(minimal=False):
    """Return all Namespaces.

    :param minimal: when True, run the minimal namespaces query
    """
    gqlapi = gql.get_api()
    query = NAMESPACES_MINIMAL_QUERY if minimal else NAMESPACES_QUERY
    return gqlapi.query(query)['namespaces']
def load_tkn_template(path: str, variables: dict[str, str]) -> dict[str, Any]:
    """Fetch a yaml resource from qontract-server, render it as a strict
    jinja2 template with the given variables, and parse the result."""
    content = gql.get_api().get_resource(path)['content']
    template = jinja2.Template(content, undefined=jinja2.StrictUndefined)
    return yaml.safe_load(template.render(variables))
def get_github_instance():
    """Return the single app-sre Github instance (None if absent)."""
    gqlapi = gql.get_api()
    instances = gqlapi.query(GITHUB_INSTANCE_QUERY)['instances']
    # Only the app-sre instance is of interest here.
    return next(
        (i for i in instances
         if i['url'] == "https://github.com/app-sre"),
        None)
def run(dry_run, thread_pool_size=10, internal=None, use_jump_host=True,
        defer=None):
    """Reconcile NetworkPolicy resources for namespaces in this shard.

    Only namespaces with networkPoliciesAllow set and belonging to the
    current shard are processed; exits 1 if any error was registered.
    """
    gqlapi = gql.get_api()
    namespaces = []
    for ns in gqlapi.query(NAMESPACES_QUERY)['namespaces']:
        if not ns.get('networkPoliciesAllow'):
            continue
        shard_key = f"{ns['cluster']['name']}/{ns['name']}"
        if is_in_shard(shard_key):
            namespaces.append(ns)
    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=['NetworkPolicy'],
        internal=internal,
        use_jump_host=use_jump_host)
    defer(lambda: oc_map.cleanup())
    fetch_desired_state(namespaces, ri, oc_map)
    ob.realize_data(dry_run, oc_map, ri)
    if ri.has_error_registered():
        sys.exit(1)
def run(dry_run, thread_pool_size=10, internal=None, use_jump_host=True,
        providers=None, cluster_name=None, namespace_name=None,
        init_api_resources=False, defer=None):
    """Reconcile openshift resources for namespaces in this shard.

    :param providers: optional list of resource providers to restrict to
        (defaults to an empty list)
    :return: the ResourceInventory; exits 1 if any error was registered
    """
    # Fix: the original used a mutable default argument (providers=[]),
    # which is shared across calls; use a None sentinel instead.
    if providers is None:
        providers = []
    gqlapi = gql.get_api()
    namespaces = [
        namespace_info for namespace_info
        in gqlapi.query(NAMESPACES_QUERY)['namespaces']
        if is_in_shard(f"{namespace_info['cluster']['name']}/"
                       f"{namespace_info['name']}")
    ]
    namespaces = filter_namespaces_by_cluster_and_namespace(
        namespaces, cluster_name, namespace_name)
    namespaces = canonicalize_namespaces(namespaces, providers)
    oc_map, ri = fetch_data(namespaces, thread_pool_size, internal,
                            use_jump_host,
                            init_api_resources=init_api_resources)
    defer(lambda: oc_map.cleanup())
    ob.realize_data(dry_run, oc_map, ri)
    if ri.has_error_registered():
        sys.exit(1)
    return ri
def setup(dry_run, print_only, thread_pool_size, internal,
          use_jump_host, account_name, extra_labels):
    """Prepare state for a terraform-resources run (with OCM support).

    Optionally restricts the run to one AWS account (recording it as the
    shard key in extra_labels), fetches current cluster state, and
    populates/dumps the terraform specs using an OCM map for clusters
    that have OCM configured.

    :raises ValueError: if account_name does not match any AWS account
    :return: (ResourceInventory, oc_map, Terraform client, namespaces)
    """
    gqlapi = gql.get_api()
    accounts = queries.get_aws_accounts()
    if account_name:
        accounts = [a for a in accounts if a['name'] == account_name]
        if not accounts:
            raise ValueError(f"aws account {account_name} is not found")
        extra_labels['shard_key'] = account_name
    settings = queries.get_app_interface_settings()
    namespaces = gqlapi.query(TF_NAMESPACES_QUERY)['namespaces']
    tf_namespaces = filter_tf_namespaces(namespaces, account_name)
    ri, oc_map = fetch_current_state(dry_run, tf_namespaces,
                                     thread_pool_size, internal,
                                     use_jump_host, account_name)
    ts, working_dirs = init_working_dirs(accounts, thread_pool_size,
                                         oc_map=oc_map, settings=settings)
    tf = Terraform(QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION,
                   QONTRACT_TF_PREFIX, accounts, working_dirs,
                   thread_pool_size)
    existing_secrets = tf.get_terraform_output_secrets()
    clusters = [c for c in queries.get_clusters()
                if c.get('ocm') is not None]
    ocm_map = OCMMap(clusters=clusters, integration=QONTRACT_INTEGRATION,
                     settings=settings)
    ts.populate_resources(tf_namespaces, existing_secrets, account_name,
                          ocm_map=ocm_map)
    ts.dump(print_only, existing_dirs=working_dirs)
    return ri, oc_map, tf, tf_namespaces
def get_clusters_by(filter: ClusterFilter, minimal: bool = False) -> list[dict]:
    """Return all Clusters fitting the given filter.

    :param minimal: when True, render the minimal clusters query
    """
    gqlapi = gql.get_api()
    if minimal:
        tmpl = CLUSTERS_MINIMAL_QUERY
    else:
        tmpl = CLUSTERS_QUERY
    query = Template(tmpl).render(filter=filter)
    return gqlapi.query(query)['clusters']
def get_app_interface_settings():
    """Return App Interface settings (or None when no settings exist)."""
    gqlapi = gql.get_api()
    settings = gqlapi.query(APP_INTERFACE_SETTINGS_QUERY)['settings']
    # assuming a single settings file for now
    return settings[0] if settings else None
def get_slack_workspace():
    """Return a single Slack workspace.

    Warns when more than one workspace is defined and returns the first.
    """
    gqlapi = gql.get_api()
    slack_workspaces = \
        gqlapi.query(SLACK_WORKSPACES_QUERY)['slack_workspaces']
    if len(slack_workspaces) != 1:
        logging.warning('multiple Slack workspaces found.')
    # Fix: reuse the result already fetched instead of issuing the
    # identical GraphQL query a second time just to index it.
    return slack_workspaces[0]
def collect_configs():
    """Combine raw jenkins configs with generated saas-file configs.

    :return: (configs, settings, saas_file_repo_urls)
    """
    gqlapi = gql.get_api()
    raw_jjb_configs = gqlapi.query(QUERY)['jenkins_configs']
    saas_file_configs, settings, saas_file_repo_urls = \
        collect_saas_file_configs()
    return (raw_jjb_configs + saas_file_configs,
            settings,
            saas_file_repo_urls)
def get_users(refs=False):
    """Return all Users.

    :param refs: rendered into the users query template
    """
    gqlapi = gql.get_api()
    rendered_query = Template(USERS_QUERY).render(
        filter=None,
        refs=refs,
    )
    return gqlapi.query(rendered_query)['users']
def get_replicas_info(namespaces):
    """Map, per terraform account, each replica resource to its source.

    Builds {account: {replica_output_name: source_output_name}} from the
    terraformResources of the given namespaces.  The replica source is
    resolved in order of precedence: the resource's own 'replica_source'
    field, then 'replicate_source_db' from the resource overrides, then
    'replicate_source_db' from the referenced defaults resource.
    Output names follow the "<identifier>-<provider>" convention.

    :param namespaces: namespace dicts with a 'terraformResources' key
    :return: defaultdict(dict) keyed by account
    """
    replicas_info = defaultdict(dict)
    for tf_namespace in namespaces:
        tf_resources = tf_namespace.get('terraformResources')
        if tf_resources is None:
            continue
        for tf_resource in tf_namespace['terraformResources']:
            # First, we have to find the terraform resources
            # that have a replica_source defined in app-interface
            replica_src = tf_resource.get('replica_source')
            if replica_src is None:
                # When replica_source is not there, we look for
                # replicate_source_db in the defaults
                replica_src_db = None
                defaults_ref = tf_resource.get('defaults')
                if defaults_ref is not None:
                    defaults_res = gql.get_api().get_resource(
                        defaults_ref
                    )
                    defaults = yaml.safe_load(defaults_res['content'])
                    replica_src_db = defaults.get('replicate_source_db')
                # Also, we look for replicate_source_db in the overrides
                # (overrides win over the defaults value probed above)
                override_replica_src_db = None
                overrides = tf_resource.get('overrides')
                if overrides is not None:
                    override_replica_src_db = json.loads(overrides).get(
                        'replicate_source_db'
                    )
                if override_replica_src_db is not None:
                    replica_src_db = override_replica_src_db
                # Getting whatever we probed here
                replica_src = replica_src_db
            if replica_src is None:
                # No replica source information anywhere
                continue
            # The replica name, as found in the
            # self.format_output()
            replica_name = (f'{tf_resource.get("identifier")}-'
                            f'{tf_resource.get("provider")}')
            # The replica source name, as found in the
            # self.format_output()
            replica_source_name = (f'{replica_src}-'
                                   f'{tf_resource.get("provider")}')
            # Creating a dict that is convenient to use inside the
            # loop processing the formatted_output
            tf_account = tf_resource.get('account')
            replicas_info[tf_account][replica_name] = replica_source_name
    return replicas_info
def get_prometheus_tests():
    """Returns a path indexed dict with the prometheus tests content"""
    gqlapi = gql.get_api()
    # This can be a jinja template. We cannot load the yaml here
    return {
        t["path"]: t["content"]
        for t in gqlapi.query(PROMETHEUS_RULES_TESTS_QUERY)["tests"]
    }
def get_known_hosts(self, jh):
    """Return the known-hosts file content referenced by the jump host.

    :param jh: jump host dict with a 'knownHosts' resource path
    :raises FetchResourceError: when the resource cannot be fetched
    """
    path = jh['knownHosts']
    gqlapi = gql.get_api()
    try:
        resource = gqlapi.get_resource(path)
    except gql.GqlGetResourceError as e:
        raise FetchResourceError(str(e))
    return resource['content']