def root_owner(ctx, cluster, namespace, kind, name):
    """CLI command: print the root owner of a namespaced object.

    Looks up <kind>/<name> in <namespace> on <cluster> and prints the
    object that get_obj_root_owner resolves as its root owner.
    """
    settings = queries.get_app_interface_settings()
    # build a client for the single requested cluster only
    clusters = [c for c in queries.get_clusters(minimal=True)
                if c['name'] == cluster]
    oc_map = OC_Map(clusters=clusters,
                    integration='qontract-cli',
                    thread_pool_size=1,
                    settings=settings,
                    init_api_resources=True)
    oc = oc_map.get(cluster)
    obj = oc.get(namespace, kind, name)
    # tolerate missing or controller-less owners instead of raising
    root_owner = oc.get_obj_root_owner(namespace, obj,
                                       allow_not_found=True,
                                       allow_not_controller=True)

    # TODO(mafriedm): fix this
    # do not sort
    ctx.obj['options']['sort'] = False
    # a bit hacky, but ¯\_(ツ)_/¯
    if ctx.obj['options']['output'] != 'json':
        ctx.obj['options']['output'] = 'yaml'

    print_output(ctx.obj['options'], root_owner)
def get_desired_state(internal, use_jump_host, thread_pool_size):
    """Return (oc_map, desired_state) for the namespaces in this shard.

    desired_state is a list of {"cluster", "namespace"} dicts, limited to
    clusters for which a client was successfully initialized.
    """
    gqlapi = gql.get_api()
    all_namespaces = gqlapi.query(QUERY)['namespaces']
    # keep only namespaces that belong to this integration shard
    namespaces = [
        ns for ns in all_namespaces
        if is_in_shard(f'{ns["cluster"]["name"]}/{ns["name"]}')
    ]

    ri = ResourceInventory()
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(namespaces=namespaces,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size,
                    init_projects=True)
    ob.init_specs_to_fetch(ri, oc_map,
                           namespaces=namespaces,
                           override_managed_types=['Namespace'])

    desired_state = [
        {"cluster": cluster, "namespace": namespace}
        for cluster, namespace, _, _ in ri
        if cluster in oc_map.clusters()
    ]
    return oc_map, desired_state
def run(dry_run, thread_pool_size=10, internal=None, use_jump_host=True,
        defer=None):
    """Notify Slack once when an OCM cluster upgrade window has started.

    Scans each OCM cluster for an UpgradeConfig; when 'now' has passed
    the configured upgradeAt time and no prior notification is recorded
    in State, posts a Slack message and records the state key.
    """
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    # only clusters managed via OCM carry upgrade configs we care about
    clusters = [c for c in queries.get_clusters(minimal=True) if c.get("ocm")]
    oc_map = OC_Map(
        clusters=clusters,
        integration=QONTRACT_INTEGRATION,
        settings=settings,
        internal=internal,
        use_jump_host=use_jump_host,
        thread_pool_size=thread_pool_size,
    )
    defer(oc_map.cleanup)
    state = State(integration=QONTRACT_INTEGRATION, accounts=accounts,
                  settings=settings)
    # slack is only needed (and only defined) outside dry-run mode;
    # the sole use below is guarded by the same condition
    if not dry_run:
        slack = slackapi_from_queries(QONTRACT_INTEGRATION)

    now = datetime.utcnow()
    for cluster in oc_map.clusters(include_errors=True):
        oc = oc_map.get(cluster)
        if not oc:
            # oc is a falsy OCLogMsg placeholder describing the init failure
            logging.log(level=oc.log_level, msg=oc.message)
            continue
        # NOTE(review): assumes oc.get returns a dict with 'items' even
        # with allow_not_found=True — confirm against the oc wrapper
        upgrade_config = oc.get(
            namespace="openshift-managed-upgrade-operator",
            kind="UpgradeConfig",
            allow_not_found=True,
        )["items"]
        if not upgrade_config:
            logging.debug(f"[{cluster}] UpgradeConfig not found.")
            continue
        # exactly one UpgradeConfig is expected; raises ValueError otherwise
        [upgrade_config] = upgrade_config

        upgrade_spec = upgrade_config["spec"]
        upgrade_at = upgrade_spec["upgradeAt"]
        version = upgrade_spec["desired"]["version"]
        upgrade_at_obj = datetime.strptime(upgrade_at, "%Y-%m-%dT%H:%M:%SZ")
        # one state entry per cluster+schedule, so each window notifies once
        state_key = f"{cluster}-{upgrade_at}"
        # if this is the first iteration in which 'now' had passed
        # the upgrade at date time, we send a notification
        if upgrade_at_obj < now:
            if state.exists(state_key):
                # already notified
                continue
            logging.info(["cluster_upgrade", cluster])
            if not dry_run:
                state.add(state_key)
                usergroup = f"{cluster}-cluster"
                usergroup_id = slack.get_usergroup_id(usergroup)
                slack.chat_post_message(
                    f"Heads up <!subteam^{usergroup_id}>! "
                    + f"cluster `{cluster}` is currently "
                    + f"being upgraded to version `{version}`")
def test_clusters_errors_with_include_errors(self, mock_secret_reader,
                                             mock_oc):
    """
    clusters(include_errors=True) also lists clusters whose client
    failed to initialize (here: one missing an automationToken).
    """
    token_missing = {
        "name": "test-1",
        "serverUrl": "http://localhost",
    }
    token_present = {
        "name": "test-2",
        "serverUrl": "http://localhost",
        "automationToken": {
            "path": "some-path",
            "field": "some-field"
        },
    }
    expected_names = [token_missing["name"], token_present["name"]]

    oc_map = OC_Map(clusters=[token_missing, token_present])

    self.assertEqual(oc_map.clusters(include_errors=True), expected_names)
    # the failed cluster is represented by an OCLogMsg placeholder
    self.assertIsInstance(oc_map.oc_map.get(token_missing["name"]), OCLogMsg)
def test_clusters_errors_with_include_errors(self, mock_secret_reader,
                                             mock_oc):
    """
    clusters(include_errors=True) must include clusters that failed to
    produce a client alongside the healthy ones.
    """
    bad_cluster = {
        'name': 'test-1',
        'serverUrl': 'http://localhost',
    }
    good_cluster = {
        'name': 'test-2',
        'serverUrl': 'http://localhost',
        'automationToken': {
            'path': 'some-path',
            'field': 'some-field'
        }
    }
    expected = [bad_cluster['name'], good_cluster['name']]

    oc_map = OC_Map(clusters=[bad_cluster, good_cluster])

    self.assertEqual(oc_map.clusters(include_errors=True), expected)
    # the broken cluster maps to an OCLogMsg placeholder, not a client
    self.assertIsInstance(oc_map.oc_map.get(bad_cluster['name']), OCLogMsg)
def run(dry_run, thread_pool_size=10, internal=None, use_jump_host=True,
        defer=None):
    """Notify Slack once when an OCM cluster upgrade window has started.

    For each OCM cluster with an UpgradeConfig whose upgradeAt time has
    passed, posts a Slack notification exactly once (tracked via State).
    """
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    # only OCM-managed clusters are scanned for upgrade configs
    clusters = [c for c in queries.get_clusters(minimal=True)
                if c.get('ocm')]
    oc_map = OC_Map(clusters=clusters, integration=QONTRACT_INTEGRATION,
                    settings=settings, internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)
    defer(oc_map.cleanup)
    state = State(integration=QONTRACT_INTEGRATION, accounts=accounts,
                  settings=settings)
    # slack is only defined outside dry-run; its single use below is
    # guarded by the same `if not dry_run` condition
    if not dry_run:
        slack = init_slack_workspace(QONTRACT_INTEGRATION)
    now = datetime.utcnow()
    for cluster in oc_map.clusters(include_errors=True):
        oc = oc_map.get(cluster)
        if not oc:
            # oc is a falsy OCLogMsg placeholder describing the failure
            logging.log(level=oc.log_level, msg=oc.message)
            continue
        # NOTE(review): assumes oc.get still returns a dict with 'items'
        # when allow_not_found=True — confirm against the oc wrapper
        upgrade_config = oc.get(
            namespace='openshift-managed-upgrade-operator',
            kind='UpgradeConfig',
            allow_not_found=True)['items']
        if not upgrade_config:
            logging.debug(f'[{cluster}] UpgradeConfig not found.')
            continue
        # exactly one UpgradeConfig expected; ValueError otherwise
        [upgrade_config] = upgrade_config
        upgrade_spec = upgrade_config['spec']
        upgrade_at = upgrade_spec['upgradeAt']
        version = upgrade_spec['desired']['version']
        upgrade_at_obj = datetime.strptime(upgrade_at, '%Y-%m-%dT%H:%M:%SZ')
        # one state entry per cluster+schedule => notify once per window
        state_key = f'{cluster}-{upgrade_at}'
        # if this is the first iteration in which 'now' had passed
        # the upgrade at date time, we send a notification
        if upgrade_at_obj < now:
            if state.exists(state_key):
                # already notified
                continue
            logging.info(['cluster_upgrade', cluster])
            if not dry_run:
                state.add(state_key)
                usergroup = f'{cluster}-cluster'
                usergroup_id = slack.get_usergroup_id(usergroup)
                slack.chat_post_message(
                    f'Heads up <!subteam^{usergroup_id}>! ' +
                    f'cluster `{cluster}` is currently ' +
                    f'being upgraded to version `{version}`')
def fetch_current_state(thread_pool_size, internal, use_jump_host):
    """Collect the current users of every reachable cluster, in parallel.

    Returns (oc_map, current_state) where current_state is the flattened
    list of per-cluster user results.
    """
    clusters = queries.get_clusters(minimal=True)
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(clusters=clusters,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)
    per_cluster_users = threaded.run(get_cluster_users,
                                     oc_map.clusters(),
                                     thread_pool_size,
                                     oc_map=oc_map)
    # flatten the per-cluster lists into a single state list
    current_state = []
    for users in per_cluster_users:
        current_state.extend(users)
    return oc_map, current_state
def test_clusters_errors_empty_return(self, mock_secret_reader):
    """
    clusters() must omit any cluster whose client could not be
    initialized.
    """
    broken_cluster = {
        'name': 'test-1',
        'serverUrl': 'http://localhost',
    }

    oc_map = OC_Map(clusters=[broken_cluster])

    self.assertEqual(oc_map.clusters(), [])
    # the failed init is recorded as an OCLogMsg placeholder
    self.assertIsInstance(oc_map.oc_map.get(broken_cluster['name']),
                          OCLogMsg)
def run(dry_run, thread_pool_size=10, internal=None, use_jump_host=True,
        defer=None):
    """Prune PipelineRun objects per each Tekton provider's retention policy.

    For every pipelines provider with a 'retention' config, deletes
    PipelineRuns not protected by the 'minimum' count or 'days' window.
    """
    settings = queries.get_app_interface_settings()
    pipelines_providers = queries.get_pipelines_providers()
    tkn_namespaces = [
        pp['namespace'] for pp in pipelines_providers
        if pp['provider'] == Providers.TEKTON
    ]
    oc_map = OC_Map(namespaces=tkn_namespaces,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings, internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)
    defer(lambda: oc_map.cleanup())

    for pp in pipelines_providers:
        retention = pp.get('retention')
        if not retention:
            # no retention policy configured -> nothing to prune
            continue
        if pp['provider'] == Providers.TEKTON:
            ns_info = pp['namespace']
            namespace = ns_info['name']
            cluster = ns_info['cluster']['name']
            oc = oc_map.get(cluster)
            # ascending by creationTimestamp: oldest runs come first
            pipeline_runs = sorted(
                oc.get(namespace, 'PipelineRun')['items'],
                key=lambda k: k['metadata']['creationTimestamp'])

            retention_min = retention.get('minimum')
            if retention_min:
                # NOTE(review): with the ascending sort above this removes
                # the OLDEST retention_min runs from the deletion
                # candidates, leaving the newest ones deletable — confirm
                # this matches the intended "keep a minimum" semantics.
                pipeline_runs = pipeline_runs[retention_min:]

            retention_days = retention.get('days')
            for pr in pipeline_runs:
                name = pr['metadata']['name']
                # runs still inside the retention-days window are kept
                if retention_days and \
                        within_retention_days(pr, retention_days):
                    continue
                logging.info([
                    'delete_trigger', cluster, namespace,
                    'PipelineRun', name
                ])
                if not dry_run:
                    oc.delete(namespace, 'PipelineRun', name)
def fetch_current_state(thread_pool_size, internal, use_jump_host):
    """Gather cluster users from every cluster, including errored ones.

    Returns (oc_map, current_state) where current_state is the flattened
    list of per-cluster results from get_cluster_users.
    """
    clusters = queries.get_clusters(minimal=True)
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(clusters=clusters,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)
    per_cluster = threaded.run(get_cluster_users,
                               oc_map.clusters(include_errors=True),
                               thread_pool_size,
                               oc_map=oc_map)
    # flatten the per-cluster result lists into one state list
    current_state = [user for users in per_cluster for user in users]
    return oc_map, current_state
def fetch_current_state(namespaces=None, clusters=None, thread_pool_size=None,
                        integration=None, integration_version=None,
                        override_managed_types=None, internal=None,
                        use_jump_host=True, init_api_resources=False):
    """Build a ResourceInventory and OC_Map, then populate current state.

    Fetch specs are derived from the given namespaces/clusters and
    executed in parallel; returns (ri, oc_map).
    """
    ri = ResourceInventory()
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(
        namespaces=namespaces,
        clusters=clusters,
        integration=integration,
        settings=settings,
        internal=internal,
        use_jump_host=use_jump_host,
        thread_pool_size=thread_pool_size,
        init_api_resources=init_api_resources,
    )
    specs = init_specs_to_fetch(
        ri,
        oc_map,
        namespaces=namespaces,
        clusters=clusters,
        override_managed_types=override_managed_types,
    )
    threaded.run(
        populate_current_state,
        specs,
        thread_pool_size,
        ri=ri,
        integration=integration,
        integration_version=integration_version,
    )
    return ri, oc_map
def fetch_current_state(dry_run, namespaces, thread_pool_size,
                        internal, use_jump_host, account_name):
    """Fetch the current Secret state of the given namespaces.

    In dry-run mode no cluster access happens and (ri, None) is
    returned; otherwise returns (ri, oc_map).
    """
    ri = ResourceInventory()
    if dry_run:
        # dry runs never talk to clusters
        return ri, None

    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(
        namespaces=namespaces,
        integration=QONTRACT_INTEGRATION,
        settings=settings,
        internal=internal,
        use_jump_host=use_jump_host,
        thread_pool_size=thread_pool_size,
    )
    specs = ob.init_specs_to_fetch(
        ri,
        oc_map,
        namespaces=namespaces,
        override_managed_types=['Secret'],
    )
    threaded.run(populate_oc_resources, specs, thread_pool_size,
                 ri=ri, account_name=account_name)
    return ri, oc_map
def run(dry_run: bool, thread_pool_size=10, internal: Optional[bool] = None,
        use_jump_host=True, defer=None):
    """Reconcile namespace existence for all sharded namespaces.

    Exits with ExitCodes.ERROR if any namespace operation failed or if
    duplicate namespaces were detected.
    """
    all_namespaces = queries.get_namespaces(minimal=True)
    shard_namespaces, duplicates = get_shard_namespaces(all_namespaces)
    desired_state = get_desired_state(shard_namespaces)
    settings = queries.get_app_interface_settings()

    oc_map = OC_Map(
        namespaces=shard_namespaces,
        integration=QONTRACT_INTEGRATION,
        settings=settings,
        internal=internal,
        use_jump_host=use_jump_host,
        thread_pool_size=thread_pool_size,
        init_projects=True,
    )
    defer(oc_map.cleanup)

    outcomes = threaded.run(
        manage_namespaces,
        desired_state,
        thread_pool_size,
        return_exceptions=True,
        dry_run=dry_run,
        oc_map=oc_map,
    )
    # fail the run on any per-namespace error or duplicate definitions
    had_errors = check_results(desired_state, outcomes)
    if had_errors or duplicates:
        sys.exit(ExitCodes.ERROR)
def manage_namespaces(spec: Mapping[str, str], oc_map: OC_Map,
                      dry_run: bool) -> None:
    """Create or delete one namespace so it matches its desired state."""
    cluster = spec['cluster']
    namespace = spec['namespace']
    desired_state = spec["desired_state"]

    oc = oc_map.get(cluster)
    if not oc:
        # oc is an OCLogMsg placeholder carrying the init failure details
        logging.log(level=oc.log_level, msg=oc.message)
        return None

    exists = oc.project_exists(namespace)
    if not exists and desired_state == NS_STATE_PRESENT:
        action = NS_ACTION_CREATE
    elif exists and desired_state == NS_STATE_ABSENT:
        action = NS_ACTION_DELETE
    else:
        # already in the desired state: nothing to do
        return None

    logging.info([action, cluster, namespace])
    if not dry_run:
        handlers = {
            NS_ACTION_CREATE: oc.new_project,
            NS_ACTION_DELETE: oc.delete_project,
        }
        handlers[action](namespace)
def fetch_data(
    namespaces,
    thread_pool_size,
    internal,
    use_jump_host,
    init_api_resources=False,
    overrides=None,
):
    """Fetch resource state for the given namespaces in parallel.

    Returns (oc_map, ri) where ri is the populated ResourceInventory.
    """
    ri = ResourceInventory()
    settings = queries.get_app_interface_settings()
    logging.debug(f"Overriding keys {overrides}")

    oc_map = OC_Map(
        namespaces=namespaces,
        integration=QONTRACT_INTEGRATION,
        settings=settings,
        internal=internal,
        use_jump_host=use_jump_host,
        thread_pool_size=thread_pool_size,
        init_api_resources=init_api_resources,
    )
    specs = ob.init_specs_to_fetch(ri, oc_map,
                                   namespaces=namespaces,
                                   override_managed_types=overrides)
    threaded.run(fetch_states, specs, thread_pool_size, ri=ri)
    return oc_map, ri
def test_disabled_integration(self, mock_secret_reader, mock_oc):
    """A cluster that disables the calling integration gets no client.

    Fix: the mock parameter was misnamed ``selfmock_secret_reader`` (a
    fused typo); mocks are injected positionally by the patch
    decorators, so renaming it to match the sibling tests is safe.
    """
    calling_int = "calling_integration"
    cluster = {
        "name": "cl1",
        "serverUrl": "http://localhost",
        "disable": {
            "integrations": [calling_int.replace("_", "-")]
        },
        "automationToken": {
            "path": "some-path",
            "field": "some-field"
        },
    }
    namespace = {"name": "ns1", "cluster": cluster}
    oc_map = OC_Map(integration=calling_int, namespaces=[namespace])
    # the disabled cluster must not resolve to a usable client
    self.assertFalse(oc_map.get(cluster["name"]))
def test_disabled_integration(self, mock_secret_reader, mock_oc):
    """A cluster that disables the calling integration gets no client.

    Fix: the mock parameter was misnamed ``selfmock_secret_reader`` (a
    fused typo); mocks are injected positionally by the patch
    decorators, so renaming it to match the sibling tests is safe.
    """
    calling_int = 'calling_integration'
    cluster = {
        'name': 'cl1',
        'serverUrl': 'http://localhost',
        'disable': {
            'integrations': [calling_int.replace('_', '-')]
        },
        'automationToken': {
            'path': 'some-path',
            'field': 'some-field'
        }
    }
    namespace = {'name': 'ns1', 'cluster': cluster}
    oc_map = OC_Map(integration=calling_int, namespaces=[namespace])
    # the disabled cluster must not resolve to a usable client
    self.assertFalse(oc_map.get(cluster['name']))
def test_missing_automationtoken(self):
    """
    A cluster whose automationToken is None must be skipped by OC_Map
    and recorded as an OCLogMsg.
    """
    cluster = {
        'name': 'test-1',
        'serverUrl': 'http://localhost',
        'automationToken': None
    }

    oc_map = OC_Map(clusters=[cluster])
    placeholder = oc_map.get(cluster['name'])

    self.assertIsInstance(placeholder, OCLogMsg)
    self.assertEqual(placeholder.message,
                     f'[{cluster["name"]}] has no automation token')
    self.assertEqual(len(oc_map.clusters()), 0)
def run(self):
    """Collect image manifest vulns from all clusters and post them."""
    clusters = queries.get_clusters()
    oc_map = OC_Map(clusters=clusters,
                    integration=QONTRACT_INTEGRATION,
                    settings=self.settings,
                    use_jump_host=True,
                    thread_pool_size=self.thread_pool_size)
    # fetch per-cluster manifests in parallel, then post them
    vuln_manifests = threaded.run(func=self._get_imagemanifestvuln,
                                  iterable=oc_map.clusters(),
                                  thread_pool_size=self.thread_pool_size,
                                  oc_map=oc_map)
    threaded.run(func=self._post,
                 iterable=vuln_manifests,
                 thread_pool_size=self.thread_pool_size)
def run(dry_run, vault_output_path):
    """Get Hive ClusterDeployments from clusters and save mapping to Vault"""
    if not vault_output_path:
        logging.error('must supply vault output path')
        sys.exit(ExitCodes.ERROR)

    clusters = queries.get_clusters()
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(clusters=clusters,
                    integration=QONTRACT_INTEGRATION,
                    thread_pool_size=1,
                    settings=settings,
                    init_api_resources=True)
    results = []
    for c in clusters:
        name = c['name']
        oc = oc_map.get(name)
        if not oc:
            # no client for this cluster (init failed or disabled): skip
            continue
        if 'ClusterDeployment' not in oc.api_resources:
            # cluster does not run Hive: nothing to collect
            continue
        logging.info(f'[{name}] getting ClusterDeployments')
        cds = oc.get_all('ClusterDeployment', all_namespaces=True)['items']
        for cd in cds:
            try:
                item = {
                    'id': cd['spec']['clusterMetadata']['clusterID'],
                    'cluster': name,
                }
                results.append(item)
            except KeyError:
                # deployment without clusterMetadata/clusterID: ignore
                pass

    if not dry_run:
        logging.info('writing ClusterDeployments to vault')
        vault_client = VaultClient()
        # one "<id>: <cluster>" line per deployment, stored as plain text
        secret = {
            'path': f"{vault_output_path}/{QONTRACT_INTEGRATION}",
            'data': {
                'map': '\n'.join(f"{item['id']}: {item['cluster']}"
                                 for item in results)
            }
        }
        vault_client.write(secret, decode_base64=False)
def test_missing_cluster_automation_token(self, mock_secret_reader, mock_oc):
    """
    A clusterAdmin namespace on a cluster without a cluster-admin token
    yields a regular client but no privileged one.
    """
    cluster = {
        'name': 'cl1',
        'serverUrl': 'http://localhost',
        'automationToken': {
            'path': 'some-path',
            'field': 'some-field'
        }
    }
    namespace = {
        'name': 'ns1',
        'clusterAdmin': True,
        'cluster': cluster
    }

    oc_map = OC_Map(namespaces=[namespace])

    # check that non-priv OC got instantiated but priv one not
    self.assertEqual(oc_map.clusters(), ["cl1"])
    self.assertEqual(oc_map.clusters(privileged=True), [])
    self.assertEqual(oc_map.clusters(include_errors=True, privileged=True),
                     [cluster['name']])
    self.assertIsInstance(oc_map.get(cluster['name']), OC)
    self.assertFalse(oc_map.get(cluster['name'], privileged=True))
def test_missing_cluster_automation_token(self, mock_secret_reader, mock_oc):
    """
    A clusterAdmin namespace on a cluster lacking a cluster-admin token
    must produce a regular client only, never a privileged one.
    """
    target_cluster = {
        "name": "cl1",
        "serverUrl": "http://localhost",
        "automationToken": {
            "path": "some-path",
            "field": "some-field"
        },
    }
    admin_namespace = {
        "name": "ns1",
        "clusterAdmin": True,
        "cluster": target_cluster
    }

    oc_map = OC_Map(namespaces=[admin_namespace])

    # the non-privileged client exists; the privileged one does not
    self.assertEqual(oc_map.clusters(), ["cl1"])
    self.assertEqual(oc_map.clusters(privileged=True), [])
    self.assertEqual(oc_map.clusters(include_errors=True, privileged=True),
                     [target_cluster["name"]])
    self.assertIsInstance(oc_map.get(target_cluster["name"]), OC)
    self.assertFalse(oc_map.get(target_cluster["name"], privileged=True))
def run(dry_run, vault_output_path):
    """Get Hive ClusterDeployments from clusters and save mapping to Vault"""
    if not vault_output_path:
        logging.error("must supply vault output path")
        sys.exit(ExitCodes.ERROR)

    clusters = queries.get_clusters()
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(
        clusters=clusters,
        integration=QONTRACT_INTEGRATION,
        thread_pool_size=1,
        settings=settings,
        init_api_resources=True,
    )
    results = []
    for c in clusters:
        name = c["name"]
        oc = oc_map.get(name)
        if not oc:
            # no client for this cluster (init failed or disabled): skip
            continue
        if "ClusterDeployment" not in oc.api_resources:
            # cluster does not run Hive: nothing to collect
            continue
        logging.info(f"[{name}] getting ClusterDeployments")
        cds = oc.get_all("ClusterDeployment", all_namespaces=True)["items"]
        for cd in cds:
            try:
                item = {
                    "id": cd["spec"]["clusterMetadata"]["clusterID"],
                    "cluster": name,
                }
                results.append(item)
            except KeyError:
                # deployment without clusterMetadata/clusterID: ignore
                pass

    if not dry_run:
        logging.info("writing ClusterDeployments to vault")
        vault_client = VaultClient()
        # one "<id>: <cluster>" line per deployment, stored as plain text
        secret = {
            "path": f"{vault_output_path}/{QONTRACT_INTEGRATION}",
            "data": {
                "map": "\n".join(f"{item['id']}: {item['cluster']}"
                                 for item in results)
            },
        }
        vault_client.write(secret, decode_base64=False)
def test_missing_automationtoken(self):
    """
    A cluster whose automationToken is None must be skipped and
    recorded as an OCLogMsg.
    """
    cluster = {
        "name": "test-1",
        "serverUrl": "http://localhost",
        "automationToken": None,
    }

    oc_map = OC_Map(clusters=[cluster])
    entry = oc_map.get(cluster["name"])

    self.assertIsInstance(entry, OCLogMsg)
    self.assertEqual(
        entry.message,
        f'[{cluster["name"]}] has no automation token',
    )
    self.assertEqual(len(oc_map.clusters()), 0)
def test_missing_serverurl(self):
    """
    A cluster with an empty serverUrl must be skipped and recorded as
    an OCLogMsg.
    """
    cluster = {
        "name": "test-1",
        "serverUrl": "",
        "automationToken": {
            "path": "some-path",
            "field": "some-field"
        },
    }

    oc_map = OC_Map(clusters=[cluster])
    entry = oc_map.get(cluster["name"])

    self.assertIsInstance(entry, OCLogMsg)
    self.assertEqual(entry.message,
                     f'[{cluster["name"]}] has no serverUrl')
    self.assertEqual(len(oc_map.clusters()), 0)
def test_automationtoken_not_found(self, mock_secret_reader):
    """
    A cluster whose automationToken secret is missing in the secret
    store yields an OCLogMsg, not a client.
    """
    mock_secret_reader.side_effect = SecretNotFound
    cluster = {
        "name": "test-1",
        "serverUrl": "http://localhost",
        "automationToken": {
            "path": "some-path",
            "field": "some-field"
        },
    }

    oc_map = OC_Map(clusters=[cluster])
    entry = oc_map.get(cluster["name"])

    self.assertIsInstance(entry, OCLogMsg)
    self.assertEqual(entry.message,
                     f'[{cluster["name"]}] secret not found')
    self.assertEqual(len(oc_map.clusters()), 0)
def get_oc_map(namespaces: List[Any], internal: Optional[bool],
               use_jump_host: bool, thread_pool_size: int) -> OC_Map:
    """Get an OC_Map for our namespaces."""
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(
        namespaces=namespaces,
        integration=QONTRACT_INTEGRATION,
        settings=settings,
        internal=internal,
        use_jump_host=use_jump_host,
        thread_pool_size=thread_pool_size,
        init_projects=True,
    )
    return oc_map
def test_missing_serverurl(self):
    """
    A cluster whose serverUrl is empty must be skipped and recorded as
    an OCLogMsg.
    """
    cluster = {
        'name': 'test-1',
        'serverUrl': '',
        'automationToken': {
            'path': 'some-path',
            'field': 'some-field'
        }
    }

    oc_map = OC_Map(clusters=[cluster])
    log_entry = oc_map.get(cluster['name'])

    self.assertIsInstance(log_entry, OCLogMsg)
    self.assertEqual(log_entry.message,
                     f'[{cluster["name"]}] has no serverUrl')
    self.assertEqual(len(oc_map.clusters()), 0)
def test_automationtoken_not_found(self, mock_secret_reader):
    """
    A missing automationToken secret in the secret store yields an
    OCLogMsg placeholder instead of a client.
    """
    mock_secret_reader.side_effect = SecretNotFound
    cluster = {
        'name': 'test-1',
        'serverUrl': 'http://localhost',
        'automationToken': {
            'path': 'some-path',
            'field': 'some-field'
        }
    }

    oc_map = OC_Map(clusters=[cluster])
    log_entry = oc_map.get(cluster['name'])

    self.assertIsInstance(log_entry, OCLogMsg)
    self.assertEqual(log_entry.message,
                     f'[{cluster["name"]}] secret not found')
    self.assertEqual(len(oc_map.clusters()), 0)
def label(inv_item: Tuple[str, str, Types], oc_map: OC_Map, dry_run: bool,
          inventory: LabelInventory):
    """Apply the pending label changes for one cluster/namespace pair."""
    cluster, namespace, types = inv_item
    if inventory.errors(cluster, namespace):
        # an earlier step recorded an error for this namespace: skip it
        return
    changed = types.get(CHANGED, {})
    if not changed:
        return

    msg = f'Updating labels on {cluster}/{namespace}: {changed}'
    if dry_run:
        msg = "[dry-run] " + msg
    _LOG.info(msg)

    if not dry_run:
        oc: OCNative = oc_map.get(cluster)
        oc.label(None, 'Namespace', namespace, changed, overwrite=True)