def test_missing_cluster_automation_token(self, mock_secret_reader, mock_oc):
    """A namespace asking for clusterAdmin on a cluster that only has a
    regular automationToken yields a normal client but no privileged one."""
    cluster_1 = {
        'name': 'cl1',
        'serverUrl': 'http://localhost',
        'automationToken': {'path': 'some-path', 'field': 'some-field'},
    }
    namespace_1 = {'name': 'ns1', 'clusterAdmin': True, 'cluster': cluster_1}

    oc_map = OC_Map(namespaces=[namespace_1])

    cluster_name = cluster_1['name']
    # check that non-priv OC got instantiated but priv one not
    self.assertEqual(oc_map.clusters(), ["cl1"])
    self.assertEqual(oc_map.clusters(privileged=True), [])
    self.assertEqual(
        oc_map.clusters(include_errors=True, privileged=True),
        [cluster_name])
    self.assertIsInstance(oc_map.get(cluster_name), OC)
    self.assertFalse(oc_map.get(cluster_name, privileged=True))
def test_missing_cluster_automation_token(self, mock_secret_reader, mock_oc):
    """A clusterAdmin namespace on a cluster lacking a cluster-admin token
    gets a regular client only; the privileged lookup stays falsy."""
    cluster_1 = {
        "name": "cl1",
        "serverUrl": "http://localhost",
        "automationToken": {"path": "some-path", "field": "some-field"},
    }
    namespace_1 = {"name": "ns1", "clusterAdmin": True, "cluster": cluster_1}

    oc_map = OC_Map(namespaces=[namespace_1])

    cluster_name = cluster_1["name"]
    # check that non-priv OC got instantiated but priv one not
    self.assertEqual(oc_map.clusters(), ["cl1"])
    self.assertEqual(oc_map.clusters(privileged=True), [])
    self.assertEqual(
        oc_map.clusters(include_errors=True, privileged=True),
        [cluster_name])
    self.assertIsInstance(oc_map.get(cluster_name), OC)
    self.assertFalse(oc_map.get(cluster_name, privileged=True))
def test_clusters_errors_with_include_errors(self, mock_secret_reader, mock_oc):
    """
    With the include_errors kwarg set to true, clusters that didn't
    initialize a client are still included.
    """
    cluster_1 = {'name': 'test-1', 'serverUrl': 'http://localhost'}
    cluster_2 = {
        'name': 'test-2',
        'serverUrl': 'http://localhost',
        'automationToken': {'path': 'some-path', 'field': 'some-field'},
    }
    expected_names = [c['name'] for c in (cluster_1, cluster_2)]

    oc_map = OC_Map(clusters=[cluster_1, cluster_2])

    self.assertEqual(oc_map.clusters(include_errors=True), expected_names)
    # the failed cluster is held as an OCLogMsg instead of a client
    self.assertIsInstance(oc_map.oc_map.get(cluster_1['name']), OCLogMsg)
def test_clusters_errors_with_include_errors(self, mock_secret_reader, mock_oc):
    """
    With the include_errors kwarg set to true, clusters that didn't
    initialize a client are still included.
    """
    cluster_1 = {"name": "test-1", "serverUrl": "http://localhost"}
    cluster_2 = {
        "name": "test-2",
        "serverUrl": "http://localhost",
        "automationToken": {"path": "some-path", "field": "some-field"},
    }
    expected_names = [c["name"] for c in (cluster_1, cluster_2)]

    oc_map = OC_Map(clusters=[cluster_1, cluster_2])

    self.assertEqual(oc_map.clusters(include_errors=True), expected_names)
    # the failed cluster is held as an OCLogMsg instead of a client
    self.assertIsInstance(oc_map.oc_map.get(cluster_1["name"]), OCLogMsg)
def get_desired_state(internal, use_jump_host, thread_pool_size):
    """Build the desired state for this integration's shard.

    Queries all namespaces, keeps only those whose cluster/namespace key
    falls in this shard, and returns (oc_map, desired_state) where
    desired_state is a list of {"cluster", "namespace"} dicts restricted
    to clusters with a working client.
    """
    gqlapi = gql.get_api()
    namespaces = [
        ns for ns in gqlapi.query(QUERY)['namespaces']
        if is_in_shard(f'{ns["cluster"]["name"]}/{ns["name"]}')
    ]

    ri = ResourceInventory()
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(namespaces=namespaces,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size,
                    init_projects=True)
    ob.init_specs_to_fetch(ri, oc_map,
                           namespaces=namespaces,
                           override_managed_types=['Namespace'])

    # skip clusters whose client failed to initialize
    desired_state = [
        {"cluster": cluster, "namespace": namespace}
        for cluster, namespace, _, _ in ri
        if cluster in oc_map.clusters()
    ]
    return oc_map, desired_state
def run(dry_run, thread_pool_size=10, internal=None, use_jump_host=True,
        defer=None):
    """Notify a cluster's Slack usergroup when a managed upgrade starts.

    For each OCM cluster, reads the UpgradeConfig from the
    openshift-managed-upgrade-operator namespace and, the first time
    'now' passes the scheduled upgradeAt time, posts a Slack message.
    A State entry keyed by (cluster, upgradeAt) ensures each upgrade is
    announced only once.

    :param dry_run: when True, neither writes state nor posts to Slack
    :param thread_pool_size: passed through to OC_Map client creation
    :param internal: cluster filter forwarded to OC_Map
    :param use_jump_host: forwarded to OC_Map
    :param defer: callback used to schedule OC_Map cleanup
    """
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    # only clusters managed by OCM are considered
    clusters = [c for c in queries.get_clusters(minimal=True) if c.get("ocm")]
    oc_map = OC_Map(
        clusters=clusters,
        integration=QONTRACT_INTEGRATION,
        settings=settings,
        internal=internal,
        use_jump_host=use_jump_host,
        thread_pool_size=thread_pool_size,
    )
    defer(oc_map.cleanup)
    state = State(integration=QONTRACT_INTEGRATION, accounts=accounts,
                  settings=settings)
    if not dry_run:
        slack = slackapi_from_queries(QONTRACT_INTEGRATION)

    now = datetime.utcnow()
    for cluster in oc_map.clusters(include_errors=True):
        oc = oc_map.get(cluster)
        if not oc:
            # a falsy client is an OCLogMsg carrying the init failure
            logging.log(level=oc.log_level, msg=oc.message)
            continue
        upgrade_config = oc.get(
            namespace="openshift-managed-upgrade-operator",
            kind="UpgradeConfig",
            allow_not_found=True,
        )["items"]
        if not upgrade_config:
            logging.debug(f"[{cluster}] UpgradeConfig not found.")
            continue
        # exactly one UpgradeConfig is expected per cluster
        [upgrade_config] = upgrade_config

        upgrade_spec = upgrade_config["spec"]
        upgrade_at = upgrade_spec["upgradeAt"]
        version = upgrade_spec["desired"]["version"]
        upgrade_at_obj = datetime.strptime(upgrade_at, "%Y-%m-%dT%H:%M:%SZ")
        state_key = f"{cluster}-{upgrade_at}"
        # if this is the first iteration in which 'now' had passed
        # the upgrade at date time, we send a notification
        if upgrade_at_obj < now:
            if state.exists(state_key):
                # already notified
                continue
            logging.info(["cluster_upgrade", cluster])
            if not dry_run:
                state.add(state_key)
                usergroup = f"{cluster}-cluster"
                usergroup_id = slack.get_usergroup_id(usergroup)
                slack.chat_post_message(
                    f"Heads up <!subteam^{usergroup_id}>! "
                    + f"cluster `{cluster}` is currently "
                    + f"being upgraded to version `{version}`")
def test_namespace_with_cluster_admin(self, mock_secret_reader, mock_oc):
    """A privileged client exists only for clusters whose namespaces
    request clusterAdmin; regular clients exist for every cluster."""
    cluster_1 = {
        'name': 'cl1',
        'serverUrl': 'http://localhost',
        'clusterAdminAutomationToken': {'path': 'some-path',
                                        'field': 'some-field'},
        'automationToken': {'path': 'some-path', 'field': 'some-field'},
    }
    cluster_2 = {
        'name': 'cl2',
        'serverUrl': 'http://localhost',
        'clusterAdminAutomationToken': {'path': 'some-path',
                                        'field': 'some-field'},
        'automationToken': {'path': 'some-path', 'field': 'some-field'},
    }
    namespace_1 = {'name': 'ns1', 'clusterAdmin': True, 'cluster': cluster_1}
    namespace_2 = {'name': 'ns2', 'cluster': cluster_2}

    oc_map = OC_Map(namespaces=[namespace_1, namespace_2])

    self.assertEqual(oc_map.clusters(), ["cl1", "cl2"])
    self.assertEqual(oc_map.clusters(privileged=True), ["cl1"])

    # both clusters are present as non privileged clusters in the map
    self.assertIsInstance(oc_map.get(cluster_1['name']), OC)
    self.assertIsInstance(oc_map.get(cluster_2['name']), OC)
    # only cluster_1 is present as privileged cluster in the map
    self.assertIsInstance(oc_map.get(cluster_1['name'], privileged=True), OC)
    self.assertIsInstance(oc_map.get(cluster_2['name'], privileged=True),
                          OCLogMsg)
def test_namespace_with_cluster_admin(self, mock_secret_reader, mock_oc):
    """clusterAdmin on a namespace drives creation of a privileged client
    for that cluster only; all clusters still get regular clients."""
    cluster_1 = {
        "name": "cl1",
        "serverUrl": "http://localhost",
        "clusterAdminAutomationToken": {"path": "some-path",
                                        "field": "some-field"},
        "automationToken": {"path": "some-path", "field": "some-field"},
    }
    cluster_2 = {
        "name": "cl2",
        "serverUrl": "http://localhost",
        "clusterAdminAutomationToken": {"path": "some-path",
                                        "field": "some-field"},
        "automationToken": {"path": "some-path", "field": "some-field"},
    }
    namespace_1 = {"name": "ns1", "clusterAdmin": True, "cluster": cluster_1}
    namespace_2 = {"name": "ns2", "cluster": cluster_2}

    oc_map = OC_Map(namespaces=[namespace_1, namespace_2])

    self.assertEqual(oc_map.clusters(), ["cl1", "cl2"])
    self.assertEqual(oc_map.clusters(privileged=True), ["cl1"])

    # both clusters are present as non privileged clusters in the map
    self.assertIsInstance(oc_map.get(cluster_1["name"]), OC)
    self.assertIsInstance(oc_map.get(cluster_2["name"]), OC)
    # only cluster_1 is present as privileged cluster in the map
    self.assertIsInstance(oc_map.get(cluster_1["name"], privileged=True), OC)
    self.assertIsInstance(oc_map.get(cluster_2["name"], privileged=True),
                          OCLogMsg)
def run(dry_run, thread_pool_size=10, internal=None, use_jump_host=True,
        defer=None):
    """Announce in-progress managed cluster upgrades to Slack.

    Scans each OCM cluster's openshift-managed-upgrade-operator namespace
    for an UpgradeConfig; once 'now' has passed the configured upgradeAt
    time, a one-off Slack notification is sent to the cluster usergroup.
    Deduplication uses a State entry keyed on cluster + upgradeAt.

    :param dry_run: when True, skip state writes and Slack posts
    :param thread_pool_size: passed through to OC_Map client creation
    :param internal: cluster filter forwarded to OC_Map
    :param use_jump_host: forwarded to OC_Map
    :param defer: callback used to schedule OC_Map cleanup
    """
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    # only clusters managed by OCM are considered
    clusters = [c for c in queries.get_clusters(minimal=True) if c.get('ocm')]
    oc_map = OC_Map(clusters=clusters,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)
    defer(oc_map.cleanup)
    state = State(integration=QONTRACT_INTEGRATION, accounts=accounts,
                  settings=settings)
    if not dry_run:
        slack = init_slack_workspace(QONTRACT_INTEGRATION)

    now = datetime.utcnow()
    for cluster in oc_map.clusters(include_errors=True):
        oc = oc_map.get(cluster)
        if not oc:
            # a falsy client is an OCLogMsg carrying the init failure
            logging.log(level=oc.log_level, msg=oc.message)
            continue
        upgrade_config = oc.get(
            namespace='openshift-managed-upgrade-operator',
            kind='UpgradeConfig',
            allow_not_found=True)['items']
        if not upgrade_config:
            logging.debug(f'[{cluster}] UpgradeConfig not found.')
            continue
        # exactly one UpgradeConfig is expected per cluster
        [upgrade_config] = upgrade_config

        upgrade_spec = upgrade_config['spec']
        upgrade_at = upgrade_spec['upgradeAt']
        version = upgrade_spec['desired']['version']
        upgrade_at_obj = datetime.strptime(upgrade_at, '%Y-%m-%dT%H:%M:%SZ')
        state_key = f'{cluster}-{upgrade_at}'
        # if this is the first iteration in which 'now' had passed
        # the upgrade at date time, we send a notification
        if upgrade_at_obj < now:
            if state.exists(state_key):
                # already notified
                continue
            logging.info(['cluster_upgrade', cluster])
            if not dry_run:
                state.add(state_key)
                usergroup = f'{cluster}-cluster'
                usergroup_id = slack.get_usergroup_id(usergroup)
                slack.chat_post_message(
                    f'Heads up <!subteam^{usergroup_id}>! '
                    + f'cluster `{cluster}` is currently '
                    + f'being upgraded to version `{version}`')
def fetch_current_state(thread_pool_size, internal, use_jump_host):
    """Gather the users of every reachable cluster.

    Returns (oc_map, current_state); current_state is the flattened list
    of per-cluster results produced by get_cluster_users.
    """
    clusters = queries.get_clusters(minimal=True)
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(clusters=clusters,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)
    results = threaded.run(get_cluster_users, oc_map.clusters(),
                           thread_pool_size, oc_map=oc_map)
    # flatten the list-of-lists returned by the thread pool
    current_state = []
    for sublist in results:
        current_state.extend(sublist)
    return oc_map, current_state
def test_clusters_errors_empty_return(self, mock_secret_reader):
    """
    clusters() shouldn't return the names of any clusters that didn't
    initialize a client successfully.
    """
    cluster = {'name': 'test-1', 'serverUrl': 'http://localhost'}

    oc_map = OC_Map(clusters=[cluster])

    # no working client -> no names reported
    self.assertEqual(oc_map.clusters(), [])
    # the failure is recorded in the internal map as an OCLogMsg
    self.assertIsInstance(oc_map.oc_map.get(cluster['name']), OCLogMsg)
def fetch_current_state(thread_pool_size, internal, use_jump_host):
    """Gather users from all clusters, including ones whose client
    failed to initialize (include_errors=True).

    Returns (oc_map, current_state) with current_state flattened from
    the per-cluster results of get_cluster_users.
    """
    clusters = queries.get_clusters(minimal=True)
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(clusters=clusters,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)
    results = threaded.run(get_cluster_users,
                           oc_map.clusters(include_errors=True),
                           thread_pool_size, oc_map=oc_map)
    # flatten the list-of-lists returned by the thread pool
    current_state = [item for sublist in results for item in sublist]
    return oc_map, current_state
def run(self):
    """Fetch imagemanifestvulns from every cluster and post the results."""
    clusters = queries.get_clusters()
    oc_map = OC_Map(clusters=clusters,
                    integration=QONTRACT_INTEGRATION,
                    settings=self.settings,
                    use_jump_host=True,
                    thread_pool_size=self.thread_pool_size)
    # fan out the per-cluster fetch, then fan out posting the findings
    manifests = threaded.run(func=self._get_imagemanifestvuln,
                             iterable=oc_map.clusters(),
                             thread_pool_size=self.thread_pool_size,
                             oc_map=oc_map)
    threaded.run(func=self._post,
                 iterable=manifests,
                 thread_pool_size=self.thread_pool_size)
def test_missing_automationtoken(self):
    """
    When a cluster with a missing automationToken is passed into OC_Map,
    it should be skipped.
    """
    cluster = {'name': 'test-1', 'serverUrl': 'http://localhost',
               'automationToken': None}

    oc_map = OC_Map(clusters=[cluster])

    result = oc_map.get(cluster['name'])
    self.assertIsInstance(result, OCLogMsg)
    self.assertEqual(result.message,
                     f'[{cluster["name"]}] has no automation token')
    self.assertEqual(len(oc_map.clusters()), 0)
def test_missing_automationtoken(self):
    """
    When a cluster with a missing automationToken is passed into OC_Map,
    it should be skipped.
    """
    cluster = {"name": "test-1", "serverUrl": "http://localhost",
               "automationToken": None}

    oc_map = OC_Map(clusters=[cluster])

    result = oc_map.get(cluster["name"])
    self.assertIsInstance(result, OCLogMsg)
    self.assertEqual(result.message,
                     f'[{cluster["name"]}] has no automation token')
    self.assertEqual(len(oc_map.clusters()), 0)
def test_automationtoken_not_found(self, mock_secret_reader):
    """A cluster whose automation token secret cannot be read is skipped
    and recorded as an OCLogMsg."""
    mock_secret_reader.side_effect = SecretNotFound
    cluster = {
        "name": "test-1",
        "serverUrl": "http://localhost",
        "automationToken": {"path": "some-path", "field": "some-field"},
    }

    oc_map = OC_Map(clusters=[cluster])

    result = oc_map.get(cluster["name"])
    self.assertIsInstance(result, OCLogMsg)
    self.assertEqual(result.message,
                     f'[{cluster["name"]}] secret not found')
    self.assertEqual(len(oc_map.clusters()), 0)
def test_automationtoken_not_found(self, mock_secret_reader):
    """A cluster whose automation token secret cannot be read is skipped
    and recorded as an OCLogMsg."""
    mock_secret_reader.side_effect = SecretNotFound
    cluster = {
        'name': 'test-1',
        'serverUrl': 'http://localhost',
        'automationToken': {'path': 'some-path', 'field': 'some-field'},
    }

    oc_map = OC_Map(clusters=[cluster])

    result = oc_map.get(cluster['name'])
    self.assertIsInstance(result, OCLogMsg)
    self.assertEqual(result.message,
                     f'[{cluster["name"]}] secret not found')
    self.assertEqual(len(oc_map.clusters()), 0)
def test_missing_serverurl(self):
    """
    When a cluster with a missing serverUrl is passed into OC_Map,
    it should be skipped.
    """
    cluster = {
        'name': 'test-1',
        'serverUrl': '',
        'automationToken': {'path': 'some-path', 'field': 'some-field'},
    }

    oc_map = OC_Map(clusters=[cluster])

    result = oc_map.get(cluster['name'])
    self.assertIsInstance(result, OCLogMsg)
    self.assertEqual(result.message,
                     f'[{cluster["name"]}] has no serverUrl')
    self.assertEqual(len(oc_map.clusters()), 0)
def test_missing_serverurl(self):
    """
    When a cluster with a missing serverUrl is passed into OC_Map,
    it should be skipped.
    """
    cluster = {
        "name": "test-1",
        "serverUrl": "",
        "automationToken": {"path": "some-path", "field": "some-field"},
    }

    oc_map = OC_Map(clusters=[cluster])

    result = oc_map.get(cluster["name"])
    self.assertIsInstance(result, OCLogMsg)
    self.assertEqual(result.message,
                     f'[{cluster["name"]}] has no serverUrl')
    self.assertEqual(len(oc_map.clusters()), 0)
def get_desired(inventory: LabelInventory, oc_map: OC_Map,
                namespaces: List[Any]) -> None:
    """
    Fill the provided label inventory with every desired info from the
    input namespaces. Ocm_map is used to not register clusters which are
    unreachable or not configured (due to --internal / --external)
    """
    duplicates = []
    for ns in namespaces:
        if "labels" not in ns:
            continue
        cluster, ns_name = get_names_for_namespace(ns)
        # Skip unreachable / non-handled clusters
        # eg: internal settings may not match --internal / --external param
        if cluster not in oc_map.clusters():
            continue
        labels = json.loads(ns["labels"])
        for err in validate_labels(labels):
            inventory.add_error(cluster, ns_name, err)
        if inventory.errors(cluster, ns_name):
            continue
        if inventory.get(cluster, ns_name, DESIRED) is not None:
            # delete at the end of the loop to avoid having a reinsertion
            # at the third/fifth/.. occurrences
            duplicates.append((cluster, ns_name))
            continue
        inventory.set(cluster, ns_name, DESIRED, labels)

    for cluster, ns_name in duplicates:
        # Log only a warning here and do not report errors nor fail the
        # integration.
        # A dedicated integration or PR check will be done to ensure this
        # case does not occur (anymore)
        _LOG.debug(f"Found several namespace definitions for "
                   f"{cluster}/{ns_name}. Ignoring")
        inventory.delete(cluster, ns_name)
def get_current(inventory: LabelInventory, oc_map: OC_Map,
                thread_pool_size: int) -> None:
    """
    Fill the provided label inventory with every current info from the
    reachable namespaces. Only namespaces already registered in the
    inventory will be updated. This avoids registering unhandled namespaces.
    """
    results = threaded.run(lookup_namespaces, oc_map.clusters(),
                           thread_pool_size, oc_map=oc_map)
    for cluster, ns_list in results:
        if ns_list is None:
            continue
        for ns in ns_list:
            meta = ns['metadata']
            ns_name = meta['name']
            # ignore namespaces which are not in our desired list
            if inventory.get(cluster, ns_name, DESIRED) is None:
                continue
            inventory.set(cluster, ns_name, CURRENT, meta.get('labels', {}))