def test_missing_cluster_automation_token(self, mock_secret_reader,
                                              mock_oc):
        cluster_1 = {
            "name": "cl1",
            "serverUrl": "http://localhost",
            "automationToken": {
                "path": "some-path",
                "field": "some-field"
            },
        }
        namespace_1 = {
            "name": "ns1",
            "clusterAdmin": True,
            "cluster": cluster_1
        }

        oc_map = OC_Map(namespaces=[namespace_1])

        # check that non-priv OC got instantiated but priv one not
        self.assertEqual(oc_map.clusters(), ["cl1"])
        self.assertEqual(oc_map.clusters(privileged=True), [])
        self.assertEqual(oc_map.clusters(include_errors=True, privileged=True),
                         [cluster_1["name"]])

        self.assertIsInstance(oc_map.get(cluster_1["name"]), OC)
        self.assertFalse(oc_map.get(cluster_1["name"], privileged=True))
def root_owner(ctx, cluster, namespace, kind, name):
    settings = queries.get_app_interface_settings()
    clusters = [
        c for c in queries.get_clusters(minimal=True) if c['name'] == cluster
    ]
    oc_map = OC_Map(clusters=clusters,
                    integration='qontract-cli',
                    thread_pool_size=1,
                    settings=settings,
                    init_api_resources=True)
    oc = oc_map.get(cluster)
    obj = oc.get(namespace, kind, name)
    root_owner = oc.get_obj_root_owner(namespace,
                                       obj,
                                       allow_not_found=True,
                                       allow_not_controller=True)

    # TODO(mafriedm): fix this
    # do not sort
    ctx.obj['options']['sort'] = False
    # a bit hacky, but ¯\_(ツ)_/¯
    if ctx.obj['options']['output'] != 'json':
        ctx.obj['options']['output'] = 'yaml'

    print_output(ctx.obj['options'], root_owner)
Example #4
def manage_namespaces(spec: Mapping[str, str], oc_map: OC_Map,
                      dry_run: bool) -> None:
    cluster = spec['cluster']
    namespace = spec['namespace']
    desired_state = spec['desired_state']

    oc = oc_map.get(cluster)
    if not oc:
        logging.log(level=oc.log_level, msg=oc.message)
        return None

    act = {
        NS_ACTION_CREATE: oc.new_project,
        NS_ACTION_DELETE: oc.delete_project
    }

    exists = oc.project_exists(namespace)
    action = None
    if not exists and desired_state == NS_STATE_PRESENT:
        action = NS_ACTION_CREATE
    elif exists and desired_state == NS_STATE_ABSENT:
        action = NS_ACTION_DELETE

    if action:
        logging.info([action, cluster, namespace])
        if not dry_run:
            act[action](namespace)
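
Note: the `if not oc` branch above still reads oc.log_level and oc.message.
That works because OC_Map.get returns a falsy OCLogMsg carrier on failure
instead of None. A minimal sketch of the pattern, with the class shape
inferred from its usage in these examples:

import logging
from dataclasses import dataclass

@dataclass
class OCLogMsg:
    """Falsy stand-in for an OC client; shape inferred from usage."""
    log_level: int
    message: str

    def __bool__(self):
        return False  # lets callers write `if not oc: ...`

oc = OCLogMsg(log_level=logging.ERROR, message="[cl1] has no automation token")
if not oc:
    logging.log(level=oc.log_level, msg=oc.message)
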
Example #5
def run(dry_run,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        defer=None):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    clusters = [c for c in queries.get_clusters(minimal=True) if c.get("ocm")]
    oc_map = OC_Map(
        clusters=clusters,
        integration=QONTRACT_INTEGRATION,
        settings=settings,
        internal=internal,
        use_jump_host=use_jump_host,
        thread_pool_size=thread_pool_size,
    )
    defer(oc_map.cleanup)
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)

    if not dry_run:
        slack = slackapi_from_queries(QONTRACT_INTEGRATION)

    now = datetime.utcnow()
    for cluster in oc_map.clusters(include_errors=True):
        oc = oc_map.get(cluster)
        if not oc:
            logging.log(level=oc.log_level, msg=oc.message)
            continue
        upgrade_config = oc.get(
            namespace="openshift-managed-upgrade-operator",
            kind="UpgradeConfig",
            allow_not_found=True,
        )["items"]
        if not upgrade_config:
            logging.debug(f"[{cluster}] UpgradeConfig not found.")
            continue
        [upgrade_config] = upgrade_config

        upgrade_spec = upgrade_config["spec"]
        upgrade_at = upgrade_spec["upgradeAt"]
        version = upgrade_spec["desired"]["version"]
        upgrade_at_obj = datetime.strptime(upgrade_at, "%Y-%m-%dT%H:%M:%SZ")
        state_key = f"{cluster}-{upgrade_at}"
        # if this is the first iteration in which 'now' has passed
        # the upgradeAt datetime, send a notification
        if upgrade_at_obj < now:
            if state.exists(state_key):
                # already notified
                continue
            logging.info(["cluster_upgrade", cluster])
            if not dry_run:
                state.add(state_key)
                usergroup = f"{cluster}-cluster"
                usergroup_id = slack.get_usergroup_id(usergroup)
                slack.chat_post_message(
                    f"Heads up <!subteam^{usergroup_id}>! " +
                    f"cluster `{cluster}` is currently " +
                    f"being upgraded to version `{version}`")
    def test_namespace_with_cluster_admin(self, mock_secret_reader, mock_oc):
        cluster_1 = {
            "name": "cl1",
            "serverUrl": "http://localhost",
            "clusterAdminAutomationToken": {
                "path": "some-path",
                "field": "some-field"
            },
            "automationToken": {
                "path": "some-path",
                "field": "some-field"
            },
        }
        cluster_2 = {
            "name": "cl2",
            "serverUrl": "http://localhost",
            "clusterAdminAutomationToken": {
                "path": "some-path",
                "field": "some-field"
            },
            "automationToken": {
                "path": "some-path",
                "field": "some-field"
            },
        }
        namespace_1 = {
            "name": "ns1",
            "clusterAdmin": True,
            "cluster": cluster_1
        }

        namespace_2 = {"name": "ns2", "cluster": cluster_2}

        oc_map = OC_Map(namespaces=[namespace_1, namespace_2])

        self.assertEqual(oc_map.clusters(), ["cl1", "cl2"])
        self.assertEqual(oc_map.clusters(privileged=True), ["cl1"])

        # both clusters are present as non-privileged clusters in the map
        self.assertIsInstance(oc_map.get(cluster_1["name"]), OC)
        self.assertIsInstance(oc_map.get(cluster_2["name"]), OC)

        # only cluster_1 is present as a privileged cluster in the map
        self.assertIsInstance(oc_map.get(cluster_1["name"], privileged=True),
                              OC)
        self.assertIsInstance(oc_map.get(cluster_2["name"], privileged=True),
                              OCLogMsg)
Example #8
    def test_missing_automationtoken(self):
        """
        When a cluster with a missing automationToken is passed into OC_Map, it
        should be skipped.
        """
        cluster = {
            'name': 'test-1',
            'serverUrl': 'http://localhost',
            'automationToken': None
        }
        oc_map = OC_Map(clusters=[cluster])

        self.assertIsInstance(oc_map.get(cluster['name']), OCLogMsg)
        self.assertEqual(
            oc_map.get(cluster['name']).message,
            f'[{cluster["name"]}] has no automation token')
        self.assertEqual(len(oc_map.clusters()), 0)
Example #10
def run(dry_run,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        defer=None):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    clusters = [c for c in queries.get_clusters(minimal=True) if c.get('ocm')]
    oc_map = OC_Map(clusters=clusters,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)
    defer(oc_map.cleanup)
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)

    if not dry_run:
        slack = init_slack_workspace(QONTRACT_INTEGRATION)

    now = datetime.utcnow()
    for cluster in oc_map.clusters(include_errors=True):
        oc = oc_map.get(cluster)
        if not oc:
            logging.log(level=oc.log_level, msg=oc.message)
            continue
        upgrade_config = oc.get(namespace='openshift-managed-upgrade-operator',
                                kind='UpgradeConfig',
                                allow_not_found=True)['items']
        if not upgrade_config:
            logging.debug(f'[{cluster}] UpgradeConfig not found.')
            continue
        [upgrade_config] = upgrade_config

        upgrade_spec = upgrade_config['spec']
        upgrade_at = upgrade_spec['upgradeAt']
        version = upgrade_spec['desired']['version']
        upgrade_at_obj = datetime.strptime(upgrade_at, '%Y-%m-%dT%H:%M:%SZ')
        state_key = f'{cluster}-{upgrade_at}'
        # if this is the first iteration in which 'now' has passed
        # the upgradeAt datetime, send a notification
        if upgrade_at_obj < now:
            if state.exists(state_key):
                # already notified
                continue
            logging.info(['cluster_upgrade', cluster])
            if not dry_run:
                state.add(state_key)
                usergroup = f'{cluster}-cluster'
                usergroup_id = slack.get_usergroup_id(usergroup)
                slack.chat_post_message(
                    f'Heads up <!subteam^{usergroup_id}>! ' +
                    f'cluster `{cluster}` is currently ' +
                    f'being upgraded to version `{version}`')
    def test_internal_clusters(self, mock_secret_reader, mock_oc):
        cluster = {
            'name': 'cl1',
            'serverUrl': 'http://localhost',
            'internal': True,
            'automationToken': {
                'path': 'some-path',
                'field': 'some-field'
            }
        }
        namespace = {'name': 'ns1', 'cluster': cluster}

        # internal cluster must be in oc_map when internal is enabled
        internal_oc_map = OC_Map(internal=True, namespaces=[namespace])
        self.assertIsInstance(internal_oc_map.get(cluster['name']), OC)

        # internal cluster must not be in oc_map when internal is disabled
        oc_map = OC_Map(internal=False, namespaces=[namespace])
        self.assertFalse(oc_map.get(cluster['name']))
Example #13
    def test_automationtoken_not_found(self, mock_secret_reader):
        mock_secret_reader.side_effect = SecretNotFound

        cluster = {
            'name': 'test-1',
            'serverUrl': 'http://localhost',
            'automationToken': {
                'path': 'some-path',
                'field': 'some-field'
            }
        }

        oc_map = OC_Map(clusters=[cluster])

        self.assertIsInstance(oc_map.get(cluster['name']), OCLogMsg)
        self.assertEqual(
            oc_map.get(cluster['name']).message,
            f'[{cluster["name"]}] secret not found')
        self.assertEqual(len(oc_map.clusters()), 0)
    def test_missing_serverurl(self):
        """
        When a cluster with a missing serverUrl is passed into OC_Map, it
        should be skipped.
        """
        cluster = {
            "name": "test-1",
            "serverUrl": "",
            "automationToken": {
                "path": "some-path",
                "field": "some-field"
            },
        }
        oc_map = OC_Map(clusters=[cluster])

        self.assertIsInstance(oc_map.get(cluster["name"]), OCLogMsg)
        self.assertEqual(
            oc_map.get(cluster["name"]).message,
            f'[{cluster["name"]}] has no serverUrl')
        self.assertEqual(len(oc_map.clusters()), 0)
def label(inv_item: Tuple[str, str, Types], oc_map: OC_Map, dry_run: bool,
          inventory: LabelInventory):
    cluster, namespace, types = inv_item
    if inventory.errors(cluster, namespace):
        return
    changed = types.get(CHANGED, {})
    if changed:
        prefix = "[dry-run] " if dry_run else ""
        _LOG.info(prefix +
                  f'Updating labels on {cluster}/{namespace}: {changed}')
        if not dry_run:
            oc: OCNative = oc_map.get(cluster)
            oc.label(None, 'Namespace', namespace, changed, overwrite=True)
Example #18
def run(dry_run,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        defer=None):
    settings = queries.get_app_interface_settings()
    pipelines_providers = queries.get_pipelines_providers()
    tkn_namespaces = [
        pp['namespace'] for pp in pipelines_providers
        if pp['provider'] == Providers.TEKTON
    ]

    oc_map = OC_Map(namespaces=tkn_namespaces,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)
    defer(lambda: oc_map.cleanup())

    for pp in pipelines_providers:
        retention = pp.get('retention')
        if not retention:
            continue

        if pp['provider'] == Providers.TEKTON:
            ns_info = pp['namespace']
            namespace = ns_info['name']
            cluster = ns_info['cluster']['name']
            oc = oc_map.get(cluster)
            pipeline_runs = sorted(
                oc.get(namespace, 'PipelineRun')['items'],
                key=lambda k: k['metadata']['creationTimestamp'])

            retention_min = retention.get('minimum')
            if retention_min:
                pipeline_runs = pipeline_runs[retention_min:]

            retention_days = retention.get('days')
            for pr in pipeline_runs:
                name = pr['metadata']['name']
                if retention_days and \
                        within_retention_days(pr, retention_days):
                    continue

                logging.info([
                    'delete_trigger', cluster, namespace, 'PipelineRun', name
                ])
                if not dry_run:
                    oc.delete(namespace, 'PipelineRun', name)
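
Note: within_retention_days is defined elsewhere in the module. Based on how
it is called above, a plausible sketch (an assumption, not the repo's
implementation):

from datetime import datetime, timedelta

def within_retention_days(pr, retention_days):
    # True if the PipelineRun's creationTimestamp is newer than the cutoff.
    created = datetime.strptime(pr['metadata']['creationTimestamp'],
                                '%Y-%m-%dT%H:%M:%SZ')
    return datetime.utcnow() - created < timedelta(days=retention_days)
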
    def test_disabled_integration(self, mock_secret_reader, mock_oc):
        calling_int = 'calling_integration'
        cluster = {
            'name': 'cl1',
            'serverUrl': 'http://localhost',
            'disable': {
                'integrations': [calling_int.replace('_', '-')]
            },
            'automationToken': {
                'path': 'some-path',
                'field': 'some-field'
            }
        }
        namespace = {'name': 'ns1', 'cluster': cluster}

        oc_map = OC_Map(integration=calling_int, namespaces=[namespace])
        self.assertFalse(oc_map.get(cluster['name']))
def run(dry_run, vault_output_path):
    """Get Hive ClusterDeployments from clusters and save mapping to Vault"""
    if not vault_output_path:
        logging.error('must supply vault output path')
        sys.exit(ExitCodes.ERROR)

    clusters = queries.get_clusters()
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(clusters=clusters,
                    integration=QONTRACT_INTEGRATION,
                    thread_pool_size=1,
                    settings=settings,
                    init_api_resources=True)
    results = []
    for c in clusters:
        name = c['name']
        oc = oc_map.get(name)
        if not oc:
            continue
        if 'ClusterDeployment' not in oc.api_resources:
            continue
        logging.info(f'[{name}] getting ClusterDeployments')
        cds = oc.get_all('ClusterDeployment', all_namespaces=True)['items']
        for cd in cds:
            try:
                item = {
                    'id': cd['spec']['clusterMetadata']['clusterID'],
                    'cluster': name,
                }
                results.append(item)
            except KeyError:
                pass

    if not dry_run:
        logging.info('writing ClusterDeployments to vault')
        vault_client = VaultClient()
        secret = {
            'path': f"{vault_output_path}/{QONTRACT_INTEGRATION}",
            'data': {
                'map': '\n'.join(f"{item['id']}: {item['cluster']}"
                                 for item in results)
            }
        }
        vault_client.write(secret, decode_base64=False)
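
Note: the 'map' value written to Vault is a newline-joined "id: cluster"
listing. A quick look at the payload shape, with hypothetical IDs:

results = [
    {'id': '1a2b3c', 'cluster': 'hive-prod'},
    {'id': '4d5e6f', 'cluster': 'hive-stage'},
]
print('\n'.join(f"{item['id']}: {item['cluster']}" for item in results))
# 1a2b3c: hive-prod
# 4d5e6f: hive-stage
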
def delete(dry_run: bool,
           oc_map: OC_Map,
           cluster: str,
           namespace: str,
           resource_type: str,
           name: str,
           enable_deletion: bool,
           privileged: bool = False) -> None:
    logging.info(['delete', cluster, namespace, resource_type, name])

    if not enable_deletion:
        logging.error('\'delete\' action is disabled due to previous errors.')
        return

    oc = oc_map.get(cluster, privileged)
    if not oc:
        logging.log(level=oc.log_level, msg=oc.message)
        return None
    if not dry_run:
        oc.delete(namespace, resource_type, name)
Example #24
    def _fetch_oc_secret(self) -> str:
        parts = self._command_data.openshift_path.split("/")
        if len(parts) != 3:
            raise ArgumentException(
                f"Wrong format! --openshift-path must be of format {{cluster}}/{{namespace}}/{{secret}}. Got {self._command_data.openshift_path}"
            )
        cluster_name, namespace, secret = parts
        clusters = queries.get_clusters_by(
            filter=queries.ClusterFilter(
                name=cluster_name,
            )
        )

        if not clusters:
            raise ArgumentException(f"No cluster found with name '{cluster_name}'")

        settings = queries.get_app_interface_settings()
        data = {}

        try:
            oc_map = OC_Map(
                clusters=clusters,
                integration="qontract-cli",
                settings=settings,
                use_jump_host=True,
                thread_pool_size=1,
                init_projects=False,
            )
            oc = oc_map.get(cluster_name)
            data = oc.get(namespace, "Secret", name=secret, allow_not_found=False)[
                "data"
            ]
        except Exception as e:
            raise OpenshiftException(
                f"Could not fetch secret from Openshift cluster {cluster_name}"
            ) from e

        return GPGEncryptCommand._format(data)
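
Note: --openshift-path must carry exactly three /-separated segments. A quick
illustration with hypothetical values:

ok = "prod-cluster/app-ns/db-creds".split("/")
bad = "app-ns/db-creds".split("/")
assert len(ok) == 3   # unpacks into cluster_name, namespace, secret
assert len(bad) != 3  # would raise ArgumentException above
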
def lookup_namespaces(cluster: str, oc_map: OC_Map):
    """
    Retrieve all namespaces from the given cluster
    """
    try:
        oc = oc_map.get(cluster)
        if not oc:
            # cluster is not reachable (perhaps excluded by --internal / --external?)
            _LOG.debug(f"Skipping not-handled cluster: {cluster}")
            return cluster, None
        _LOG.debug(f"Looking up namespaces on {cluster}")
        namespaces = oc.get_all("Namespace")
        if namespaces:
            return cluster, namespaces["items"]
    except StatusCodeError as e:
        msg = "cluster: {}, exception: {}"
        msg = msg.format(cluster, str(e))
        _LOG.error(msg)
    except ApiException as e:
        _LOG.error(f"Cluster {cluster} skipped: "
                   f"APIException [{e.status}:{e.reason}] {e.body}")

    return cluster, None
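
Note: returning (cluster, None) on every failure keeps the result shape
uniform, which suits fanning this lookup out over many clusters. A sketch of
how results might be consumed; the pooling here is illustrative, not the
repo's helper:

from concurrent.futures import ThreadPoolExecutor

def all_cluster_namespaces(clusters, oc_map):
    with ThreadPoolExecutor(max_workers=10) as pool:
        results = pool.map(lambda c: lookup_namespaces(c, oc_map), clusters)
    # Keep only clusters that actually returned namespace items.
    return {cluster: items for cluster, items in results if items is not None}
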
def apply(dry_run: bool,
          oc_map: OC_Map,
          cluster: str,
          namespace: str,
          resource_type: OR,
          resource,
          wait_for_namespace: bool,
          recycle_pods: bool = True,
          privileged: bool = False) -> None:
    logging.info(['apply', cluster, namespace, resource_type, resource.name])

    oc = oc_map.get(cluster, privileged)
    if not oc:
        logging.log(level=oc.log_level, msg=oc.message)
        return None
    if not dry_run:
        annotated = resource.annotate()
        # skip if namespace does not exist (as it will soon)
        # do not skip if this is a cluster-scoped integration
        if namespace != 'cluster' and not oc.project_exists(namespace):
            msg = f"[{cluster}/{namespace}] namespace does not exist (yet)."
            if wait_for_namespace:
                logging.info(msg + ' waiting...')
                wait_for_namespace_exists(oc, namespace)
            else:
                logging.warning(msg)
                return

        try:
            oc.apply(namespace, annotated)
        except InvalidValueApplyError:
            oc.remove_last_applied_configuration(namespace, resource_type,
                                                 resource.name)
            oc.apply(namespace, annotated)
        except (MetaDataAnnotationsTooLongApplyError,
                UnsupportedMediaTypeError):
            if not oc.get(
                    namespace, resource_type, resource.name,
                    allow_not_found=True):
                oc.create(namespace, annotated)
            oc.replace(namespace, annotated)
        except FieldIsImmutableError:
            # Add more resource types to the list when you're
            # sure they're safe.
            if resource_type not in ['Route', 'Service', 'Secret']:
                raise

            oc.delete(namespace=namespace,
                      kind=resource_type,
                      name=resource.name)
            oc.apply(namespace=namespace, resource=annotated)
        except (MayNotChangeOnceSetError, PrimaryClusterIPCanNotBeUnsetError):
            if resource_type not in ['Service']:
                raise

            oc.delete(namespace=namespace,
                      kind=resource_type,
                      name=resource.name)
            oc.apply(namespace=namespace, resource=annotated)
        except StatefulSetUpdateForbidden:
            if resource_type != 'StatefulSet':
                raise

            logging.info([
                'delete_sts_and_apply', cluster, namespace, resource_type,
                resource.name
            ])
            current_resource = oc.get(namespace, resource_type, resource.name)
            current_storage = oc.get_storage(current_resource)
            desired_storage = oc.get_storage(resource.body)
            resize_required = current_storage != desired_storage
            if resize_required:
                owned_pods = oc.get_owned_pods(namespace, resource)
                owned_pvc_names = oc.get_pod_owned_pvc_names(owned_pods)
            oc.delete(namespace=namespace,
                      kind=resource_type,
                      name=resource.name,
                      cascade=False)
            oc.apply(namespace=namespace, resource=annotated)
            # the resource was applied without cascading.
            # if the change was in the storage, we need to
            # take care of the resize ourselves.
            # ref: https://github.com/kubernetes/enhancements/pull/2842
            if resize_required:
                logging.info(
                    ['resizing_pvcs', cluster, namespace, owned_pvc_names])
                oc.resize_pvcs(namespace, owned_pvc_names, desired_storage)

    if recycle_pods:
        oc.recycle_pods(dry_run, namespace, resource_type, resource)
Example #27
    def __init__(self, dry_run, instance):
        self.dry_run = dry_run
        self.settings = queries.get_app_interface_settings()

        cluster_info = instance['hiveCluster']
        hive_cluster = instance['hiveCluster']['name']

        # Getting the OCM Client for the hive cluster
        ocm_map = OCMMap(clusters=[cluster_info],
                         integration=QONTRACT_INTEGRATION,
                         settings=self.settings)

        self.ocm_cli = ocm_map.get(hive_cluster)
        if not self.ocm_cli:
            raise OcpReleaseMirrorError(f"Can't create ocm client for "
                                        f"cluster {hive_cluster}")

        # Getting the OC Client for the hive cluster
        oc_map = OC_Map(clusters=[cluster_info],
                        integration=QONTRACT_INTEGRATION,
                        settings=self.settings)
        self.oc_cli = oc_map.get(hive_cluster)
        if not self.oc_cli:
            raise OcpReleaseMirrorError(f"Can't create oc client for "
                                        f"cluster {hive_cluster}")

        namespace = instance['ecrResourcesNamespace']
        ocp_release_identifier = instance['ocpReleaseEcrIdentifier']
        ocp_art_dev_identifier = instance['ocpArtDevEcrIdentifier']

        ocp_release_info = self._get_tf_resource_info(namespace,
                                                      ocp_release_identifier)
        if ocp_release_info is None:
            raise OcpReleaseMirrorError(f"Could not find rds "
                                        f"identifier "
                                        f"{ocp_release_identifier} in "
                                        f"namespace {namespace['name']}")

        ocp_art_dev_info = self._get_tf_resource_info(namespace,
                                                      ocp_art_dev_identifier)
        if ocp_art_dev_info is None:
            raise OcpReleaseMirrorError(f"Could not find rds identifier"
                                        f" {ocp_art_dev_identifier} in"
                                        f"namespace {namespace['name']}")

        # Getting the AWS Client for the accounts
        aws_accounts = [
            self._get_aws_account_info(account=ocp_release_info['account']),
            self._get_aws_account_info(account=ocp_art_dev_info['account'])
        ]
        self.aws_cli = AWSApi(thread_pool_size=1,
                              accounts=aws_accounts,
                              settings=self.settings,
                              init_ecr_auth_tokens=True)
        self.aws_cli.map_ecr_resources()

        self.ocp_release_ecr_uri = self._get_image_uri(
            account=ocp_release_info['account'],
            repository=ocp_release_identifier)
        if self.ocp_release_ecr_uri is None:
            raise OcpReleaseMirrorError(f"Could not find the "
                                        f"ECR repository "
                                        f"{ocp_release_identifier}")

        self.ocp_art_dev_ecr_uri = self._get_image_uri(
            account=ocp_art_dev_info['account'],
            repository=ocp_art_dev_identifier)
        if self.ocp_art_dev_ecr_uri is None:
            raise OcpReleaseMirrorError(f"Could not find the "
                                        f"ECR repository "
                                        f"{ocp_art_dev_identifier}")

        # Process all the quayOrgTargets
        quay_api_store = get_quay_api_store()
        self.quay_target_orgs = []
        for quayTargetOrg in instance['quayTargetOrgs']:
            org_name = quayTargetOrg['name']
            instance_name = quayTargetOrg['instance']['name']
            org_key = OrgKey(instance_name, org_name)
            org_info = quay_api_store[org_key]

            if not org_info['push_token']:
                raise OcpReleaseMirrorError(
                    f'{org_key} has no push_token defined.')

            url = org_info['url']
            user = org_info['push_token']['user']
            token = org_info['push_token']['token']

            self.quay_target_orgs.append({
                'url': url,
                'dest_ocp_release': f"{url}/{org_name}/ocp-release",
                'dest_ocp_art_dev': f"{url}/{org_name}/ocp-v4.0-art-dev",
                'auths': self._build_quay_auths(url, user, token)
            })

        # Getting all the credentials
        quay_creds = self._get_quay_creds()
        ocp_release_creds = self._get_ecr_creds(
            account=ocp_release_info['account'],
            region=ocp_release_info['region'])
        ocp_art_dev_creds = self._get_ecr_creds(
            account=ocp_art_dev_info['account'],
            region=ocp_art_dev_info['region'])

        # Creating a single dictionary with all credentials to be used by the
        # "oc adm release mirror" command
        self.registry_creds = {
            'auths': {
                **quay_creds['auths'],
                **ocp_release_creds['auths'],
                **ocp_art_dev_creds['auths'],
            }
        }

        # Append quay_target_orgs auths to registry_creds
        for quay_target_org in self.quay_target_orgs:
            url = quay_target_org['url']

            if url in self.registry_creds['auths']:
                raise OcpReleaseMirrorError(
                    'Cannot mirror to the same Quay '
                    f'instance multiple times: {url}')

            self.registry_creds['auths'].update(quay_target_org['auths'])

        # Initiate channel groups
        self.channel_groups = instance['mirrorChannels']
def init_specs_to_fetch(
        ri: ResourceInventory,
        oc_map: OC_Map,
        namespaces: Optional[Iterable[Mapping]] = None,
        clusters: Optional[Iterable[Mapping]] = None,
        override_managed_types: Optional[Iterable[str]] = None,
        managed_types_key: str = 'managedResourceTypes') -> list[StateSpec]:
    state_specs = []

    if clusters and namespaces:
        raise KeyError('expected only one of clusters or namespaces.')
    elif namespaces:
        for namespace_info in namespaces:
            if override_managed_types is None:
                managed_types = set(
                    namespace_info.get(managed_types_key) or [])
            else:
                managed_types = set(override_managed_types)

            if not managed_types:
                continue

            cluster = namespace_info['cluster']['name']
            privileged = namespace_info.get("clusterAdmin", False) is True
            oc = oc_map.get(cluster, privileged)
            if not oc:
                if oc.log_level >= logging.ERROR:
                    ri.register_error()
                logging.log(level=oc.log_level, msg=oc.message)
                continue

            namespace = namespace_info['name']
            # These may exist but have a value of None
            managed_resource_names = \
                namespace_info.get('managedResourceNames') or []
            managed_resource_type_overrides = \
                namespace_info.get('managedResourceTypeOverrides') or []

            # Initialize current state specs
            for resource_type in managed_types:
                ri.initialize_resource_type(cluster, namespace, resource_type)
            resource_names = {}
            resource_type_overrides = {}
            for mrn in managed_resource_names:
                # Current implementation guarantees only one
                # managed_resource_name of each managed type
                if mrn['resource'] in managed_types:
                    resource_names[mrn['resource']] = mrn['resourceNames']
                elif override_managed_types:
                    logging.debug(
                        f"Skipping resource {mrn['resource']} in {cluster}/"
                        f"{namespace} because the integration explicitly "
                        "dismisses it")
                else:
                    raise KeyError(
                        f"Non-managed resource name {mrn} listed on "
                        f"{cluster}/{namespace} (valid kinds: {managed_types})"
                    )

            for o in managed_resource_type_overrides:
                # Current implementation guarantees only one
                # override of each managed type
                if o['resource'] in managed_types:
                    resource_type_overrides[o['resource']] = o['override']
                elif override_managed_types:
                    logging.debug(
                        f"Skipping resource type override {o} listed on"
                        f"{cluster}/{namespace} because the integration "
                        "dismisses it explicitly")
                else:
                    raise KeyError(
                        f"Non-managed override {o} listed on "
                        f"{cluster}/{namespace} (valid kinds: {managed_types})"
                    )

            for kind, names in resource_names.items():
                c_spec = StateSpec(
                    "current",
                    oc,
                    cluster,
                    namespace,
                    kind,
                    resource_type_override=resource_type_overrides.get(kind),
                    resource_names=names)
                state_specs.append(c_spec)
                managed_types.remove(kind)

            # Produce "empty" StateSpec's for any resource type that
            # doesn't have an explicit managedResourceName listed in
            # the namespace
            state_specs.extend(
                StateSpec("current",
                          oc,
                          cluster,
                          namespace,
                          t,
                          resource_type_override=resource_type_overrides.get(
                              t),
                          resource_names=None) for t in managed_types)

            # Initialize desired state specs
            openshift_resources = namespace_info.get('openshiftResources')
            for openshift_resource in openshift_resources or []:
                d_spec = StateSpec("desired",
                                   oc,
                                   cluster,
                                   namespace,
                                   openshift_resource,
                                   namespace_info,
                                   privileged=privileged)
                state_specs.append(d_spec)
    elif clusters:
        # set namespace to something indicative
        namespace = 'cluster'
        for cluster_info in clusters:
            cluster = cluster_info['name']
            oc = oc_map.get(cluster)
            if not oc:
                if oc.log_level >= logging.ERROR:
                    ri.register_error()
                logging.log(level=oc.log_level, msg=oc.message)
                continue

            # we currently only use override_managed_types
            # and do not allow a `managedResourcesTypes` field in a cluster file
            for resource_type in override_managed_types or []:
                ri.initialize_resource_type(cluster, namespace, resource_type)
                # Initialize current state specs
                c_spec = StateSpec("current", oc, cluster, namespace,
                                   resource_type)
                state_specs.append(c_spec)
                # Initialize desired state specs
                d_spec = StateSpec("desired", oc, cluster, namespace,
                                   resource_type)
                state_specs.append(d_spec)
    else:
        raise KeyError('expected one of clusters or namespaces.')

    return state_specs
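
Note: init_specs_to_fetch accepts exactly one of namespaces or clusters, as
the KeyError branches enforce. A sketch of the two call shapes; argument
values are illustrative:

# Namespace-scoped: managed types come from each namespace file.
specs = init_specs_to_fetch(ri, oc_map, namespaces=[namespace_info])

# Cluster-scoped: types must be forced via override_managed_types.
specs = init_specs_to_fetch(
    ri,
    oc_map,
    clusters=[{'name': 'cl1'}],
    override_managed_types=['ClusterRole'],
)
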
Example #29
    def __init__(self, dry_run, instance):
        self.dry_run = dry_run
        self.settings = queries.get_app_interface_settings()

        cluster_info = instance['hiveCluster']
        hive_cluster = instance['hiveCluster']['name']

        # Getting the OCM Client for the hive cluster
        ocm_map = OCMMap(clusters=[cluster_info],
                         integration=QONTRACT_INTEGRATION,
                         settings=self.settings)

        self.ocm_cli = ocm_map.get(hive_cluster)
        if not self.ocm_cli:
            raise OcpReleaseEcrMirrorError(f"Can't create ocm client for "
                                           f"cluster {hive_cluster}")

        # Getting the OC Client for the hive cluster
        oc_map = OC_Map(clusters=[cluster_info],
                        integration=QONTRACT_INTEGRATION,
                        settings=self.settings)
        self.oc_cli = oc_map.get(hive_cluster)
        if not self.oc_cli:
            raise OcpReleaseEcrMirrorError(f"Can't create oc client for "
                                           f"cluster {hive_cluster}")

        namespace = instance['ecrResourcesNamespace']
        ocp_release_identifier = instance['ocpReleaseEcrIdentifier']
        ocp_art_dev_identifier = instance['ocpArtDevEcrIdentifier']

        ocp_release_info = self._get_tf_resource_info(namespace,
                                                      ocp_release_identifier)
        if ocp_release_info is None:
            raise OcpReleaseEcrMirrorError(f"Could not find rds "
                                           f"identifier "
                                           f"{ocp_release_identifier} in "
                                           f"namespace {namespace['name']}")

        ocp_art_dev_info = self._get_tf_resource_info(namespace,
                                                      ocp_art_dev_identifier)
        if ocp_art_dev_info is None:
            raise OcpReleaseEcrMirrorError(f"Could not find rds identifier"
                                           f" {ocp_art_dev_identifier} in"
                                           f"namespace {namespace['name']}")

        # Getting the AWS Client for the accounts
        aws_accounts = [
            self._get_aws_account_info(account=ocp_release_info['account']),
            self._get_aws_account_info(account=ocp_art_dev_info['account'])
        ]
        self.aws_cli = AWSApi(thread_pool_size=1,
                              accounts=aws_accounts,
                              settings=self.settings,
                              init_ecr_auth_tokens=True)
        self.aws_cli.map_ecr_resources()

        self.ocp_release_ecr_uri = self._get_image_uri(
            account=ocp_release_info['account'],
            repository=ocp_release_identifier)
        if self.ocp_release_ecr_uri is None:
            raise OcpReleaseEcrMirrorError(f"Could not find the "
                                           f"ECR repository "
                                           f"{ocp_release_identifier}")

        self.ocp_art_dev_ecr_uri = self._get_image_uri(
            account=ocp_art_dev_info['account'],
            repository=ocp_art_dev_identifier)
        if self.ocp_art_dev_ecr_uri is None:
            raise OcpReleaseEcrMirrorError(f"Could not find the "
                                           f"ECR repository "
                                           f"{ocp_art_dev_identifier}")

        # Getting all the credentials
        quay_creds = self._get_quay_creds()
        ocp_release_creds = self._get_ecr_creds(
            account=ocp_release_info['account'],
            region=ocp_release_info['region'])
        ocp_art_dev_creds = self._get_ecr_creds(
            account=ocp_art_dev_info['account'],
            region=ocp_art_dev_info['region'])

        # Creating a single dictionary with all credentials to be used by the
        # "oc adm release mirror" command
        self.registry_creds = {
            'auths': {
                **quay_creds['auths'],
                **ocp_release_creds['auths'],
                **ocp_art_dev_creds['auths'],
            }
        }