def _get_sample_tkg_cluster_configuration():
    sample_tkg_plus_config = {
        "kind": "TanzuKubernetesCluster",
        "spec": {
            "topology": {
                "workers": {
                    "class": "Gold_storage_profile_name",
                    "count": 1,
                    "storageClass": "development #sample storage class"
                },
                "controlPlane": {
                    "class": "Gold_storage_profile_name",
                    "count": 1,
                    "storageClass": "development"
                }
            },
            "distribution": {
                "version": "v1.16"
            }
        },
        "metadata": {
            "name": "cluster_name",
            "placementPolicy": "placement_policy_name",
            "virtualDataCenterName": "org_virtual_data_center_name"
        }
    }
    sample_apply_spec = yaml.dump(sample_tkg_plus_config)
    CLIENT_LOGGER.info(sample_apply_spec)
    return sample_apply_spec
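For reference, a minimal standalone sketch of the same dump-a-dict-to-YAML pattern, assuming only PyYAML is installed; the trimmed config and the output file name below are hypothetical stand-ins, not part of CSE:

import yaml

trimmed_config = {
    "kind": "TanzuKubernetesCluster",
    "spec": {"topology": {"controlPlane": {"count": 1}, "workers": {"count": 1}}},
}

# yaml.dump serializes the nested dict into block-style YAML; sort_keys=False
# preserves insertion order instead of PyYAML's default alphabetical ordering.
spec_text = yaml.dump(trimmed_config, default_flow_style=False, sort_keys=False)
print(spec_text)

# A caller could save the text and edit the placeholders before using it.
with open("sample_tkg_cluster.yaml", "w") as f:  # hypothetical file name
    f.write(spec_text)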
Example #2
def list_clusters(ctx, vdc, org_name, should_print_all):
    """Display clusters in vCD that are visible to the logged in user.

\b
Examples
    vcd cse cluster list
        Display clusters in vCD that are visible to the logged in user.
\b
    vcd cse cluster list --vdc ovdc1
        Display clusters in vdc 'ovdc1'.
    """
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    try:
        client_utils.cse_restore_session(ctx)
        client = ctx.obj['client']
        cluster = Cluster(client)
        if not client.is_sysadmin() and org_name is None:
            org_name = ctx.obj['profiles'].get('org_in_use')
        client_utils.print_paginated_result(
            cluster.list_clusters(vdc=vdc, org=org_name),  # noqa: E501
            should_print_all=should_print_all,
            logger=CLIENT_LOGGER)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e))
Example #3
def _get_sample_cluster_configuration_by_k8_runtime(k8_runtime):
    metadata = rde_1_0_0.Metadata('cluster_name', 'organization_name',
                                  'org_virtual_datacenter_name')
    status = rde_1_0_0.Status()
    settings = rde_1_0_0.Settings(network='ovdc_network_name', ssh_key=None)
    k8_distribution = rde_1_0_0.Distribution(
        template_name='ubuntu-16.04_k8-1.17_weave-2.6.0', template_revision=2)
    control_plane = rde_1_0_0.ControlPlane(
        count=1,
        sizing_class='Large_sizing_policy_name',
        storage_profile='Gold_storage_profile_name')
    workers = rde_1_0_0.Workers(count=2,
                                sizing_class='Medium_sizing_policy_name',
                                storage_profile='Silver_storage_profile')

    nfs = rde_1_0_0.Nfs(count=0,
                        sizing_class='Large_sizing_policy_name',
                        storage_profile='Platinum_storage_profile_name')

    cluster_spec = rde_1_0_0.ClusterSpec(control_plane=control_plane,
                                         k8_distribution=k8_distribution,
                                         settings=settings,
                                         workers=workers,
                                         nfs=nfs)
    cluster_entity = rde_1_0_0.NativeEntity(metadata=metadata,
                                            spec=cluster_spec,
                                            status=status,
                                            kind=k8_runtime.value)

    sample_cluster_config = yaml.dump(dataclasses.asdict(cluster_entity))
    CLIENT_LOGGER.info(sample_cluster_config)
    return sample_cluster_config
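Since dataclasses.asdict only works on dataclass instances, the rde_1_0_0 models above are dataclasses, and the generic pattern is dataclass -> dataclasses.asdict -> yaml.dump. A minimal sketch with hypothetical stand-in classes (not the real rde_1_0_0 models):

import dataclasses

import yaml


@dataclasses.dataclass
class SampleControlPlane:  # hypothetical stand-in for rde_1_0_0.ControlPlane
    count: int = 1
    sizing_class: str = 'Large_sizing_policy_name'
    storage_profile: str = 'Gold_storage_profile_name'


@dataclasses.dataclass
class SampleClusterSpec:  # hypothetical stand-in for rde_1_0_0.ClusterSpec
    control_plane: SampleControlPlane = dataclasses.field(
        default_factory=SampleControlPlane)


# dataclasses.asdict recursively converts nested dataclasses into plain dicts,
# which yaml.dump can serialize without custom representers.
print(yaml.dump(dataclasses.asdict(SampleClusterSpec())))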
Example #4
def version(ctx):
    """Display version of CSE plug-in."""
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    cse_info = utils.get_cse_info()
    ver_str = '%s, %s, version %s' % (cse_info['product'],
                                      cse_info['description'],
                                      cse_info['version'])
    stdout(cse_info, ctx, ver_str)
    CLIENT_LOGGER.debug(ver_str)
Example #5
def ovdc_disable(ctx,
                 ovdc_name,
                 org_name,
                 disable_native,
                 disable_tkg_plus=None,
                 remove_cp_from_vms_on_disable=False):
    """Disable Kubernetes cluster deployment for an org VDC.

\b
Examples
    vcd cse ovdc disable --native --org org1 ovdc1
        Disable native cluster deployment in ovdc1 of org1.
        Supported only for vcd api version >= 35.
\b
    vcd cse ovdc disable --native --org org1 --force ovdc1
        Force disable native cluster deployment in ovdc1 of org1.
        Replaces CSE policies with VCD default policies.
        Supported only for vcd api version >= 35.
\b
    vcd cse ovdc disable ovdc3
        Disable ovdc3 for any further native cluster deployments.
        Supported only for vcd api version < 35.

    """
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    if not (disable_native or disable_tkg_plus):
        msg = "Please specify at least one k8 runtime to disable"
        stderr(msg, ctx)
        CLIENT_LOGGER.error(msg)
        return
    k8_runtime = []
    if disable_native:
        k8_runtime.append(shared_constants.ClusterEntityKind.NATIVE.value)
    if disable_tkg_plus:
        k8_runtime.append(shared_constants.ClusterEntityKind.TKG_PLUS.value)
    try:
        client_utils.cse_restore_session(ctx)
        client = ctx.obj['client']
        if client.is_sysadmin():
            ovdc = Ovdc(client)
            if org_name is None:
                org_name = ctx.obj['profiles'].get('org_in_use')
            result = ovdc.update_ovdc(
                enable=False,
                ovdc_name=ovdc_name,
                org_name=org_name,
                k8s_runtime=k8_runtime,
                remove_cp_from_vms_on_disable=remove_cp_from_vms_on_disable
            )  # noqa: E501
            stdout(result, ctx)
            CLIENT_LOGGER.debug(result)
        else:
            msg = "Insufficient permission to perform operation."
            stderr(msg, ctx)
            CLIENT_LOGGER.error(msg)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e))
def cluster_share(ctx, name, acl, users, vdc, org, k8_runtime, cluster_id):
    """Share cluster with users.

Either the cluster name or cluster id is required.
By default, this command searches for the cluster in the currently logged in user's org.

Note: this command does not remove an ACL entry.

\b
Examples:
    vcd cse cluster share --name mycluster --acl FullControl user1 user2
        Share cluster 'mycluster' with FullControl access with 'user1' and 'user2'
\b
    vcd cse cluster share --id urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057 --acl ReadOnly user1
        Share TKG-S cluster with cluster ID 'urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057'
        with ReadOnly access with 'user1'
    """  # noqa: E501
    try:
        # If cluster kind is not specified, let the server handle this check
        if k8_runtime:
            def_utils.raise_error_if_tkgm_cluster_operation(cluster_kind=k8_runtime)  # noqa: E501

        # Verify access level and cluster name/id arguments
        access_level_id = shared_constants.ACCESS_LEVEL_TYPE_TO_ID.get(acl.lower())  # noqa: E501
        if not access_level_id:
            raise Exception(f'Please enter a valid access control type: '
                            f'{shared_constants.READ_ONLY}, '
                            f'{shared_constants.READ_WRITE}, or '
                            f'{shared_constants.FULL_CONTROL}')
        if not (cluster_id or name):
            raise Exception("Please specify cluster name or cluster id.")
        client_utils.cse_restore_session(ctx)
        if client_utils.is_cli_for_tkg_s_only():
            if k8_runtime in [shared_constants.ClusterEntityKind.NATIVE.value,
                              shared_constants.ClusterEntityKind.TKG_PLUS.value]:  # noqa: E501
                # Cannot run the command as cse cli is enabled only for TKG-S
                raise CseServerNotRunningError()
            k8_runtime = shared_constants.ClusterEntityKind.TKG_S.value

        client = ctx.obj['client']
        # Users should be explicit in their intent about the org on which the
        # command needs to be executed.
        is_system_user = client.is_sysadmin()
        if not is_system_user and org is None:
            org = ctx.obj['profiles'].get('org_in_use')
        elif is_system_user and org is None:
            raise Exception("Need to specify cluster org since logged in user is in system org")  # noqa: E501

        users_list = list(users)
        cluster = Cluster(client, k8_runtime)
        cluster.share_cluster(cluster_id, name, users_list, access_level_id,
                              org, vdc)
        stdout(f'Cluster {cluster_id or name} successfully shared with: {users_list}')  # noqa: E501
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e), exc_info=True)
Example #7
def cluster_share(ctx, name, acl, users, vdc, org, k8_runtime, cluster_id):
    """Share cluster with users.

Either the cluster name or cluster id is required.
By default, this command searches for the cluster in the currently logged in user's org.

Note: this command does not remove an ACL entry.

\b
Examples:
    vcd cse cluster share --name mycluster --acl FullControl user1 user2
        Share cluster 'mycluster' with FullControl access with 'user1' and 'user2'
\b
    vcd cse cluster share --id urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057 --acl ReadOnly user1
        Share TKG cluster with cluster ID 'urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057'
        with ReadOnly access with 'user1'
    """  # noqa: E501
    try:
        # Verify access level and cluster name/id arguments
        access_level_id = shared_constants.ACCESS_LEVEL_TYPE_TO_ID.get(
            acl.lower())  # noqa: E501
        if not access_level_id:
            raise Exception(f'Please enter a valid access control type: '
                            f'{shared_constants.READ_ONLY}, '
                            f'{shared_constants.READ_WRITE}, or '
                            f'{shared_constants.FULL_CONTROL}')
        if not (cluster_id or name):
            raise Exception("Please specify cluster name or cluster id.")
        client_utils.cse_restore_session(ctx)
        if client_utils.is_cli_for_tkg_only():
            if k8_runtime in [
                    shared_constants.ClusterEntityKind.NATIVE.value,
                    shared_constants.ClusterEntityKind.TKG_PLUS.value
            ]:  # noqa: E501
                # Cannot run the command as cse cli is enabled only for tkg
                raise CseServerNotRunningError()
            k8_runtime = shared_constants.ClusterEntityKind.TKG.value

        client = ctx.obj['client']
        if not org:
            ctx_profiles = ctx.obj['profiles']
            org = ctx_profiles.get('org')
        users_list = list(users)
        cluster = Cluster(client, k8_runtime)
        cluster.share_cluster(cluster_id, name, users_list, access_level_id,
                              org, vdc)
        stdout(
            f'Cluster {cluster_id or name} successfully shared with: {users_list}'
        )  # noqa: E501
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e))
def _get_sample_cluster_configuration_by_k8_runtime(
        k8_runtime, server_rde_in_use):  # noqa: E501
    NativeEntityClass = rde_factory.get_rde_model(server_rde_in_use)
    sample_native_entity = NativeEntityClass.sample_native_entity(
        k8_runtime.value)  # noqa: E501
    native_entity_dict = sample_native_entity.to_dict()

    # remove status part of the entity dict
    del native_entity_dict['status']

    sample_apply_spec = yaml.dump(native_entity_dict)
    CLIENT_LOGGER.info(sample_apply_spec)
    return sample_apply_spec
def list_ovdcs(ctx, list_pks_plans, should_print_all=False):
    """Display org VDCs in vCD that are visible to the logged in user."""
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    try:
        client_utils.cse_restore_session(ctx)
        client = ctx.obj['client']
        ovdc = PksOvdc(client)
        client_utils.print_paginated_result(
            ovdc.list_ovdc(list_pks_plans=list_pks_plans),
            should_print_all=should_print_all,
            logger=CLIENT_LOGGER)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e), exc_info=True)
Example #10
def ovdc_enable(ctx, ovdc_name, pks_plan,
                pks_cluster_domain, org_name):
    """Set Kubernetes provider to be Ent-PKS for an org VDC."""
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    try:
        client_utils.cse_restore_session(ctx)
        client = ctx.obj['client']
        if client.is_sysadmin():
            ovdc = PksOvdc(client)
            if org_name is None:
                org_name = ctx.obj['profiles'].get('org_in_use')
            result = ovdc.update_ovdc(
                enable=True,
                ovdc_name=ovdc_name,
                org_name=org_name,
                pks_plan=pks_plan,
                pks_cluster_domain=pks_cluster_domain)
            stdout(result, ctx)
            CLIENT_LOGGER.debug(result)
        else:
            msg = "Insufficient permission to perform operation."
            stderr(msg, ctx)
            CLIENT_LOGGER.error(msg)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e))
Example #11
def ovdc_info(ctx, ovdc_name, org_name):
    """Display information about Kubernetes provider for an org VDC.

\b
Example
    vcd cse ovdc info ovdc1
        Display detailed information about ovdc 'ovdc1'.

    """
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    try:
        client_utils.cse_restore_session(ctx)
        client = ctx.obj['client']
        if client.is_sysadmin():
            ovdc = Ovdc(client)
            if org_name is None:
                org_name = ctx.obj['profiles'].get('org_in_use')
            result = ovdc.info_ovdc(ovdc_name, org_name)
            stdout(yaml.dump(result), ctx)
            CLIENT_LOGGER.debug(result)
        else:
            msg = "Insufficient permission to perform operation"
            stderr(msg, ctx)
            CLIENT_LOGGER.error(msg)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e))
def cluster_unshare(ctx, name, users, vdc, org, k8_runtime, cluster_id):
    """Remove access from current shared cluster users.

Either the cluster name or cluster id is required. By default, this command searches
for the cluster in the currently logged in user's org.

\b
Examples:
    vcd cse cluster unshare --name mycluster user1 user2
        Remove 'user1' and 'user2' from the access list of cluster 'mycluster'
\b
    vcd cse cluster unshare --id urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057 user1
        Unshare TKG-S cluster with cluster ID 'urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057' with 'user1'
    """  # noqa: E501
    try:
        if not (cluster_id or name):
            raise Exception("Please specify cluster name or cluster id.")
        client_utils.cse_restore_session(ctx)
        if client_utils.is_cli_for_tkg_s_only():
            if k8_runtime in [shared_constants.ClusterEntityKind.NATIVE.value,
                              shared_constants.ClusterEntityKind.TKG_PLUS.value]:  # noqa: E501
                # Cannot run the command as cse cli is enabled only for tkg
                raise CseServerNotRunningError()
            k8_runtime = shared_constants.ClusterEntityKind.TKG_S.value

        client = ctx.obj['client']
        # Users should be explicit in their intent about the org on which the
        # command needs to be executed.
        is_system_user = client.is_sysadmin()
        if not is_system_user and org is None:
            org = ctx.obj['profiles'].get('org_in_use')
        elif is_system_user and org is None:
            raise Exception("Need to specify cluster org since logged in user is in system org")  # noqa: E501

        # If cluster kind is not specified, let the server handle this check
        if k8_runtime:
            def_utils.raise_error_if_tkgm_cluster_operation(cluster_kind=k8_runtime)  # noqa: E501

        users_list = list(users)
        cluster = Cluster(client, k8_runtime)
        cluster.unshare_cluster(cluster_id, name, users_list, org, vdc)

        stdout(f'Cluster {cluster_id or name} successfully unshared with: {users_list}')  # noqa: E501
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e), exc_info=True)
def _get_sample_cluster_configuration_by_k8_runtime(
        k8_runtime, server_rde_in_use):  # noqa: E501
    NativeEntityClass = rde_factory.get_rde_model(server_rde_in_use)
    sample_native_entity = NativeEntityClass.sample_native_entity(
        k8_runtime.value)  # noqa: E501
    native_entity_dict = sample_native_entity.to_dict()

    # remove status part of the entity dict
    del native_entity_dict['status']

    # Hiding the network spec section for Andromeda (CSE 3.1)
    # spec.settings.network is targeted for CSE 3.1.1 to accommodate CNI=Antrea
    # Below line can be deleted post Andromeda (CSE 3.1)
    del native_entity_dict['spec']['settings']['network']

    sample_apply_spec = yaml.dump(native_entity_dict)
    CLIENT_LOGGER.info(sample_apply_spec)
    return sample_apply_spec
def cluster_share_list(ctx, should_print_all, name, vdc, org, k8_runtime,
                       cluster_id):
    """List cluster shared user information.

    Either the cluster name or cluster id is required.
\b
Examples:
    vcd cse cluster share-list --name mycluster
        List shared user information for cluster 'mycluster'
\b
    vcd cse cluster share-list --id urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057
        List shared user information for cluster with cluster ID 'urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057'
    """  # noqa: E501
    try:
        # If cluster kind is not specified, let the server handle this check
        if k8_runtime:
            def_utils.raise_error_if_tkgm_cluster_operation(cluster_kind=k8_runtime)  # noqa: E501

        if not (cluster_id or name):
            raise Exception("Please specify cluster name or cluster id.")
        client_utils.cse_restore_session(ctx)
        if client_utils.is_cli_for_tkg_s_only():
            if k8_runtime in [shared_constants.ClusterEntityKind.NATIVE.value,
                              shared_constants.ClusterEntityKind.TKG_PLUS.value]:  # noqa: E501
                # Cannot run the command as cse cli is enabled only for TKG-S
                raise CseServerNotRunningError()
            k8_runtime = shared_constants.ClusterEntityKind.TKG_S.value

        # Determine cluster type and retrieve cluster id if needed
        client = ctx.obj['client']
        # Users should be explicit in their intent about the org on which the
        # command needs to be executed.
        is_system_user = client.is_sysadmin()
        if not is_system_user and org is None:
            org = ctx.obj['profiles'].get('org_in_use')
        elif is_system_user and org is None:
            raise Exception("Need to specify cluster org since logged in user is in system org")  # noqa: E501

        cluster = Cluster(client, k8_runtime)
        share_entries = cluster.list_share_entries(cluster_id, name, org, vdc)
        client_utils.print_paginated_result(share_entries, should_print_all)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e), exc_info=True)
Example #15
    def get_command(self, ctx, cmd_name):
        """Override this click method to customize.

        :param click.core.Context ctx: Click Context
        :param str cmd_name: name of the command (ex:create, delete, resize)
        :return: Click command object for 'cmd_name'
        :rtype: click.Core.Command
        """
        try:
            if not isinstance(ctx.obj, dict) or not ctx.obj.get('client'):
                client_utils.cse_restore_session(ctx)
            client = ctx.obj['client']
            version = client.get_api_version()

            # Skipping some commands when CSE server is not running
            if client_utils.is_cli_for_tkg_only() and \
                cmd_name in [*UNSUPPORTED_COMMANDS_WITH_SERVER_NOT_RUNNING_BY_VERSION.get(version, []),  # noqa: E501
                             *UNSUPPORTED_SUBCOMMANDS_WITH_SERVER_NOT_RUNNING_BY_VERSION.get(version, {}).get(self.name, [])]:  # noqa: E501
                return None

            # Skip the command if not supported
            if cmd_name in UNSUPPORTED_COMMANDS_BY_VERSION.get(version, []):
                return None

            # Skip the subcommand if not supported
            unsupported_subcommands = UNSUPPORTED_SUBCOMMANDS_BY_VERSION.get(
                version, {}).get(self.name, [])  # noqa: E501
            if cmd_name in unsupported_subcommands:
                return None

            cmd = click.Group.get_command(self, ctx, cmd_name)
            unsupported_params = UNSUPPORTED_SUBCOMMAND_OPTIONS_BY_VERSION.get(
                version, {}).get(self.name, {}).get(cmd_name, [])  # noqa: E501
            # Remove all unsupported options for this subcommand, if any
            filtered_params = [
                param for param in cmd.params
                if param.name not in unsupported_params
            ]  # noqa: E501
            cmd.params = filtered_params
        except Exception as e:
            CLIENT_LOGGER.debug(f'exception while filtering {cmd_name}: {e}')

        return click.Group.get_command(self, ctx, cmd_name)
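A self-contained sketch of the same idea, filtering commands and options by API version from a click.Group subclass; the group, lookup tables, and version key below are hypothetical and only illustrate the technique:

import click

# Hypothetical lookup tables; the real plugin builds these per API version.
UNSUPPORTED_COMMANDS = {'33.0': ['share']}
UNSUPPORTED_OPTIONS = {'33.0': {'create': ['sizing_class']}}


class VersionFilteringGroup(click.Group):
    def get_command(self, ctx, cmd_name):
        api_version = (ctx.obj or {}).get('api_version', '33.0')
        # Hide the whole command if this API version does not support it.
        if cmd_name in UNSUPPORTED_COMMANDS.get(api_version, []):
            return None
        cmd = click.Group.get_command(self, ctx, cmd_name)
        if cmd is None:
            return None
        # Drop individual options this API version does not support.
        unsupported = UNSUPPORTED_OPTIONS.get(api_version, {}).get(cmd_name, [])
        cmd.params = [p for p in cmd.params if p.name not in unsupported]
        return cmd


@click.group(cls=VersionFilteringGroup)
@click.pass_context
def cluster(ctx):
    """Hypothetical command group used only to demonstrate the filtering."""
    ctx.ensure_object(dict)


@cluster.command()
@click.option('--sizing-class', 'sizing_class', default=None)
def create(sizing_class):
    click.echo(f'create called with sizing_class={sizing_class}')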
Example #16
def ovdc_enable(ctx, ovdc_name, org_name, enable_native, enable_tkg_plus=None):
    """Set Kubernetes provider for an org VDC.

\b
Example
    vcd cse ovdc enable --native --org org1 ovdc1
        Enable native cluster deployment in ovdc1 of org1.
        Supported only for vcd api version >= 35.
\b
    vcd cse ovdc enable ovdc1
        Enable ovdc1 for native cluster deployment.
        Supported only for vcd api version < 35.
    """
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    if not (enable_native or enable_tkg_plus):
        msg = "Please specify at least one k8 runtime to enable"
        stderr(msg, ctx)
        CLIENT_LOGGER.error(msg)
        return
    k8_runtime = []
    if enable_native:
        k8_runtime.append(shared_constants.ClusterEntityKind.NATIVE.value)
    if enable_tkg_plus:
        k8_runtime.append(shared_constants.ClusterEntityKind.TKG_PLUS.value)
    try:
        client_utils.cse_restore_session(ctx)
        client = ctx.obj['client']
        if client.is_sysadmin():
            ovdc = Ovdc(client)
            if org_name is None:
                org_name = ctx.obj['profiles'].get('org_in_use')
            result = ovdc.update_ovdc(enable=True,
                                      ovdc_name=ovdc_name,
                                      org_name=org_name,
                                      k8s_runtime=k8_runtime)
            stdout(result, ctx)
            CLIENT_LOGGER.debug(result)
        else:
            msg = "Insufficient permission to perform operation."
            stderr(msg, ctx)
            CLIENT_LOGGER.error(msg)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e))
Example #17
def cluster_unshare(ctx, name, users, vdc, org, k8_runtime, cluster_id):
    """Remove access from current shared cluster users.

Either the cluster name or cluster id is required. By default, this command searches
for the cluster in the currently logged in user's org.

\b
Examples:
    vcd cse cluster unshare --name mycluster user1 user2
        Remove 'user1' and 'user2' from the access list of cluster 'mycluster'
\b
    vcd cse cluster unshare --id urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057 user1
        Unshare TKG cluster with cluster ID 'urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057' with 'user1'
    """  # noqa: E501
    try:
        if not (cluster_id or name):
            raise Exception("Please specify cluster name or cluster id.")
        client_utils.cse_restore_session(ctx)
        if client_utils.is_cli_for_tkg_only():
            if k8_runtime in [
                    shared_constants.ClusterEntityKind.NATIVE.value,
                    shared_constants.ClusterEntityKind.TKG_PLUS.value
            ]:  # noqa: E501
                # Cannot run the command as cse cli is enabled only for tkg
                raise CseServerNotRunningError()
            k8_runtime = shared_constants.ClusterEntityKind.TKG.value

        client = ctx.obj['client']
        if not org:
            ctx_profiles = ctx.obj['profiles']
            org = ctx_profiles.get('org')
        users_list = list(users)
        cluster = Cluster(client, k8_runtime)
        cluster.unshare_cluster(cluster_id, name, users_list, org, vdc)

        stdout(
            f'Cluster {cluster_id or name} successfully unshared with: {users_list}'
        )  # noqa: E501
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e))
def list_nodes(ctx, name, org, vdc):
    """Display nodes of a cluster that uses native Kubernetes provider.

\b
Example
    vcd cse node list mycluster
        Displays nodes in 'mycluster'.

    """
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    try:
        client_utils.cse_restore_session(ctx)
        client = ctx.obj['client']
        if org is None and not client.is_sysadmin():
            org = ctx.obj['profiles'].get('org_in_use')
        cluster = Cluster(client)
        cluster_info = cluster.get_cluster_info(name, org=org, vdc=vdc)
        if cluster_info.get(K8S_PROVIDER_KEY) != K8sProvider.NATIVE:
            raise Exception("'node list' operation is not supported by non "
                            "native clusters.")
        all_nodes = cluster_info['master_nodes'] + cluster_info['nodes']
        value_field_to_display_field = {
            'name': 'Name',
            'ipAddress': 'IP Address',
            'numberOfCpus': 'Number of CPUs',
            'memoryMB': 'Memory MB'
        }
        filtered_nodes = client_utils.filter_columns(all_nodes, value_field_to_display_field)  # noqa: E501
        stdout(filtered_nodes, ctx, show_id=True, sort_headers=False)
        CLIENT_LOGGER.debug(all_nodes)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e), exc_info=True)
def cluster_config(ctx, cluster_name, vdc, org):
    """Display Ent-PKS cluster configuration.

    To write to a file: `vcd cse pks-cluster config mycluster > ~/.kube/my_config`  # noqa: E501
    """
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    try:
        client_utils.cse_restore_session(ctx)
        client = ctx.obj['client']
        cluster = PksCluster(client)
        if not client.is_sysadmin() and org is None:
            org = ctx.obj['profiles'].get('org_in_use')
        cluster_config = cluster.get_cluster_config(
            cluster_name, vdc=vdc,
            org=org).get(RESPONSE_MESSAGE_KEY)  # noqa: E501
        # Config information with linux new-line should be converted to
        # carriage-return to output in windows console.
        if os.name == 'nt':
            cluster_config = str.replace(cluster_config, '\n', '\r\n')

        click.secho(cluster_config)
        CLIENT_LOGGER.debug(cluster_config)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e), exc_info=True)
def cluster_create(ctx, cluster_name, vdc, node_count, org_name):
    """Create an Ent-PKS Kubernetes cluster (max name length is 25 characters)."""  # noqa: E501
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    try:

        client_utils.cse_restore_session(ctx)
        if vdc is None:
            vdc = ctx.obj['profiles'].get('vdc_in_use')
            if not vdc:
                raise Exception("Virtual datacenter context is not set. "
                                "Use either command 'vcd vdc use' or option "
                                "'--vdc' to set the vdc context.")
        if org_name is None:
            org_name = ctx.obj['profiles'].get('org_in_use')
        client = ctx.obj['client']
        cluster = PksCluster(client)
        result = cluster.create_cluster(vdc,
                                        cluster_name,
                                        node_count=node_count,
                                        org=org_name)
        stdout(result, ctx)
        CLIENT_LOGGER.debug(result)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e), exc_info=True)
def cluster_upgrade(ctx, cluster_name, template_name, template_revision,
                    vdc, org_name, k8_runtime=None):
    """Upgrade cluster software to specified template's software versions.

\b
Example
    vcd cse cluster upgrade my-cluster ubuntu-16.04_k8-1.18_weave-2.6.4 1
        Upgrade cluster 'my-cluster' Docker-CE, Kubernetes, and CNI to match
        template 'ubuntu-16.04_k8-1.18_weave-2.6.4' at revision 1.
        Affected software: Docker-CE, Kubernetes, CNI
    """
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    # NOTE: Command is exposed only if CLI is enabled for native
    try:
        client_utils.cse_restore_session(ctx)
        if client_utils.is_cli_for_tkg_s_only():
            if k8_runtime in [shared_constants.ClusterEntityKind.NATIVE.value,
                              shared_constants.ClusterEntityKind.TKG_PLUS.value]:  # noqa: E501
                # Cannot run the command as cse cli is enabled only for native
                raise CseServerNotRunningError()
            k8_runtime = shared_constants.ClusterEntityKind.TKG_S.value
        client = ctx.obj['client']
        cluster = Cluster(client, k8_runtime=k8_runtime)
        if not client.is_sysadmin() and org_name is None:
            org_name = ctx.obj['profiles'].get('org_in_use')

        result = cluster.upgrade_cluster(cluster_name, template_name,
                                         template_revision, ovdc_name=vdc,
                                         org_name=org_name)
        stdout(result, ctx)
        CLIENT_LOGGER.debug(result)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e))
def node_info(ctx, cluster_name, node_name, org_name, vdc):
    """Display info about a node in a native Kubernetes provider cluster.

\b
Example
    vcd cse node info mycluster node-xxxx
        Display detailed information about node 'node-xxxx' in cluster
        'mycluster'.
    """
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    try:
        client_utils.cse_restore_session(ctx)
        client = ctx.obj['client']
        cluster = Cluster(client)

        if org_name is None and not client.is_sysadmin():
            org_name = ctx.obj['profiles'].get('org_in_use')
        node_info = cluster.get_node_info(cluster_name, node_name,
                                          org_name, vdc)
        value_field_to_display_field = {
            'name': 'Name',
            'node_type': 'Node Type',
            'ipAddress': 'IP Address',
            'numberOfCpus': 'Number of CPUs',
            'memoryMB': 'Memory MB',
            'status': 'Status'
        }
        filtered_node_info = client_utils.filter_columns(
            node_info, value_field_to_display_field)
        stdout(filtered_node_info, ctx, sort_headers=False)
        CLIENT_LOGGER.debug(filtered_node_info)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e), exc_info=True)
    def list_clusters(self, vdc=None, org=None):
        filters = {
            shared_constants.RequestKey.ORG_NAME: org,
            shared_constants.RequestKey.OVDC_NAME: vdc}
        for clusters_rep, has_more_results in \
                self._native_cluster_api.get_all_clusters(filters=filters):
            clusters = []
            CLIENT_LOGGER.debug(clusters_rep)
            for c in clusters_rep:
                # TODO cluster api response keys need to be better defined
                cluster = {
                    'Name': c.get('name', 'N/A'),
                    'Owner': c.get('owner_name', 'N/A'),
                    'VDC': c.get('vdc', 'N/A'),
                    'Org': c.get('org_name', 'N/A'),
                    'K8s Runtime': c.get('k8s_type', 'N/A'),
                    'K8s Version': c.get('k8s_version', 'N/A'),
                    'Status': c.get('status', 'N/A'),
                    'Provider': c.get('k8s_provider', 'N/A'),
                }
                clusters.append(cluster)
            yield clusters, has_more_results
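Because the method yields one page of rows plus a has-more flag at a time, callers can stream results instead of materializing every cluster at once. A hedged consumption sketch, where cluster_client is assumed to be an instance of the class defining list_clusters above and 'ovdc1'/'org1' are placeholder names:

for page, has_more_results in cluster_client.list_clusters(vdc='ovdc1', org='org1'):
    for row in page:
        print(row['Name'], row['Status'])
    if not has_more_results:
        break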
def _get_sample_cluster_configuration_by_k8_runtime(k8_runtime, server_rde_in_use):  # noqa: E501
    NativeEntityClass = rde_factory.get_rde_model(server_rde_in_use)
    sample_native_entity = NativeEntityClass.sample_native_entity(k8_runtime.value)  # noqa: E501
    native_entity_dict = sample_native_entity.to_dict()

    # remove status part of the entity dict
    del native_entity_dict['status']

    # Hiding the network spec section for Andromeda (CSE 3.1)
    # spec.settings.network is targeted for CSE 3.1.1 to accommodate CNI=Antrea
    # Below line can be deleted post Andromeda (CSE 3.1)
    del native_entity_dict['spec']['settings']['network']
    # Hiding the cpu and memory properties from controlPlane and workers for
    # Andromeda (CSE 3.1). Below lines can be deleted once cpu and memory
    # support is added in CSE 3.1.1
    del native_entity_dict['spec']['topology']['controlPlane']['cpu']
    del native_entity_dict['spec']['topology']['controlPlane']['memory']
    del native_entity_dict['spec']['topology']['workers']['cpu']
    del native_entity_dict['spec']['topology']['workers']['memory']

    sample_apply_spec = yaml.dump(native_entity_dict)
    CLIENT_LOGGER.info(sample_apply_spec)
    return sample_apply_spec
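The chain of del statements above can also be written as a small helper that prunes dotted key paths from the nested dict before dumping it; a sketch (the helper is hypothetical, not part of CSE):

def _prune_paths(entity_dict, dotted_paths):
    """Remove nested keys such as 'spec.settings.network' in place."""
    for path in dotted_paths:
        *parents, leaf = path.split('.')
        node = entity_dict
        for key in parents:
            node = node.get(key, {})
        node.pop(leaf, None)
    return entity_dict


# Equivalent to the del statements above:
# _prune_paths(native_entity_dict, [
#     'spec.settings.network',
#     'spec.topology.controlPlane.cpu',
#     'spec.topology.controlPlane.memory',
#     'spec.topology.workers.cpu',
#     'spec.topology.workers.memory',
# ])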
Example #25
def cluster_share_list(ctx, should_print_all, name, vdc, org, k8_runtime,
                       cluster_id):
    """List cluster shared user information.

    Either the cluster name or cluster id is required.
\b
Examples:
    vcd cse cluster share-list --name mycluster
        List shared user information for cluster 'mycluster'
\b
    vcd cse cluster share-list --id urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057
        List shared user information for cluster with cluster ID 'urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057'
    """  # noqa: E501
    try:
        if not (cluster_id or name):
            raise Exception("Please specify cluster name or cluster id.")
        client_utils.cse_restore_session(ctx)
        if client_utils.is_cli_for_tkg_only():
            if k8_runtime in [
                    shared_constants.ClusterEntityKind.NATIVE.value,
                    shared_constants.ClusterEntityKind.TKG_PLUS.value
            ]:  # noqa: E501
                # Cannot run the command as cse cli is enabled only for tkg
                raise CseServerNotRunningError()
            k8_runtime = shared_constants.ClusterEntityKind.TKG.value

        # Determine cluster type and retrieve cluster id if needed
        client = ctx.obj['client']
        if not org:
            ctx_profiles = ctx.obj['profiles']
            org = ctx_profiles.get('org')
        cluster = Cluster(client, k8_runtime)
        share_entries = cluster.list_share_entries(cluster_id, name, org, vdc)
        client_utils.print_paginated_result(share_entries, should_print_all)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e))
Example #26
def list_ovdcs(ctx, should_print_all=False):
    """Display org VDCs in vCD that are visible to the logged in user.

\b
Example
    vcd cse ovdc list
        Display ovdcs in vCD that are visible to the logged in user.
        The user might be prompted if more results need to be displayed.
\b
    vcd cse ovdc list -A
        Display ovdcs in vCD that are visible to the logged in user without
        prompting the user.
    """
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    try:
        client_utils.cse_restore_session(ctx)
        client = ctx.obj['client']
        ovdc = Ovdc(client)
        client_utils.print_paginated_result(ovdc.list_ovdc(),
                                            should_print_all=should_print_all,
                                            logger=CLIENT_LOGGER)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e))
def create_node(ctx, cluster_name, node_count, org, vdc, cpu, memory,
                network_name, storage_profile, ssh_key_file, template_name,
                template_revision, enable_nfs, disable_rollback):
    """Add node(s) to a cluster that uses native Kubernetes provider.

\b
Example
    vcd cse node create mycluster --nodes 2 --enable-nfs --network mynetwork \\
    --template-name photon-v2 --template-revision 1 --cpu 3 --memory 1024 \\
    --storage-profile mystorageprofile --ssh-key ~/.ssh/id_rsa.pub
        Add 2 NFS nodes to the vApp named 'mycluster' in vCD.
        The nodes will be connected to org VDC network 'mynetwork'.
        All VMs will use the template 'photon-v2'.
        Each VM will have 3 vCPUs and 1024 MB of memory.
        All VMs will use the storage profile 'mystorageprofile'.
        The public ssh key at '~/.ssh/id_rsa.pub' will be placed into all
        VMs for user accessibility.

    """
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    try:
        if (template_name and not template_revision) or \
                (not template_name and template_revision):
            raise Exception("Both --template-name (-t) and "
                            "--template-revision (-r) must be specified.")

        client_utils.cse_restore_session(ctx)
        client = ctx.obj['client']
        if org is None and not client.is_sysadmin():
            org = ctx.obj['profiles'].get('org_in_use')
        cluster = Cluster(client)
        ssh_key = None
        if ssh_key_file is not None:
            ssh_key = ssh_key_file.read()
        result = cluster.add_node(
            network_name,
            cluster_name,
            node_count=node_count,
            org=org,
            vdc=vdc,
            cpu=cpu,
            memory=memory,
            storage_profile=storage_profile,
            ssh_key=ssh_key,
            template_name=template_name,
            template_revision=template_revision,
            enable_nfs=enable_nfs,
            rollback=not disable_rollback)
        stdout(result, ctx)
        CLIENT_LOGGER.debug(result)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e), exc_info=True)
def reload_templates(ctx):
    """Reload CSE native and TKG templates."""
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    try:
        client_utils.cse_restore_session(ctx)
        client = ctx.obj['client']
        if client.is_sysadmin():
            template = Template(client)
            result = template.reload_templates()
            CLIENT_LOGGER.debug(result)
            stdout(result, ctx)
        else:
            msg = "Insufficient permission to perform operation."
            stderr(msg, ctx)
            CLIENT_LOGGER.error(msg)

    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e), exc_info=True)
def cluster_delete(ctx, name, vdc, org, force=False, k8_runtime=None, cluster_id=None):  # noqa: E501
    """Delete a Kubernetes cluster.

\b
Example
    vcd cse cluster delete mycluster --yes
        Delete cluster 'mycluster' without prompting.
        '--vdc' option can be used for faster command execution.
\b
    vcd cse cluster delete mycluster --force
        Force delete the native entity type cluster regardless of its state. Force delete removes
        the Runtime Defined Entity, vApp, and DNAT rule (if any) that represent the cluster.
\b
    vcd cse cluster delete --id urn:vcloud:entity:cse:nativeCluster:1.0.0:0632c7c7-a613-427c-b4fc-9f1247da5561
        Delete cluster with cluster ID 'urn:vcloud:entity:cse:nativeCluster:1.0.0:0632c7c7-a613-427c-b4fc-9f1247da5561'.
        (The '--id' option is applicable only for API versions >= 35.)
    """  # noqa: E501
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    try:
        client_utils.cse_restore_session(ctx)
        if not (cluster_id or name):
            # --id is not required when working with api version 33 and 34
            raise Exception("Please specify cluster name (or) cluster Id. "
                            "Note that '--id' flag is applicable for API versions >= 35 only.")  # noqa: E501

        client = ctx.obj['client']
        if client_utils.is_cli_for_tkg_s_only():
            if k8_runtime in shared_constants.CSE_SERVER_RUNTIMES:
                # Cannot run the command as cse cli is enabled only for native
                raise CseServerNotRunningError()
            k8_runtime = shared_constants.ClusterEntityKind.TKG_S.value
        cluster = Cluster(client, k8_runtime=k8_runtime)
        if not client.is_sysadmin() and org is None:
            org = ctx.obj['profiles'].get('org_in_use')
        if force:
            result = cluster.force_delete_cluster(
                name,
                cluster_id=cluster_id,
                org=org, vdc=vdc
            )
        else:
            result = cluster.delete_cluster(
                name, cluster_id=cluster_id, org=org, vdc=vdc)
        if len(result) == 0:
            # TODO(CLI): Update message to use vcd task wait instead
            click.secho(f"Delete cluster operation has been initiated on "
                        f"{name}, please check the status using"
                        f" 'vcd cse cluster info {name}'.", fg='yellow')
        stdout(result, ctx)
        CLIENT_LOGGER.debug(result)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e), exc_info=True)
Example #30
def cluster_upgrade_plan(ctx, cluster_name, vdc, org_name, k8_runtime=None):
    """Display templates that the specified cluster can upgrade to.

\b
Examples
    vcd cse cluster upgrade-plan my-cluster
    (Supported only for vcd api version < 35)
\b
    vcd cse cluster upgrade-plan --k8-runtime native my-cluster
    (Supported only for vcd api version >= 35)
    """
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    try:
        client_utils.cse_restore_session(ctx)
        if client_utils.is_cli_for_tkg_only():
            if k8_runtime in [
                    shared_constants.ClusterEntityKind.NATIVE.value,
                    shared_constants.ClusterEntityKind.TKG_PLUS.value
            ]:  # noqa: E501
                # Cannot run the command as cse cli is enabled only for native
                raise CseServerNotRunningError()
            k8_runtime = shared_constants.ClusterEntityKind.TKG.value
        client = ctx.obj['client']
        cluster = Cluster(client, k8_runtime=k8_runtime)
        if not client.is_sysadmin() and org_name is None:
            org_name = ctx.obj['profiles'].get('org_in_use')

        templates = cluster.get_upgrade_plan(cluster_name,
                                             vdc=vdc,
                                             org=org_name)
        result = []
        for template in templates:
            result.append({
                'Template Name': template[LocalTemplateKey.NAME],
                'Template Revision': template[LocalTemplateKey.REVISION],
                'Kubernetes': template[LocalTemplateKey.KUBERNETES_VERSION],
                'Docker-CE': template[LocalTemplateKey.DOCKER_VERSION],
                'CNI': f"{template[LocalTemplateKey.CNI]} {template[LocalTemplateKey.CNI_VERSION]}"  # noqa: E501
            })

        if not templates:
            result = f"No valid upgrade targets for cluster '{cluster_name}'"
        stdout(result, ctx, sort_headers=False)
        CLIENT_LOGGER.debug(result)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e))