def cluster_upgrade(ctx, cluster_name, template_name, template_revision, vdc, org_name, k8_runtime=None):
    """Upgrade cluster software to specified template's software versions.

\b
Example
    vcd cse cluster upgrade my-cluster ubuntu-16.04_k8-1.18_weave-2.6.4 1
        Upgrade cluster 'my-cluster' Docker-CE, Kubernetes, and CNI to match
        template 'ubuntu-16.04_k8-1.18_weave-2.6.4' at revision 1.
        Affected software: Docker-CE, Kubernetes, CNI
    """
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    # NOTE: Command is exposed only if CLI is enabled for native
    try:
        client_utils.cse_restore_session(ctx)
        if client_utils.is_cli_for_tkg_s_only():
            if k8_runtime in [shared_constants.ClusterEntityKind.NATIVE.value,
                              shared_constants.ClusterEntityKind.TKG_PLUS.value]:  # noqa: E501
                # Cannot run the command as cse cli is enabled only for native
                raise CseServerNotRunningError()
            # CLI is restricted to TKG-S; route the operation accordingly.
            k8_runtime = shared_constants.ClusterEntityKind.TKG_S.value
        client = ctx.obj['client']
        cluster = Cluster(client, k8_runtime=k8_runtime)
        if not client.is_sysadmin() and org_name is None:
            # Default to the org the user is currently logged in to.
            org_name = ctx.obj['profiles'].get('org_in_use')
        result = cluster.upgrade_cluster(cluster_name, template_name,
                                         template_revision, ovdc_name=vdc,
                                         org_name=org_name)
        stdout(result, ctx)
        CLIENT_LOGGER.debug(result)
    except Exception as e:
        stderr(e, ctx)
        # exc_info=True for traceback parity with the other cluster commands.
        CLIENT_LOGGER.error(str(e), exc_info=True)
def cluster_share(ctx, name, acl, users, vdc, org, k8_runtime, cluster_id):
    """Share cluster with users.

Either the cluster name or cluster id is required.
By default, this command searches for the cluster in the currently logged in user's org.

Note: this command does not remove an ACL entry.

\b
Examples:
    vcd cse cluster share --name mycluster --acl FullControl user1 user2
        Share cluster 'mycluster' with FullControl access with 'user1' and 'user2'
\b
    vcd cse cluster share --id urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057 --acl ReadOnly user1
        Share TKG-S cluster with cluster ID 'urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057' with ReadOnly access with 'user1'
    """  # noqa: E501
    try:
        # If cluster kind is not specified, let the server handle this check
        if k8_runtime:
            def_utils.raise_error_if_tkgm_cluster_operation(cluster_kind=k8_runtime)  # noqa: E501

        # Validate the requested ACL and the name/id arguments up front.
        access_level_id = shared_constants.ACCESS_LEVEL_TYPE_TO_ID.get(acl.lower())  # noqa: E501
        if not access_level_id:
            raise Exception(f'Please enter a valid access control type: '
                            f'{shared_constants.READ_ONLY}, '
                            f'{shared_constants.READ_WRITE}, or '
                            f'{shared_constants.FULL_CONTROL}')
        if not cluster_id and not name:
            raise Exception("Please specify cluster name or cluster id.")

        client_utils.cse_restore_session(ctx)
        if client_utils.is_cli_for_tkg_s_only():
            if k8_runtime in (shared_constants.ClusterEntityKind.NATIVE.value,
                              shared_constants.ClusterEntityKind.TKG_PLUS.value):  # noqa: E501
                # Cannot run the command as cse cli is enabled only for TKG-S
                raise CseServerNotRunningError()
            k8_runtime = shared_constants.ClusterEntityKind.TKG_S.value

        client = ctx.obj['client']
        # Users should be explicit in their intent about the org on which the
        # command needs to be executed.
        if org is None:
            if client.is_sysadmin():
                raise Exception("Need to specify cluster org since logged in user is in system org")  # noqa: E501
            org = ctx.obj['profiles'].get('org_in_use')

        user_names = list(users)
        Cluster(client, k8_runtime).share_cluster(
            cluster_id, name, user_names, access_level_id, org, vdc)
        stdout(f'Cluster {cluster_id or name} successfully shared with: {user_names}')  # noqa: E501
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e), exc_info=True)
def cluster_delete(ctx, name, vdc, org, force=False, k8_runtime=None, cluster_id=None):  # noqa: E501
    """Delete a Kubernetes cluster.

\b
Example
    vcd cse cluster delete mycluster --yes
        Delete cluster 'mycluster' without prompting.
        '--vdc' option can be used for faster command execution.
\b
    vcd cse cluster delete mycluster --force
        Force delete the native entity type cluster regardless of the state
        of the cluster. Force delete removes Runtime Defined Entity, vApp
        and DNAT rule, if any, that represents the cluster.
\b
    vcd cse cluster delete --id urn:vcloud:entity:cse:nativeCluster:1.0.0:0632c7c7-a613-427c-b4fc-9f1247da5561
        Delete cluster with cluster ID 'urn:vcloud:entity:cse:nativeCluster:1.0.0:0632c7c7-a613-427c-b4fc-9f1247da5561'.
        (--id option is supported only applicable for api version >= 35)
    """  # noqa: E501
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    try:
        client_utils.cse_restore_session(ctx)
        if not cluster_id and not name:
            # --id is not required when working with api version 33 and 34
            raise Exception(
                "Please specify cluster name (or) cluster Id. "
                "Note that '--id' flag is applicable for API versions >= 35 only.")  # noqa: E501
        client = ctx.obj['client']
        if client_utils.is_cli_for_tkg_s_only():
            if k8_runtime in shared_constants.CSE_SERVER_RUNTIMES:
                # Cannot run the command as cse cli is enabled only for native
                raise CseServerNotRunningError()
            k8_runtime = shared_constants.ClusterEntityKind.TKG_S.value
        cluster = Cluster(client, k8_runtime=k8_runtime)
        if org is None and not client.is_sysadmin():
            org = ctx.obj['profiles'].get('org_in_use')
        # Dispatch to the force or regular delete path with identical args.
        delete_op = cluster.force_delete_cluster if force \
            else cluster.delete_cluster
        result = delete_op(name, cluster_id=cluster_id, org=org, vdc=vdc)
        if len(result) == 0:
            # TODO(CLI): Update message to use vcd task wait instead
            click.secho(f"Delete cluster operation has been initiated on "
                        f"{name}, please check the status using"
                        f" 'vcd cse cluster info {name}'.", fg='yellow')
        stdout(result, ctx)
        CLIENT_LOGGER.debug(result)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e), exc_info=True)
def cluster_upgrade_plan(ctx, cluster_name, vdc, org_name, k8_runtime=None):
    """Display templates that the specified cluster can upgrade to.

\b
Examples
    vcd cse cluster upgrade-plan my-cluster
        (Supported only for vcd api version < 35)
\b
    vcd cse cluster upgrade-plan --k8-runtime native my-cluster
        (Supported only for vcd api version >= 35)
    """
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    try:
        client_utils.cse_restore_session(ctx)
        if client_utils.is_cli_for_tkg_only():
            if k8_runtime in (shared_constants.ClusterEntityKind.NATIVE.value,
                              shared_constants.ClusterEntityKind.TKG_PLUS.value):  # noqa: E501
                # Cannot run the command as cse cli is enabled only for native
                raise CseServerNotRunningError()
            k8_runtime = shared_constants.ClusterEntityKind.TKG.value
        client = ctx.obj['client']
        cluster = Cluster(client, k8_runtime=k8_runtime)
        if org_name is None and not client.is_sysadmin():
            org_name = ctx.obj['profiles'].get('org_in_use')
        templates = cluster.get_upgrade_plan(cluster_name, vdc=vdc,
                                             org=org_name)
        # Flatten each template record into the display columns.
        result = [
            {
                'Template Name': t[LocalTemplateKey.NAME],
                'Template Revision': t[LocalTemplateKey.REVISION],
                'Kubernetes': t[LocalTemplateKey.KUBERNETES_VERSION],
                'Docker-CE': t[LocalTemplateKey.DOCKER_VERSION],
                'CNI': f"{t[LocalTemplateKey.CNI]} {t[LocalTemplateKey.CNI_VERSION]}"  # noqa: E501
            }
            for t in templates
        ]
        if not templates:
            result = f"No valid upgrade targets for cluster '{cluster_name}'"
        stdout(result, ctx, sort_headers=False)
        CLIENT_LOGGER.debug(result)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e))
def cluster_share(ctx, name, acl, users, vdc, org, k8_runtime, cluster_id):
    """Share cluster with users.

Either the cluster name or cluster id is required.
By default, this command searches for the cluster in the currently logged in user's org.

Note: this command does not remove an ACL entry.

\b
Examples:
    vcd cse cluster share --name mycluster --acl FullControl user1 user2
        Share cluster 'mycluster' with FullControl access with 'user1' and 'user2'
\b
    vcd cse cluster share --id urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057 --acl ReadOnly user1
        Share TKG cluster with cluster ID 'urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057' with ReadOnly access with 'user1'
    """  # noqa: E501
    try:
        # Map the user-supplied ACL string to its access-level id; reject
        # unknown values before touching the server.
        access_level_id = shared_constants.ACCESS_LEVEL_TYPE_TO_ID.get(acl.lower())  # noqa: E501
        if not access_level_id:
            raise Exception(f'Please enter a valid access control type: '
                            f'{shared_constants.READ_ONLY}, '
                            f'{shared_constants.READ_WRITE}, or '
                            f'{shared_constants.FULL_CONTROL}')
        if not cluster_id and not name:
            raise Exception("Please specify cluster name or cluster id.")

        client_utils.cse_restore_session(ctx)
        if client_utils.is_cli_for_tkg_only():
            if k8_runtime in (shared_constants.ClusterEntityKind.NATIVE.value,
                              shared_constants.ClusterEntityKind.TKG_PLUS.value):  # noqa: E501
                # Cannot run the command as cse cli is enabled only for tkg
                raise CseServerNotRunningError()
            k8_runtime = shared_constants.ClusterEntityKind.TKG.value

        client = ctx.obj['client']
        if not org:
            org = ctx.obj['profiles'].get('org')
        user_names = list(users)
        Cluster(client, k8_runtime).share_cluster(
            cluster_id, name, user_names, access_level_id, org, vdc)
        stdout(f'Cluster {cluster_id or name} successfully shared with: {user_names}')  # noqa: E501
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e))
def cluster_delete(ctx, name, vdc, org, k8_runtime=None, cluster_id=None): """Delete a Kubernetes cluster. \b Example vcd cse cluster delete mycluster --yes Delete cluster 'mycluster' without prompting. '--vdc' option can be used for faster command execution. \b vcd cse cluster delete --id urn:vcloud:entity:cse:nativeCluster:1.0.0:0632c7c7-a613-427c-b4fc-9f1247da5561 Delete cluster with cluster ID 'urn:vcloud:entity:cse:nativeCluster:1.0.0:0632c7c7-a613-427c-b4fc-9f1247da5561'. (--id option is suported only applicable for api version >= 35) """ # noqa: E501 CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}') try: client_utils.cse_restore_session(ctx) if not (cluster_id or name): # --id is not required when working with api version 33 and 34 raise Exception( "Please specify cluster name (or) cluster Id. " "Note that '--id' flag is applicable for API versions >= 35 only." ) # noqa: E501 client = ctx.obj['client'] if client_utils.is_cli_for_tkg_only(): if k8_runtime in [ shared_constants.ClusterEntityKind.NATIVE.value, shared_constants.ClusterEntityKind.TKG_PLUS.value ]: # noqa: E501 # Cannot run the command as cse cli is enabled only for native raise CseServerNotRunningError() k8_runtime = shared_constants.ClusterEntityKind.TKG.value cluster = Cluster(client, k8_runtime=k8_runtime) if not client.is_sysadmin() and org is None: org = ctx.obj['profiles'].get('org_in_use') result = cluster.delete_cluster(name, cluster_id=cluster_id, org=org, vdc=vdc) if len(result) == 0: click.secho( f"Delete cluster operation has been initiated on " f"{name}, please check the status using" f" 'vcd cse cluster info {name}'.", fg='yellow') stdout(result, ctx) CLIENT_LOGGER.debug(result) except Exception as e: stderr(e, ctx) CLIENT_LOGGER.error(str(e))
def cluster_config(ctx, name, vdc, org, k8_runtime=None, cluster_id=None):
    """Display cluster configuration.

\b
Examples:
    vcd cse cluster config my-cluster
        (Supported only for vcd api version < 35)
\b
    vcd cse cluster config -k native my-cluster
        (Supported only for vcd api version >= 35)

    To write to a file: `vcd cse cluster config mycluster > ~/.kube/my_config`
\b
    vcd cse cluster config --id urn:vcloud:entity:cse:nativeCluster:1.0.0:0632c7c7-a613-427c-b4fc-9f1247da5561
        (--id option is supported only for vcd api version >= 35)
    """  # noqa: E501
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    try:
        if not cluster_id and not name:
            # --id is not required when working with api version 33 and 34
            raise Exception(
                "Please specify cluster name (or) cluster Id. "
                "Note that '--id' flag is applicable for API versions >= 35 only."
            )  # noqa: E501
        client_utils.cse_restore_session(ctx)
        if client_utils.is_cli_for_tkg_only():
            if k8_runtime in (shared_constants.ClusterEntityKind.NATIVE.value,
                              shared_constants.ClusterEntityKind.TKG_PLUS.value):  # noqa: E501
                # Cannot run the command as cse cli is enabled only for native
                raise CseServerNotRunningError()
            k8_runtime = shared_constants.ClusterEntityKind.TKG.value
        client = ctx.obj['client']
        cluster = Cluster(client, k8_runtime=k8_runtime)
        if org is None and not client.is_sysadmin():
            org = ctx.obj['profiles'].get('org_in_use')
        response = cluster.get_cluster_config(name, cluster_id=cluster_id,
                                              vdc=vdc, org=org)
        config_text = response.get(shared_constants.RESPONSE_MESSAGE_KEY)  # noqa: E501
        if os.name == 'nt':
            # Windows consoles expect CRLF line endings.
            config_text = config_text.replace('\n', '\r\n')
        click.secho(config_text)
        CLIENT_LOGGER.debug(config_text)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e))
def cluster_unshare(ctx, name, users, vdc, org, k8_runtime, cluster_id):
    """Remove access from current shared cluster users.

Either the cluster name or cluster id is required.
By default, this command searches for the cluster in the currently logged in user's org.

\b
Examples:
    vcd cse cluster unshare --name mycluster user1 user2
        Unshare cluster 'mycluster' with FullControl access with 'user1' and 'user2'
\b
    vcd cse cluster unshare --id urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057 user1
        Unshare TKG-S cluster with cluster ID 'urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057' with 'user1'
    """  # noqa: E501
    try:
        if not cluster_id and not name:
            raise Exception("Please specify cluster name or cluster id.")
        client_utils.cse_restore_session(ctx)
        if client_utils.is_cli_for_tkg_s_only():
            if k8_runtime in (shared_constants.ClusterEntityKind.NATIVE.value,
                              shared_constants.ClusterEntityKind.TKG_PLUS.value):  # noqa: E501
                # Cannot run the command as cse cli is enabled only for tkg
                raise CseServerNotRunningError()
            k8_runtime = shared_constants.ClusterEntityKind.TKG_S.value
        client = ctx.obj['client']
        # Users should be explicit in their intent about the org on which the
        # command needs to be executed.
        if org is None:
            if client.is_sysadmin():
                raise Exception("Need to specify cluster org since logged in user is in system org")  # noqa: E501
            org = ctx.obj['profiles'].get('org_in_use')
        # If cluster kind is not specified, let the server handle this check.
        # NOTE: intentionally after the TKG-S remap above, so the check runs
        # against the effective runtime.
        if k8_runtime:
            def_utils.raise_error_if_tkgm_cluster_operation(cluster_kind=k8_runtime)  # noqa: E501
        user_names = list(users)
        Cluster(client, k8_runtime).unshare_cluster(
            cluster_id, name, user_names, org, vdc)
        stdout(f'Cluster {cluster_id or name} successfully unshared with: {user_names}')  # noqa: E501
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e), exc_info=True)
def cluster_share_list(ctx, should_print_all, name, vdc, org, k8_runtime, cluster_id):
    """List cluster shared user information.

Either the cluster name or cluster id is required.
\b
Examples:
    vcd cse cluster share-list --name mycluster
        List shared user information for cluster 'mycluster'
\b
    vcd cse cluster share --id urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057
        List shared user information for cluster with cluster ID 'urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057'
    """  # noqa: E501
    try:
        # If cluster kind is not specified, let the server handle this check
        if k8_runtime:
            def_utils.raise_error_if_tkgm_cluster_operation(cluster_kind=k8_runtime)  # noqa: E501
        if not cluster_id and not name:
            raise Exception("Please specify cluster name or cluster id.")
        client_utils.cse_restore_session(ctx)
        if client_utils.is_cli_for_tkg_s_only():
            if k8_runtime in (shared_constants.ClusterEntityKind.NATIVE.value,
                              shared_constants.ClusterEntityKind.TKG_PLUS.value):  # noqa: E501
                # Cannot run the command as cse cli is enabled only for TKG-S
                raise CseServerNotRunningError()
            k8_runtime = shared_constants.ClusterEntityKind.TKG_S.value
        # Determine cluster type and retrieve cluster id if needed
        client = ctx.obj['client']
        # Users should be explicit in their intent about the org on which the
        # command needs to be executed.
        if org is None:
            if client.is_sysadmin():
                raise Exception("Need to specify cluster org since logged in user is in system org")  # noqa: E501
            org = ctx.obj['profiles'].get('org_in_use')
        entries = Cluster(client, k8_runtime).list_share_entries(
            cluster_id, name, org, vdc)
        client_utils.print_paginated_result(entries, should_print_all)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e), exc_info=True)
def cluster_info(ctx, name, org, vdc, k8_runtime=None, cluster_id=None):
    """Display info about a Kubernetes cluster.

\b
Example
    vcd cse cluster info mycluster
        Display detailed information about cluster 'mycluster'.
        '--vdc' option can be used for faster command execution.
\b
    vcd cse cluster info --id urn:vcloud:entity:cse:nativeCluster:1.0.0:0632c7c7-a613-427c-b4fc-9f1247da5561
        Display cluster information about cluster with ID 'urn:vcloud:entity:cse:nativeCluster:1.0.0:0632c7c7-a613-427c-b4fc-9f1247da5561'
        (--id option is supported only for api version >= 35)
    """  # noqa: E501
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    try:
        if not cluster_id and not name:
            # --id is not required when working with api version 33 and 34
            raise Exception(
                "Please specify cluster name (or) cluster Id. "
                "Note that '--id' flag is applicable for API versions >= 35 only."
            )  # noqa: E501
        client_utils.cse_restore_session(ctx)
        if client_utils.is_cli_for_tkg_only():
            if k8_runtime in (shared_constants.ClusterEntityKind.NATIVE.value,
                              shared_constants.ClusterEntityKind.TKG_PLUS.value):  # noqa: E501
                # Cannot run the command as cse cli is enabled only for native
                raise CseServerNotRunningError()
            k8_runtime = shared_constants.ClusterEntityKind.TKG.value
        client = ctx.obj['client']
        cluster = Cluster(client, k8_runtime=k8_runtime)
        if org is None and not client.is_sysadmin():
            org = ctx.obj['profiles'].get('org_in_use')
        info = cluster.get_cluster_info(name, cluster_id=cluster_id,
                                        org=org, vdc=vdc)
        stdout(info, ctx)
        CLIENT_LOGGER.debug(info)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e))
def cluster_unshare(ctx, name, users, vdc, org, k8_runtime, cluster_id):
    """Remove access from current shared cluster users.

Either the cluster name or cluster id is required.
By default, this command searches for the cluster in the currently logged in user's org.

\b
Examples:
    vcd cse cluster unshare --name mycluster user1 user2
        Unshare cluster 'mycluster' with FullControl access with 'user1' and 'user2'
\b
    vcd cse cluster unshare --id urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057 user1
        Unshare TKG cluster with cluster ID 'urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057' with 'user1'
    """  # noqa: E501
    try:
        if not cluster_id and not name:
            raise Exception("Please specify cluster name or cluster id.")
        client_utils.cse_restore_session(ctx)
        if client_utils.is_cli_for_tkg_only():
            if k8_runtime in (shared_constants.ClusterEntityKind.NATIVE.value,
                              shared_constants.ClusterEntityKind.TKG_PLUS.value):  # noqa: E501
                # Cannot run the command as cse cli is enabled only for tkg
                raise CseServerNotRunningError()
            k8_runtime = shared_constants.ClusterEntityKind.TKG.value
        client = ctx.obj['client']
        if not org:
            org = ctx.obj['profiles'].get('org')
        user_names = list(users)
        Cluster(client, k8_runtime).unshare_cluster(
            cluster_id, name, user_names, org, vdc)
        stdout(f'Cluster {cluster_id or name} successfully unshared with: {user_names}')  # noqa: E501
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e))
def cluster_share_list(ctx, should_print_all, name, vdc, org, k8_runtime, cluster_id):
    """List cluster shared user information.

Either the cluster name or cluster id is required.
\b
Examples:
    vcd cse cluster share-list --name mycluster
        List shared user information for cluster 'mycluster'
\b
    vcd cse cluster share --id urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057
        List shared user information for cluster with cluster ID 'urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057'
    """  # noqa: E501
    try:
        if not cluster_id and not name:
            raise Exception("Please specify cluster name or cluster id.")
        client_utils.cse_restore_session(ctx)
        if client_utils.is_cli_for_tkg_only():
            if k8_runtime in (shared_constants.ClusterEntityKind.NATIVE.value,
                              shared_constants.ClusterEntityKind.TKG_PLUS.value):  # noqa: E501
                # Cannot run the command as cse cli is enabled only for tkg
                raise CseServerNotRunningError()
            k8_runtime = shared_constants.ClusterEntityKind.TKG.value
        # Determine cluster type and retrieve cluster id if needed
        client = ctx.obj['client']
        if not org:
            org = ctx.obj['profiles'].get('org')
        entries = Cluster(client, k8_runtime).list_share_entries(
            cluster_id, name, org, vdc)
        client_utils.print_paginated_result(entries, should_print_all)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e))
def apply(ctx, cluster_config_file_path, generate_sample_config, k8_runtime, output, org, cluster_id):  # noqa: E501
    """Create or update a cluster from a spec file, or emit a sample spec.

    With CLUSTER_CONFIG_FILE_PATH, applies the cluster described by the
    YAML file. With the sample-generation option, prints a sample cluster
    configuration (optionally written via `output`) and returns.
    """
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    try:
        console_message_printer = utils.ConsoleMessagePrinter()
        # A spec file and the sample/output/runtime flags are mutually
        # exclusive.
        if cluster_config_file_path and (generate_sample_config or output or k8_runtime):  # noqa: E501
            console_message_printer.general_no_color(ctx.get_help())
            msg = "-s/-o/-n/-t/-k flag can't be used together with CLUSTER_CONFIG_FILE_PATH"  # noqa: E501
            CLIENT_LOGGER.error(msg)
            raise Exception(msg)
        if not cluster_config_file_path and not generate_sample_config:
            console_message_printer.general_no_color(ctx.get_help())
            msg = "No option chosen/invalid option"
            CLIENT_LOGGER.error(msg)
            raise Exception(msg)
        client = ctx.obj['client']
        if generate_sample_config:
            if not k8_runtime:
                console_message_printer.general_no_color(ctx.get_help())
                msg = "with option --sample you must specify either of options: --native or --tkg-s"  # noqa: E501
                if utils.is_environment_variable_enabled(cli_constants.ENV_CSE_TKG_PLUS_ENABLED):  # noqa: E501
                    msg += " or --tkg-plus"
                CLIENT_LOGGER.error(msg)
                raise Exception(msg)
            elif k8_runtime == shared_constants.ClusterEntityKind.TKG_PLUS.value \
                    and not utils.is_environment_variable_enabled(cli_constants.ENV_CSE_TKG_PLUS_ENABLED):  # noqa: E501
                raise Exception(f"{shared_constants.ClusterEntityKind.TKG_PLUS.value} not enabled")  # noqa: E501
            else:
                # since apply command is not exposed when CSE server is not
                # running, it is safe to get the server_rde_version from
                # VCD API version as VCD API version will be the supported by
                # CSE server.
                server_rde_version = \
                    def_utils.get_runtime_rde_version_by_vcd_api_version(
                        client.get_api_version())
                sample_cluster_config = \
                    client_sample_generator.get_sample_cluster_configuration(
                        output=output,
                        k8_runtime=k8_runtime,
                        server_rde_in_use=server_rde_version)
                console_message_printer.general_no_color(sample_cluster_config)
                return
        with open(cluster_config_file_path) as f:
            cluster_config_map = yaml.safe_load(f) or {}
        k8_runtime = cluster_config_map.get('kind')
        if not k8_runtime:
            raise Exception("Cluster kind missing from the spec.")
        if client_utils.is_cli_for_tkg_s_only():
            if k8_runtime in [shared_constants.ClusterEntityKind.NATIVE.value,
                              shared_constants.ClusterEntityKind.TKG_PLUS.value]:  # noqa: E501
                # Cannot run the command as cse cli is enabled only for native
                raise CseServerNotRunningError()
            k8_runtime = shared_constants.ClusterEntityKind.TKG_S.value
        org_name = None
        if k8_runtime == shared_constants.ClusterEntityKind.TKG_S.value:
            org_name = org
            if not org:
                org_name = ctx.obj['profiles'].get('org_in_use')
        # Fix: use the (possibly TKG-S-remapped) k8_runtime rather than
        # re-reading the raw 'kind' from the spec; the original re-read
        # silently discarded the remapping performed above.
        cluster = Cluster(client, k8_runtime=k8_runtime)
        result = cluster.apply(cluster_config_map, cluster_id=cluster_id,
                               org=org_name)
        stdout(result, ctx)
        CLIENT_LOGGER.debug(result)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e), exc_info=True)