def list_catalogs_or_items(ctx, catalog_name):
    # List all catalogs of the org in use, or — when a catalog name is given —
    # the items of that catalog (via the typed query service).
    try:
        restore_session(ctx)
        client = ctx.obj['client']
        if catalog_name is None:
            # No catalog specified: list the catalogs of the org in use.
            in_use_org_href = ctx.obj['profiles'].get('org_href')
            org = Org(client, in_use_org_href)
            result = org.list_catalogs()
        else:
            result = []
            # Sysadmins query items through the admin view.
            if is_sysadmin(ctx):
                resource_type = ResourceType.ADMIN_CATALOG_ITEM.value
            else:
                resource_type = ResourceType.CATALOG_ITEM.value
            q = client.get_typed_query(
                resource_type,
                query_result_format=QueryResultFormat.ID_RECORDS,
                equality_filter=('catalogName', catalog_name))
            records = list(q.execute())
            if len(records) == 0:
                # NOTE(review): result becomes a plain string here rather than
                # a list — presumably stdout() renders both; confirm.
                result = 'not found'
            else:
                for r in records:
                    result.append(to_dict(r, resource_type=resource_type))
        stdout(result, ctx)
    except Exception as e:
        stderr(e, ctx)
def list_disks(ctx):
    # Show the independent disks of the vdc in use, one summary dict per disk.
    try:
        restore_session(ctx, vdc_required=True)
        client = ctx.obj['client']
        vdc = VDC(client, href=ctx.obj['profiles'].get('vdc_href'))
        result = []
        for disk in vdc.get_disks():
            # Name of the VM the disk is attached to, if any.
            vm_names = ''
            if hasattr(disk, 'attached_vms') and \
                    hasattr(disk.attached_vms, 'VmReference'):
                vm_names = disk.attached_vms.VmReference.get('name')
            size_bytes = disk.get('size')
            result.append({
                'name': disk.get('name'),
                'id': extract_id(disk.get('id')),
                'owner': disk.Owner.User.get('name'),
                'size': humanfriendly.format_size(int(size_bytes)),
                'size_bytes': size_bytes,
                'status': VCLOUD_STATUS_MAP.get(int(disk.get('status'))),
                'vms_attached': vm_names
            })
        stdout(result, ctx, show_id=True)
    except Exception as e:
        stderr(e, ctx)
def delete_dhcp_pool(ctx, gateway_name, pool_id):
    # Delete the DHCP pool identified by pool_id on the named gateway.
    try:
        pool = get_dhcp_pool(ctx, gateway_name, pool_id)
        pool.delete_pool()
        stdout('DHCP Pool deleted successfully.', ctx)
    except Exception as e:
        stderr(e, ctx)
def info(ctx, catalog_name, item_name):
    # Show details of a catalog (when item_name is None) or of one catalog
    # item, including the template the item points to.
    try:
        restore_session(ctx)
        client = ctx.obj['client']
        in_use_org_href = ctx.obj['profiles'].get('org_href')
        org = Org(client, in_use_org_href)
        if item_name is None:
            catalog = org.get_catalog(catalog_name)
            result = to_dict(catalog)
            # We don't have a way to know in advance if a user has access to a
            # catalog's ACL or not. So we try to retrieve it always. If the
            # call fails due to permission issues, we silently eat the
            # exception and exclude ACL settings from the output of the current
            # command. Users who have access to ACL of the catalog will remain
            # unaffected. Also any other errors/exceptions will bubble up as
            # usual.
            try:
                access_control_settings = access_settings_to_dict(
                    org.get_catalog_access_settings(catalog_name))
                result.update(access_control_settings)
            except AccessForbiddenException:
                # Fixed: the exception variable was bound but never used
                # (lint F841); the bare except clause is sufficient.
                pass
        else:
            catalog_item = org.get_catalog_item(catalog_name, item_name)
            result = to_dict(catalog_item)
            vapp = VApp(client, href=catalog_item.Entity.get('href'))
            vapp.reload()
            template = vapp_to_dict(vapp.resource)
            # Merge template details under 'template-*' keys.
            for k, v in template.items():
                result['template-%s' % k] = v
        stdout(result, ctx)
    except Exception as e:
        stderr(e, ctx)
def list_service_certificate(ctx, gateway_name):
    # Show the service certificates configured on the gateway.
    try:
        gateway = get_gateway(ctx, gateway_name)
        certificates = gateway.list_service_certificates()
        stdout(certificates, ctx)
    except Exception as e:
        stderr(e, ctx)
def list_dhcp_pool(ctx, gateway_name):
    # Show the DHCP pools configured on the gateway.
    try:
        gateway = get_gateway(ctx, gateway_name)
        pools = gateway.list_dhcp_pools()
        stdout(pools, ctx)
    except Exception as e:
        stderr(e, ctx)
def change_shared_key(ctx, gateway_name, new_shared_key):
    # Replace the IPsec VPN pre-shared key on the gateway.
    try:
        gateway = get_gateway(ctx, gateway_name)
        gateway.change_shared_key_ipsec_vpn(new_shared_key)
        stdout('IPsec VPN shared key changed.', ctx)
    except Exception as e:
        stderr(e, ctx)
def delete_service_certificate(ctx, gateway_name, id):
    # Remove the service certificate identified by id from the gateway.
    # (Parameter name 'id' shadows the builtin but is part of the interface.)
    try:
        certificate = get_service_certificate(ctx, gateway_name, id)
        certificate.delete_certificate()
        stdout('Service certificate deleted successfully.', ctx)
    except Exception as e:
        stderr(e, ctx)
def list(ctx, gateway_name):
    # Display the NAT rules configured on the gateway.
    try:
        gateway = get_gateway(ctx, gateway_name)
        rules = gateway.list_nat_rules()
        stdout(rules, ctx)
    except Exception as e:
        stderr(e, ctx)
def delete_firewall_rule_service(ctx, name, id, protocol):
    # Drop one protocol service from a firewall rule.
    try:
        rule = get_firewall_rule(ctx, name, id)
        rule.delete_firewall_rule_service(protocol)
        stdout('Firewall rule\'s service deleted successfully', ctx)
    except Exception as e:
        stderr(e, ctx)
def info_ipsec_vpn(ctx, gateway_name, id):
    # Show details of a single IPsec VPN site.
    try:
        vpn = get_ipsec_vpn(ctx, gateway_name, id)
        stdout(vpn.get_vpn_site_info(), ctx)
    except Exception as e:
        stderr(e, ctx)
def info(ctx, gateway_name, rule_id):
    # Show details of one NAT rule.
    try:
        rule = get_nat_rule(ctx, gateway_name, rule_id)
        stdout(rule.get_nat_rule_info(), ctx)
    except Exception as e:
        stderr(e, ctx)
def info_activation_status(ctx, gateway_name):
    # Report whether IPsec VPN is activated on the gateway.
    try:
        gateway = get_gateway(ctx, gateway_name)
        status = gateway.info_activation_status_ipsec_vpn()
        stdout(status, ctx)
    except Exception as e:
        stderr(e, ctx)
def enable_activation_status(ctx, gateway_name, enabled):
    # Toggle IPsec VPN activation on the gateway.
    try:
        gateway = get_gateway(ctx, gateway_name)
        gateway.enable_activation_status_ipsec_vpn(enabled)
        stdout('IPsec VPN activation status changed.', ctx)
    except Exception as e:
        stderr(e, ctx)
def create(ctx, name, description, catalog, template, network, memory, cpu,
           disk_size, ip_allocation_mode, vm_name, hostname, storage_profile,
           accept_all_eulas):
    # Create a vApp: empty when no catalog/template is given, otherwise
    # instantiated from the named catalog template (deployed and powered on).
    try:
        restore_session(ctx, vdc_required=True)
        client = ctx.obj['client']
        vdc_href = ctx.obj['profiles'].get('vdc_href')
        vdc = VDC(client, href=vdc_href)
        if catalog is None and template is None:
            # Bare vApp with just name/description/network.
            vapp_resource = vdc.create_vapp(
                name,
                description=description,
                network=network,
                accept_all_eulas=accept_all_eulas)
        else:
            vapp_resource = vdc.instantiate_vapp(
                name,
                catalog,
                template,
                description=description,
                network=network,
                memory=memory,
                cpu=cpu,
                disk_size=disk_size,
                deploy=True,
                power_on=True,
                accept_all_eulas=accept_all_eulas,
                cust_script=None,
                ip_allocation_mode=ip_allocation_mode,
                vm_name=vm_name,
                hostname=hostname,
                storage_profile=storage_profile)
        # Echo the creation task so the caller can track progress.
        stdout(vapp_resource.Tasks.Task[0], ctx)
    except Exception as e:
        stderr(e, ctx)
def enable_logging(ctx, gateway_name, enabled):
    # Toggle IPsec VPN logging on the gateway.
    try:
        gateway = get_gateway(ctx, gateway_name)
        gateway.enable_logging_ipsec_vpn(enabled)
        stdout('IPsec VPN logging enable status changed.', ctx)
    except Exception as e:
        stderr(e, ctx)
def create(ctx, user_name, password, role_name, full_name, description, email,
           telephone, im, enabled, alert_enabled, alert_email,
           alert_email_prefix, external, default_cached, group_role,
           stored_vm_quota, deployed_vm_quota):
    # Create a user in the org in use with the given role and settings.
    try:
        # Enforce vCD's minimum password length up front for a clearer error.
        if len(password) < 6:
            raise Exception('Password must be at least 6 characters long.')
        restore_session(ctx)
        client = ctx.obj['client']
        in_use_org_href = ctx.obj['profiles'].get('org_href')
        org = Org(client, in_use_org_href)
        # Resolve the role name to its href within the org.
        role = org.get_role_record(role_name)
        role_href = role.get('href')
        result = org.create_user(
            user_name=user_name,
            password=password,
            role_href=role_href,
            full_name=full_name,
            description=description,
            email=email,
            telephone=telephone,
            im=im,
            alert_email=alert_email,
            alert_email_prefix=alert_email_prefix,
            stored_vm_quota=stored_vm_quota,
            deployed_vm_quota=deployed_vm_quota,
            is_group_role=group_role,
            is_default_cached=default_cached,
            is_external=external,
            is_alert_enabled=alert_enabled,
            is_enabled=enabled)
        stdout('User \'%s\' is successfully created.' % result.get('name'),
               ctx)
    except Exception as e:
        stderr(e, ctx)
def delete(ctx, gateway_name, rule_id):
    # Delete one NAT rule from the gateway.
    try:
        rule = get_nat_rule(ctx, gateway_name, rule_id)
        rule.delete_nat_rule()
        stdout('Nat Rule deleted successfully.', ctx)
    except Exception as e:
        stderr(e, ctx)
def create_isolated_network(ctx, name, gateway_ip, netmask, description,
                            primary_dns_ip, secondary_dns_ip, dns_suffix,
                            ip_range_start, ip_range_end, is_dhcp_enabled,
                            default_lease_time, max_lease_time,
                            dhcp_ip_range_start, dhcp_ip_range_end, is_shared):
    # Create an isolated org vdc network in the vdc in use.
    try:
        restore_session(ctx, vdc_required=True)
        client = ctx.obj['client']
        in_use_vdc_href = ctx.obj['profiles'].get('vdc_href')
        vdc = VDC(client, href=in_use_vdc_href)
        # The API wants CIDR notation; derive the prefix from the netmask.
        prefix_len = netmask_to_cidr_prefix_len(gateway_ip, netmask)
        network_cidr = gateway_ip + '/' + str(prefix_len)
        result = vdc.create_isolated_vdc_network(
            network_name=name,
            network_cidr=network_cidr,
            description=description,
            primary_dns_ip=primary_dns_ip,
            secondary_dns_ip=secondary_dns_ip,
            dns_suffix=dns_suffix,
            ip_range_start=ip_range_start,
            ip_range_end=ip_range_end,
            is_dhcp_enabled=is_dhcp_enabled,
            default_lease_time=default_lease_time,
            max_lease_time=max_lease_time,
            dhcp_ip_range_start=dhcp_ip_range_start,
            dhcp_ip_range_end=dhcp_ip_range_end,
            is_shared=is_shared)
        # Echo the creation task so the caller can track progress.
        stdout(result.Tasks.Task[0], ctx)
    except Exception as e:
        stderr(e, ctx)
def info_logging_settings(ctx, gateway_name):
    # Show the IPsec VPN logging settings of the gateway.
    try:
        gateway = get_gateway(ctx, gateway_name)
        settings = gateway.info_logging_settings_ipsec_vpn()
        stdout(settings, ctx)
    except Exception as e:
        stderr(e, ctx)
def create(ctx, name, pvdc_name, network_pool_name, allocation_model, sp_name,
           sp_limit, description, cpu_allocated, cpu_limit):
    # Create an org vdc backed by the given provider vdc, with a single
    # default storage profile.
    try:
        restore_session(ctx)
        client = ctx.obj['client']
        in_use_org_href = ctx.obj['profiles'].get('org_href')
        org = Org(client, in_use_org_href)
        # One default, enabled storage profile; limit is in MB.
        storage_profiles = [{
            'name': sp_name,
            'enabled': True,
            'units': 'MB',
            'limit': sp_limit,
            'default': True
        }]
        vdc_resource = org.create_org_vdc(
            name,
            pvdc_name,
            network_pool_name=network_pool_name,
            description=description,
            allocation_model=allocation_model,
            cpu_allocated=cpu_allocated,
            cpu_limit=cpu_limit,
            storage_profiles=storage_profiles,
            uses_fast_provisioning=True,
            is_thin_provision=True)
        # Echo the creation task so the caller can track progress.
        stdout(vdc_resource.Tasks.Task[0], ctx)
    except Exception as e:
        stderr(e, ctx)
def set_log_level(ctx, gateway_name, log_level):
    # Change the IPsec VPN log level on the gateway.
    try:
        gateway = get_gateway(ctx, gateway_name)
        gateway.set_log_level_ipsec_vpn(log_level)
        stdout('IPsec VPN log level changed.', ctx)
    except Exception as e:
        stderr(e, ctx)
def use(ctx, name):
    # Make the named vdc (within the org in use) the vdc in use; any vApp
    # selection is cleared.
    try:
        restore_session(ctx)
        client = ctx.obj['client']
        in_use_org_name = ctx.obj['profiles'].get('org_in_use')
        orgs = client.get_org_list()
        for org_resource in orgs:
            # Case-insensitive match against the org currently in use.
            if org_resource.get('name').lower() == in_use_org_name.lower():
                for link in get_links(
                        org_resource, media_type=EntityType.VDC.value):
                    if link.name == name:
                        vdc_in_use = name
                        vapp_in_use = ''
                        vapp_href = ''
                        # Fetch the vdc to verify it is reachable before
                        # switching the profile to it.
                        client.get_resource(link.href)
                        ctx.obj['profiles'].set('vdc_in_use', vdc_in_use)
                        ctx.obj['profiles'].set('vdc_href', str(link.href))
                        ctx.obj['profiles'].set('vapp_in_use', vapp_in_use)
                        ctx.obj['profiles'].set('vapp_href', vapp_href)
                        message = 'now using org: \'%s\', vdc: \'%s\', vApp:' \
                                  ' \'%s\'.' % (in_use_org_name, vdc_in_use,
                                                vapp_in_use)
                        stdout({
                            'org': in_use_org_name,
                            'vdc': vdc_in_use,
                            'vapp': vapp_in_use
                        }, ctx, message)
                        return
        # NOTE(review): reached both when the org and when only the vdc is
        # missing, but the message names the org — confirm intended.
        raise Exception('Org \'%s\' not found' % in_use_org_name)
    except Exception as e:
        stderr(e, ctx)
def list_ipsec_vpn(ctx, gateway_name):
    # Show the IPsec VPN sites configured on the gateway.
    try:
        gateway = get_gateway(ctx, gateway_name)
        vpns = gateway.list_ipsec_vpn()
        stdout(vpns, ctx)
    except Exception as e:
        stderr(e, ctx)
def capture(ctx, name, catalog, template, customizable, description):
    # Capture a vApp from the vdc in use as a template in a catalog.
    try:
        restore_session(ctx, vdc_required=True)
        client = ctx.obj['client']
        in_use_org_href = ctx.obj['profiles'].get('org_href')
        org = Org(client, in_use_org_href)
        catalog_resource = org.get_catalog(catalog)
        vdc_href = ctx.obj['profiles'].get('vdc_href')
        vdc = VDC(client, href=vdc_href)
        vapp_resource = vdc.get_vapp(name)
        overwrite = False
        if template is None:
            # Default the template name to the vApp name; an explicitly named
            # template overwrites any existing catalog item of that name.
            template = vapp_resource.get('name')
        else:
            overwrite = True
        task = org.capture_vapp(
            catalog_resource,
            vapp_href=vapp_resource.get('href'),
            catalog_item_name=template,
            description=description,
            customize_on_instantiate=customizable == 'customizable',
            overwrite=overwrite)
        stdout(task, ctx)
    except Exception as e:
        stderr(e, ctx)
def delete_ipsec_vpn(ctx, gateway_name, id):
    # Remove the IPsec VPN site identified by id from the gateway.
    try:
        vpn = get_ipsec_vpn(ctx, gateway_name, id)
        vpn.delete_ipsec_vpn()
        stdout('IPsec VPN deleted successfully.', ctx)
    except Exception as e:
        stderr(e, ctx)
def reorder_nat_rule(ctx, gateway_name, rule_id, index):
    # Move a NAT rule to the given position in the gateway's rule list.
    try:
        gateway = get_gateway(ctx, gateway_name)
        gateway.reorder_nat_rule(rule_id, position=index)
        stdout('NAT rule reordered successfully.', ctx)
    except Exception as e:
        stderr(e, ctx)
def use(ctx, name):
    # Switch to the named org; the org's first vdc (if any) becomes the vdc
    # in use and the vApp selection is cleared.
    try:
        restore_session(ctx)
        client = ctx.obj['client']
        org_resource = client.get_org_by_name(name)
        in_use_vdc = ''
        vdc_href = ''
        in_use_vapp = ''
        vapp_href = ''
        # Pick the first vdc link, if the org has any.
        first_link = next(
            iter(get_links(org_resource, media_type=EntityType.VDC.value)),
            None)
        if first_link is not None:
            in_use_vdc = first_link.name
            vdc_href = first_link.href
        profiles = ctx.obj['profiles']
        profiles.set('org_in_use', str(name))
        profiles.set('org_href', str(org_resource.get('href')))
        profiles.set('vdc_in_use', str(in_use_vdc))
        profiles.set('vdc_href', str(vdc_href))
        profiles.set('vapp_in_use', str(in_use_vapp))
        profiles.set('vapp_href', vapp_href)
        message = 'now using org: \'%s\', vdc: \'%s\', vApp: \'%s\'.' \
            % (name, in_use_vdc, in_use_vapp)
        stdout({
            'org': name,
            'vdc': in_use_vdc,
            'vapp': in_use_vapp
        }, ctx, message)
    except Exception as e:
        stderr(e, ctx)
def list_firewall_rule_service(ctx, name, id):
    # Show the protocol services attached to a firewall rule.
    try:
        rule = get_firewall_rule(ctx, name, id)
        stdout(rule.list_firewall_rule_service(), ctx)
    except Exception as e:
        stderr(e, ctx)
def info_dhcp_pool(ctx, gateway_name, pool_id):
    # Show details of one DHCP pool on the gateway.
    try:
        pool = get_dhcp_pool(ctx, gateway_name, pool_id)
        stdout(pool.get_pool_info(), ctx)
    except Exception as e:
        stderr(e, ctx)
def info(ctx, catalog_name, item_name):
    # Show details of a catalog (when item_name is None) or of one catalog
    # item, including the template the item points to.
    try:
        restore_session(ctx)
        client = ctx.obj['client']
        in_use_org_href = ctx.obj['profiles'].get('org_href')
        org = Org(client, in_use_org_href)
        if item_name is None:
            catalog = org.get_catalog(catalog_name)
            result = to_dict(catalog)
            # Consistency/robustness fix: there is no way to know in advance
            # whether the user may read the catalog's ACL; the sibling
            # catalog-info command silently omits ACL data on
            # AccessForbiddenException instead of failing the whole command.
            # Mirror that behavior here. Other exceptions still bubble up.
            try:
                access_control_settings = access_settings_to_dict(
                    org.get_catalog_access_settings(catalog_name))
                result.update(access_control_settings)
            except AccessForbiddenException:
                pass
        else:
            catalog_item = org.get_catalog_item(catalog_name, item_name)
            result = to_dict(catalog_item)
            vapp = VApp(client, href=catalog_item.Entity.get('href'))
            vapp.reload()
            template = vapp_to_dict(vapp.resource)
            # Merge template details under 'template-*' keys.
            for k, v in template.items():
                result['template-%s' % k] = v
        stdout(result, ctx)
    except Exception as e:
        stderr(e, ctx)
def download(ctx, vapp_name, vm_name, vc_host, vc_user, vc_password,
             guest_user, guest_password, source, output):
    # Download a file from a guest VM via vSphere guest operations, writing
    # to stdout or to the given output file object.
    try:
        client = ctx.obj['client']
        vdc_href = ctx.obj['profiles'].get('vdc_href')
        # Fixed: VDC takes the href via the 'href' keyword (as every sibling
        # command does); 'vdc_href=' is not a valid parameter and raised a
        # TypeError before the download ever started.
        vdc = VDC(client, href=vdc_href)
        vapp_resource = vdc.get_vapp(vapp_name)
        va = VApp(client, resource=vapp_resource)
        vs = VSphere(vc_host, vc_user, vc_password)
        # Resolve the VM's vCenter moref through the vApp, then connect.
        moid = va.get_vm_moid(vm_name)
        vs.connect()
        vm = vs.get_vm_by_moid(moid)
        result = vs.download_file_from_guest(vm, guest_user, guest_password,
                                             source)
        if result.status_code == 200:
            if output is None:
                click.secho(result.content)
            else:
                output.write(result.content)
        else:
            # Surface the server's error payload to the user.
            raise Exception(result.content)
    except Exception as e:
        stderr(e, ctx)
def cluster_resize(ctx, cluster_name, node_count, org_name, vdc_name):
    """Resize the Ent-PKS to contain the specified number of worker nodes.

    Clusters that use native Kubernetes provider can not be sized down
    (use 'vcd cse node delete' command to do so).
    """
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    try:
        client_utils.cse_restore_session(ctx)
        client = ctx.obj['client']
        # Tenant users default to the org currently in use.
        if not client.is_sysadmin() and org_name is None:
            org_name = ctx.obj['profiles'].get('org_in_use')
        resize_result = PksCluster(client).resize_cluster(
            cluster_name, node_count=node_count, org=org_name, vdc=vdc_name)
        stdout(resize_result, ctx)
        CLIENT_LOGGER.debug(resize_result)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e))
def info(ctx, name):
    # Show details of the named vdc within the org in use.
    try:
        client = ctx.obj['client']
        in_use_org_name = ctx.obj['profiles'].get('org_in_use')
        in_use_vdc = ctx.obj['profiles'].get('vdc_in_use')
        orgs = client.get_org_list()
        result = {}
        vdc_resource = None
        # NOTE(review): the hasattr guard is evaluated per element but tests
        # 'orgs' itself; orgs.Org is accessed before the guard can help.
        for org in [o for o in orgs.Org if hasattr(orgs, 'Org')]:
            if org.get('name') == in_use_org_name:
                resource = client.get_resource(org.get('href'))
                for v in get_links(resource, media_type=EntityType.VDC.value):
                    if v.name == name:
                        vdc_resource = client.get_resource(v.href)
                        result = vdc_to_dict(vdc_resource)
                        # Flag whether this vdc is the one currently in use.
                        result['in_use'] = in_use_vdc == name
                        result['org'] = in_use_org_name
                        break
        if vdc_resource is None:
            raise Exception('not found')
        stdout(result, ctx)
    except Exception as e:
        stderr(e, ctx)
def amqp(ctx):
    """Manages AMQP settings in vCloud Director.

\b
    Examples
        vcd system amqp info
            Get details of AMQP configuration.
\b
        vcd -j system amqp info > amqp-config.json
            Save current AMQP configuration to file.
\b
        vcd system amqp test amqp-config.json --password guest
            Test AMQP configuration.
\b
        vcd system amqp set amqp-config.json --password guest
            Set AMQP configuration.
    """
    # Group-level hook: ensure a live session before any subcommand runs.
    if ctx.invoked_subcommand is not None:
        try:
            restore_session(ctx)
        except Exception as e:
            stderr(e, ctx)
def use(ctx, name):
    # Make the named vdc (within the org in use) the vdc in use.
    try:
        client = ctx.obj['client']
        in_use_org_name = ctx.obj['profiles'].get('org_in_use')
        orgs = client.get_org_list()
        for org in [o for o in orgs.Org if hasattr(orgs, 'Org')]:
            if org.get('name') == in_use_org_name:
                resource = client.get_resource(org.get('href'))
                for v in get_links(resource, media_type=EntityType.VDC.value):
                    if v.name == name:
                        # Fetch the vdc to verify it is reachable before
                        # switching the profile to it.
                        client.get_resource(v.href)
                        ctx.obj['profiles'].set('vdc_in_use', str(name))
                        ctx.obj['profiles'].set('vdc_href', str(v.href))
                        message = 'now using org: \'%s\', vdc: \'%s\'.' % \
                                  (in_use_org_name, name)
                        stdout({
                            'org': in_use_org_name,
                            # Fixed: the original referenced an undefined name
                            # 'vdc' here, raising NameError on every success
                            # path; the selected vdc is 'name'.
                            'vdc': name
                        }, ctx, message)
                        return
        raise Exception('not found')
    except Exception as e:
        stderr(e, ctx)
def extension(ctx): """Manage Extension Services in vCloud Director. \b Examples vcd system extension list List available extension services. \b vcd system extension create cse cse cse vcdext '/api/cluster, /api/cluster/.*, /api/cluster/.*/.*' Register a new extension service named 'cse'. \b vcd system extension delete cse Unregister an extension service named 'cse'. \b vcd system extension info cse Get details of an extension service named 'cse'. """ # NOQA if ctx.invoked_subcommand is not None: try: restore_session(ctx) ctx.obj['ext'] = Extension(ctx.obj['client']) except Exception as e: stderr(e, ctx)
def task(ctx):
    """Work with tasks in vCloud Director.

\b
    Examples
        vcd task list running
            Get list of running tasks.
\b
        vcd task info 4a115aa5-9657-4d97-a8c2-3faf43fb45dd
            Get details of task by id.
\b
        vcd task wait 4a115aa5-9657-4d97-a8c2-3faf43fb45dd
            Wait until task is complete.
\b
        vcd task update aborted 4a115aa5-9657-4d97-a8c2-3faf43fb45dd
            Abort task by id, requires login as 'system administrator'.
    """
    # Group-level hook: ensure a live session before any subcommand runs.
    if ctx.invoked_subcommand is not None:
        try:
            restore_session(ctx)
        except Exception as e:
            stderr(e, ctx)
def cluster_config(ctx, cluster_name, vdc, org):
    """Display Ent-PKS cluster configuration.

    To write to a file: `vcd cse pks-cluster config mycluster > ~/.kube/my_config`  # noqa: E501
    """
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    try:
        client_utils.cse_restore_session(ctx)
        client = ctx.obj['client']
        cluster = PksCluster(client)
        # Tenant users default to the org currently in use.
        if not client.is_sysadmin() and org is None:
            org = ctx.obj['profiles'].get('org_in_use')
        cluster_config = cluster.get_cluster_config(
            cluster_name, vdc=vdc, org=org).get(RESPONSE_MESSAGE_KEY)
        # Config information with linux new-line should be converted to
        # carriage-return to output in windows console.
        if os.name == 'nt':
            cluster_config = cluster_config.replace('\n', '\r\n')
        click.secho(cluster_config)
        CLIENT_LOGGER.debug(cluster_config)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e))
def list_templates(ctx):
    """Display templates that can be used to deploy native clusters."""
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    try:
        client_utils.cse_restore_session(ctx)
        templates = Template(ctx.obj['client']).get_templates()
        CLIENT_LOGGER.debug(templates)
        # Map raw field names to the column headers shown to the user.
        value_field_to_display_field = {
            'name': 'Name',
            'revision': 'Revision',
            'is_default': 'Default',
            'catalog': 'Catalog',
            'catalog_item': 'Catalog Item',
            'description': 'Description'
        }
        filtered = client_utils.filter_columns(
            templates, value_field_to_display_field)
        stdout(filtered, ctx, sort_headers=False)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e))
def ovdc_disable(ctx, ovdc_name, org_name, disable_native,
                 disable_tkg_plus=None,
                 remove_cp_from_vms_on_disable=False):
    """Disable Kubernetes cluster deployment for an org VDC.

\b
    Examples
        vcd cse ovdc disable --native --org org1 ovdc1
            Disable native cluster deployment in ovdc1 of org1.
            Supported only for vcd api version >= 35.
\b
        vcd cse ovdc disable --native --org org1 --force ovdc1
            Force disable native cluster deployment in ovdc1 of org1.
            Replaces CSE policies with VCD default policies.
            Supported only for vcd api version >= 35.
\b
        vcd cse ovdc disable ovdc3
            Disable ovdc3 for any further native cluster deployments.
            Supported only for vcd api version < 35.
    """
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    if not (disable_native or disable_tkg_plus):
        msg = "Please specify at least one k8 runtime to disable"
        stderr(msg, ctx)
        CLIENT_LOGGER.error(msg)
        # NOTE(review): no return here — this relies on stderr() aborting the
        # command; confirm it exits, otherwise execution falls through with an
        # empty runtime list.
    # Collect the runtimes the user asked to disable.
    k8_runtime = []
    if disable_native:
        k8_runtime.append(shared_constants.ClusterEntityKind.NATIVE.value)
    if disable_tkg_plus:
        k8_runtime.append(shared_constants.ClusterEntityKind.TKG_PLUS.value)
    try:
        client_utils.cse_restore_session(ctx)
        client = ctx.obj['client']
        # Only sysadmins may change ovdc-level k8s runtime policy.
        if client.is_sysadmin():
            ovdc = Ovdc(client)
            if org_name is None:
                org_name = ctx.obj['profiles'].get('org_in_use')
            result = ovdc.update_ovdc(
                enable=False,
                ovdc_name=ovdc_name,
                org_name=org_name,
                k8s_runtime=k8_runtime,
                remove_cp_from_vms_on_disable=remove_cp_from_vms_on_disable)  # noqa: E501
            stdout(result, ctx)
            CLIENT_LOGGER.debug(result)
        else:
            msg = "Insufficient permission to perform operation."
            stderr(msg, ctx)
            CLIENT_LOGGER.error(msg)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e))
def cluster_share_list(ctx, should_print_all, name, vdc, org, k8_runtime, cluster_id): """List cluster shared user information. Either the cluster name or cluster id is required. \b Examples: vcd cse cluster share-list --name mycluster List shared user information for cluster 'mycluster' \b vcd cse cluster share --id urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057 List shared user information for cluster with cluster ID 'urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057' """ # noqa: E501 try: if not (cluster_id or name): raise Exception("Please specify cluster name or cluster id.") client_utils.cse_restore_session(ctx) if client_utils.is_cli_for_tkg_only(): if k8_runtime in [ shared_constants.ClusterEntityKind.NATIVE.value, shared_constants.ClusterEntityKind.TKG_PLUS.value ]: # noqa: E501 # Cannot run the command as cse cli is enabled only for tkg raise CseServerNotRunningError() k8_runtime = shared_constants.ClusterEntityKind.TKG.value # Determine cluster type and retrieve cluster id if needed client = ctx.obj['client'] if not org: ctx_profiles = ctx.obj['profiles'] org = ctx_profiles.get('org') cluster = Cluster(client, k8_runtime) share_entries = cluster.list_share_entries(cluster_id, name, org, vdc) client_utils.print_paginated_result(share_entries, should_print_all) except Exception as e: stderr(e, ctx) CLIENT_LOGGER.error(str(e))
def acl(ctx):
    """Work with catalogs access control list in the current Organization.

\b
    Examples
        vcd catalog acl add my-catalog 'org:TestOrg1:Change' \\
            'user:TestUser1:FullControl' 'org:TestOrg2'
            Add one or more access setting to the specified catalog.
            access-list is specified in the format
            '<subject-type>:<subject-name>:<access-level>'
            subject-type is one of 'org' ,'user'
            subject-name is either username or org name
            access-level is one of 'ReadOnly', 'Change', 'FullControl'
            'ReadOnly' by default. eg. 'org:TestOrg2'
\b
        vcd catalog acl remove my-catalog 'org:TestOrg1' 'user:TestUser1'
            Remove one or more acl from the specified catalog. access-list is
            specified in the format '<subject-type>:<subject-name>'
            subject-type is one of 'org' ,'user'
            subject-name is either username or org name
\b
        vcd catalog acl share my-catalog --access-level ReadOnly
            Share catalog access to all members of the current organization
\b
        vcd catalog acl unshare my-catalog
            Unshare catalog access from all members of the current
            organization
\b
        vcd catalog acl list my-catalog
            List acl of a catalog
    """
    # Group-level hook: ensure a live session before any subcommand runs.
    if ctx.invoked_subcommand is not None:
        try:
            restore_session(ctx)
        except Exception as e:
            stderr(e, ctx)
def cluster_create(ctx, name, vdc, node_count, cpu, memory, network_name,
                   storage_profile, ssh_key_file, template, enable_nfs,
                   disable_rollback, org_name):
    """Create a Kubernetes cluster."""
    try:
        restore_session(ctx)
        client = ctx.obj['client']
        cluster = Cluster(client)
        # Fall back to the vdc/org in use when not given explicitly.
        if vdc is None:
            vdc = ctx.obj['profiles'].get('vdc_in_use')
            if not vdc:
                raise Exception(f"Virtual datacenter context is not set. "
                                "Use either command 'vcd vdc use' or option "
                                "'--vdc' to set the vdc context.")
        if org_name is None:
            org_name = ctx.obj['profiles'].get('org_in_use')
        ssh_key = None
        if ssh_key_file is not None:
            ssh_key = ssh_key_file.read()
        # Note: '--disable-rollback' inverts into the API's 'rollback' flag.
        result = cluster.create_cluster(
            vdc,
            network_name,
            name,
            node_count=node_count,
            cpu=cpu,
            memory=memory,
            storage_profile=storage_profile,
            ssh_key=ssh_key,
            template=template,
            enable_nfs=enable_nfs,
            rollback=not disable_rollback,
            org=org_name)
        stdout(result, ctx)
    except Exception as e:
        stderr(e, ctx)
def create(ctx, name, node_count, cpu, memory, network_name, storage_profile,
           ssh_key_file, template, enable_nfs, disable_rollback):
    """Create a Kubernetes cluster."""
    try:
        restore_session(ctx, vdc_required=True)
        client = ctx.obj['client']
        cluster = Cluster(client)
        ssh_key = None
        if ssh_key_file is not None:
            ssh_key = ssh_key_file.read()
        # NOTE(review): the sibling cluster-create command passes
        # 'rollback=not disable_rollback'; here the flag is forwarded as-is
        # under a different keyword — confirm this API accepts
        # 'disable_rollback' with this polarity.
        result = cluster.create_cluster(ctx.obj['profiles'].get('vdc_in_use'),
                                        network_name,
                                        name,
                                        node_count=node_count,
                                        cpu=cpu,
                                        memory=memory,
                                        storage_profile=storage_profile,
                                        ssh_key=ssh_key,
                                        template=template,
                                        enable_nfs=enable_nfs,
                                        disable_rollback=disable_rollback)
        stdout(result, ctx)
    except Exception as e:
        stderr(e, ctx)
def clone(ctx, original_role_name, new_role_name, org_name, description):
    # Clone an existing role into a new role, copying its description (unless
    # one is given) and its full set of rights.
    try:
        restore_session(ctx)
        client = ctx.obj['client']
        if org_name is not None:
            org_href = client.get_org_by_name(org_name).get('href')
        else:
            org_href = ctx.obj['profiles'].get('org_href')
        org = Org(client, href=org_href)
        source_resource = org.get_role_resource(original_role_name)
        # Default to the source role's description.
        if description is None:
            description = to_dict(source_resource)['Description']
        # Gather the source role's right names.
        source_role = Role(client, resource=source_resource)
        rights = [entry['name'] for entry in source_role.list_rights()]
        new_role = org.create_role(new_role_name, description, rights)
        stdout(to_dict(new_role, exclude=['Link', 'RightReferences']), ctx)
    except Exception as e:
        stderr(e, ctx)
def info(ctx, name):
    # Show details of a routed org vdc network.
    try:
        vdc = _get_vdc_ref(ctx)
        routed_network = vdc.get_routed_orgvdc_network(name)
        output = {}
        output['fence_mode'] = routed_network.Configuration.FenceMode
        output['is_retail_info'] = \
            routed_network.Configuration.RetainNetInfoAcrossDeployments
        # Fixed: these optional elements are read from
        # routed_network.Configuration, so the hasattr guards must test
        # Configuration as well — the originals tested the network element
        # itself, so the guards never fired and the fields were never shown.
        if hasattr(routed_network.Configuration, 'SubInterface'):
            output['is_sub_interface'] = \
                routed_network.Configuration.SubInterface
        if hasattr(routed_network.Configuration, 'DistributedInterface'):
            output['is_distributed_interface'] = \
                routed_network.Configuration.DistributedInterface
        if hasattr(routed_network.Configuration, 'GuestVlanAllowed'):
            output['is_guest_vlan_allowed'] = \
                routed_network.Configuration.GuestVlanAllowed
        if hasattr(routed_network, 'ProviderInfo'):
            output['provider_info'] = routed_network.ProviderInfo
        if hasattr(routed_network, 'IsShared'):
            output['is_shared'] = routed_network.IsShared
        if hasattr(routed_network, 'VimPortGroupRef'):
            output['vim_server_href'] = \
                routed_network.VimPortGroupRef.VimServerRef.get('href')
            output['vim_server_id'] = \
                routed_network.VimPortGroupRef.VimServerRef.get('id')
            output['vim_server_type'] = \
                routed_network.VimPortGroupRef.VimServerRef.get('type')
            output['vim_server_moref'] = routed_network.VimPortGroupRef.MoRef
            output['vim_server_vim_object_type'] = \
                routed_network.VimPortGroupRef.VimObjectType
        stdout(output, ctx)
    except Exception as e:
        stderr(e, ctx)
def update_ipsec_vpn(ctx, gateway_name, id, name, local_id, peer_id, local_ip,
                     peer_ip, local_subnet, peer_subnet, pre_shared_key,
                     description, encryption_protocol, authentication_mode,
                     dh_group, mtu, enabled, enable_pfs):
    # Update the properties of an existing IPsec VPN site on the gateway.
    try:
        ipsec_vpn_obj = get_ipsec_vpn(ctx, gateway_name, id)
        # NOTE(review): the pre-shared key is forwarded as
        # 'shared_secret_encrypted' — confirm the API expects the plain key
        # under this keyword.
        ipsec_vpn_obj.update_ipsec_vpn(
            name=name,
            peer_id=peer_id,
            peer_ip_address=peer_ip,
            local_id=local_id,
            local_ip_address=local_ip,
            local_subnet=local_subnet,
            peer_subnet=peer_subnet,
            shared_secret_encrypted=pre_shared_key,
            encryption_protocol=encryption_protocol,
            authentication_mode=authentication_mode,
            dh_group=dh_group,
            mtu=mtu,
            description=description,
            is_enabled=enabled,
            enable_pfs=enable_pfs)
        stdout('IPsec VPN updated successfully.', ctx)
    except Exception as e:
        stderr(e, ctx)
def ovdc_enable(ctx, ovdc_name, pks_plan, pks_cluster_domain, org_name):
    """Set Kubernetes provider to be Ent-PKS for an org VDC."""
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    try:
        client_utils.cse_restore_session(ctx)
        client = ctx.obj['client']
        # Only sysadmins may change ovdc-level k8s provider settings.
        if client.is_sysadmin():
            if org_name is None:
                org_name = ctx.obj['profiles'].get('org_in_use')
            result = PksOvdc(client).update_ovdc(
                enable=True,
                ovdc_name=ovdc_name,
                org_name=org_name,
                pks_plan=pks_plan,
                pks_cluster_domain=pks_cluster_domain)
            stdout(result, ctx)
            CLIENT_LOGGER.debug(result)
        else:
            msg = "Insufficient permission to perform operation."
            stderr(msg, ctx)
            CLIENT_LOGGER.error(msg)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e), exc_info=True)
def list_ovdcs(ctx, should_print_all=False):
    """Display org VDCs in vCD that are visible to the logged in user.

\b
    Example
        vcd cse ovdc list
            Display ovdcs in vCD that are visible to the logged in user.
            The user might be prompted if more results needs to be displayed
\b
        vcd cse ovdc list -A
            Display ovdcs in vCD that are visible to the logged in user
            without prompting the user.
    """
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    try:
        client_utils.cse_restore_session(ctx)
        ovdc = Ovdc(ctx.obj['client'])
        client_utils.print_paginated_result(
            ovdc.list_ovdc(),
            should_print_all=should_print_all,
            logger=CLIENT_LOGGER)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e))
def ovdc_enable(ctx, ovdc_name, org_name):
    """Set Kubernetes provider for an org VDC."""
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    try:
        restore_session(ctx)
        client = ctx.obj['client']
        # Only sysadmins may change ovdc-level k8s provider settings.
        if client.is_sysadmin():
            if org_name is None:
                org_name = ctx.obj['profiles'].get('org_in_use')
            result = Ovdc(client).update_ovdc_for_k8s(
                enable=True,
                ovdc_name=ovdc_name,
                org_name=org_name,
                k8s_provider=K8sProvider.NATIVE)
            stdout(result, ctx)
            CLIENT_LOGGER.debug(result)
        else:
            msg = "Insufficient permission to perform operation."
            stderr(msg, ctx)
            CLIENT_LOGGER.error(msg)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e))
def list_clusters(ctx, vdc, org_name, should_print_all):
    """Display clusters in vCD that are visible to the logged in user.

\b
    Examples
        vcd cse cluster list
            Display clusters in vCD that are visible to the logged in user.
\b
        vcd cse cluster list -vdc ovdc1
            Display clusters in vdc 'ovdc1'.
    """
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    try:
        client_utils.cse_restore_session(ctx)
        client = ctx.obj['client']
        # Tenant users default to the org currently in use.
        if not client.is_sysadmin() and org_name is None:
            org_name = ctx.obj['profiles'].get('org_in_use')
        clusters = Cluster(client).list_clusters(vdc=vdc, org=org_name)
        client_utils.print_paginated_result(
            clusters,
            should_print_all=should_print_all,
            logger=CLIENT_LOGGER)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e))
def pwd(ctx):
    """Current resources in use
    """
    try:
        restore_session(ctx)
        profiles = ctx.obj['profiles']
        # Snapshot the connection context from the stored profile.
        current = {
            'host': profiles.get('host'),
            'user': profiles.get('user'),
            'org': profiles.get('org_in_use'),
            'vdc': profiles.get('vdc_in_use'),
            'vapp': profiles.get('vapp_in_use')
        }
        message = ('connected to %s as \'%s\'\n using org: \'%s\', vdc: '
                   '\'%s\', vApp: \'%s\'.') % (current['host'],
                                               current['user'],
                                               current['org'],
                                               current['vdc'],
                                               current['vapp'])
        stdout(current, ctx, message)
    except Exception as e:
        stderr(e, ctx)
def add_vm(ctx, name, source_vapp, source_vm, catalog, target_vm, hostname,
           network, ip_allocation_mode, storage_profile, password_auto,
           accept_all_eulas):
    """Add a VM to an existing vApp in the current vDC.

    The source VM is resolved either from another vApp in the vDC or, when
    a catalog name is supplied, from a vApp template in that catalog. Any
    error is reported through the CLI's standard error channel.
    """
    try:
        restore_session(ctx, vdc_required=True)
        client = ctx.obj['client']
        org = Org(client, ctx.obj['profiles'].get('org_href'))
        vdc = VDC(client, href=ctx.obj['profiles'].get('vdc_href'))
        # Resolve the source vApp: a sibling vApp in the vDC, or a catalog
        # template when --catalog was supplied.
        if catalog is None:
            source_vapp_resource = vdc.get_vapp(source_vapp)
        else:
            catalog_item = org.get_catalog_item(catalog, source_vapp)
            source_vapp_resource = client.get_resource(
                catalog_item.Entity.get('href'))
        # Explicit check instead of `assert`, which is stripped under -O.
        if source_vapp_resource is None:
            raise Exception('Unable to resolve source vApp.')
        vapp = VApp(client, resource=vdc.get_vapp(name))
        # Build the VM spec from only the options the user provided.
        spec = {'source_vm_name': source_vm, 'vapp': source_vapp_resource}
        if target_vm is not None:
            spec['target_vm_name'] = target_vm
        if hostname is not None:
            spec['hostname'] = hostname
        if network is not None:
            spec['network'] = network
            # ip_allocation_mode is only meaningful with a network.
            spec['ip_allocation_mode'] = ip_allocation_mode
        if storage_profile is not None:
            spec['storage_profile'] = vdc.get_storage_profile(storage_profile)
        if password_auto is not None:
            spec['password_auto'] = password_auto
        task = vapp.add_vms([spec], all_eulas_accepted=accept_all_eulas)
        stdout(task, ctx)
    except Exception as e:
        stderr(e, ctx)
def delete_nodes(ctx, cluster_name, node_names, org, vdc):
    """Delete node(s) in a cluster that uses native Kubernetes provider.

\b
    Example
        vcd cse node delete mycluster node-xxxx --yes
            Delete node 'node-xxxx' in cluster 'mycluster' without prompting.
    """
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    try:
        client_utils.cse_restore_session(ctx)
        client = ctx.obj['client']
        # Tenant users are scoped to their logged-in org by default.
        if org is None and not client.is_sysadmin():
            org = ctx.obj['profiles'].get('org_in_use')
        result = Cluster(client).delete_nodes(
            cluster_name, list(node_names), org=org, vdc=vdc)
        stdout(result, ctx)
        CLIENT_LOGGER.debug(result)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e))
def deploy(ctx, name, vm_names, power_on, force_customization):
    """Deploy a vApp, or only the named VMs inside it."""
    try:
        restore_session(ctx, vdc_required=True)
        client = ctx.obj['client']
        vdc = VDC(client, href=ctx.obj['profiles'].get('vdc_href'))
        vapp = VApp(client, resource=vdc.get_vapp(name))
        # NOTE(review): whenever these options are supplied (not None) they
        # are overwritten with fixed constants. That matches the original
        # behavior exactly, but looks inverted — verify against the
        # command's option declarations (not visible here).
        if power_on is not None:
            power_on = False
        if force_customization is not None:
            force_customization = True
        if not vm_names:
            # No VM names given: deploy the whole vApp.
            stdout(vapp.deploy(power_on=power_on), ctx)
        else:
            # Deploy each named VM individually.
            for vm_name in vm_names:
                vm = VM(client, href=vapp.get_vm(vm_name).get('href'))
                vm.reload()
                task = vm.deploy(power_on=power_on,
                                 force_customization=force_customization)
                stdout(task, ctx)
    except Exception as e:
        stderr(e, ctx)
def cluster_delete(ctx, cluster_name, vdc, org):
    """Delete an Ent-PKS cluster."""
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    try:
        client_utils.cse_restore_session(ctx)
        client = ctx.obj['client']
        # Tenant users are scoped to their logged-in org by default.
        if org is None and not client.is_sysadmin():
            org = ctx.obj['profiles'].get('org_in_use')
        result = PksCluster(client).delete_cluster(cluster_name, org, vdc)
        # result is empty for delete cluster operation on Ent-PKS clusters.
        # In that specific case, below check helps to print out a meaningful
        # message to users.
        if not result:
            msg = f"Delete cluster operation has been initiated on " \
                  f"{cluster_name}, please check the status using" \
                  f" 'vcd cse pks-cluster info {cluster_name}'."
            click.secho(msg, fg='yellow')
            CLIENT_LOGGER.debug(msg)
        stdout(result, ctx)
        CLIENT_LOGGER.debug(result)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e))
def create_external_network(ctx, name, vc_name, port_group, gateway_ip,
                            netmask, ip_range, description, primary_dns_ip,
                            secondary_dns_ip, dns_suffix):
    """Create an external network backed by a vCenter port group."""
    try:
        restore_session(ctx)
        client = ctx.obj['client']
        platform = Platform(client)
        # Collect the creation parameters once, then issue the request.
        params = {
            'name': name,
            'vim_server_name': vc_name,
            'port_group_names': port_group,
            'gateway_ip': gateway_ip,
            'netmask': netmask,
            'ip_ranges': ip_range,
            'description': description,
            'primary_dns_ip': primary_dns_ip,
            'secondary_dns_ip': secondary_dns_ip,
            'dns_suffix': dns_suffix,
        }
        ext_net = platform.create_external_network(**params)
        # Surface the async creation task, then a confirmation line.
        task = ext_net['{' + NSMAP['vcloud'] + '}Tasks'].Task[0]
        stdout(task, ctx)
        stdout('External network created successfully.', ctx)
    except Exception as e:
        stderr(e, ctx)
def ovdc_enable(ctx, ovdc_name, org_name, enable_native, enable_tkg_plus=None):
    """Set Kubernetes provider for an org VDC.

\b
    Example
        vcd cse ovdc enable --native --org org1 ovdc1
            Enable native cluster deployment in ovdc1 of org1.
            Supported only for vcd api version >= 35.
\b
        vcd cse ovdc enable ovdc1
            Enable ovdc1 for native cluster deployment.
            Supported only for vcd api version < 35.
    """
    CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
    if not (enable_native or enable_tkg_plus):
        msg = "Please specify at least one k8 runtime to enable"
        stderr(msg, ctx)
        CLIENT_LOGGER.error(msg)
        # BUG FIX: previously execution fell through here and called
        # update_ovdc with an empty runtime list despite the error above.
        return
    # Translate the boolean flags into the runtime identifiers the API takes.
    k8_runtime = []
    if enable_native:
        k8_runtime.append(shared_constants.ClusterEntityKind.NATIVE.value)
    if enable_tkg_plus:
        k8_runtime.append(shared_constants.ClusterEntityKind.TKG_PLUS.value)
    try:
        client_utils.cse_restore_session(ctx)
        client = ctx.obj['client']
        if client.is_sysadmin():
            ovdc = Ovdc(client)
            # Fall back to the org currently in use when none was given.
            if org_name is None:
                org_name = ctx.obj['profiles'].get('org_in_use')
            result = ovdc.update_ovdc(enable=True,
                                      ovdc_name=ovdc_name,
                                      org_name=org_name,
                                      k8s_runtime=k8_runtime)
            stdout(result, ctx)
            CLIENT_LOGGER.debug(result)
        else:
            msg = "Insufficient permission to perform operation."
            stderr(msg, ctx)
            CLIENT_LOGGER.error(msg)
    except Exception as e:
        stderr(e, ctx)
        CLIENT_LOGGER.error(str(e))
def vapp(ctx):
    """Manage vApps in vCloud Director.

\b
    Description
        The vapp command manages vApps.
\b
        'vapp create' creates a new vApp in the current vDC. When '--catalog'
        and '--template' are not provided, it creates an empty vApp and VMs
        can be added later. When specifying a template in a catalog, it
        creates an instance of the catalog template.
\b
        'vapp add-vm' adds a VM to the vApp. When '--catalog' is used, the
        <source-vapp> parameter refers to a template in the specified catalog
        and the command will instantiate the <source-vm> found in the
        template. If '--catalog' is not used, <source-vapp> refers to another
        vApp in the vDC and the command will copy the <source-vm> found in
        the vApp. The name of the VM and other options can be customized
        when the VM is added to the vApp.
\b
    Examples
        vcd vapp list
            Get list of vApps in current virtual datacenter.
\b
        vcd vapp list vapp1
            Get list of VMs in vApp 'vapp1'.
\b
        vcd vapp info vapp1
            Get details of the vApp 'vapp1'.
\b
        vcd vapp create vapp1
            Create an empty vApp with name 'vapp1'.
\b
        vcd vapp create vapp1 --network net1
            Create an empty vApp connected to a network.
\b
        vcd vapp create vapp1 -c catalog1 -t template1
            Instantiate a vApp from a catalog template.
\b
        vcd vapp create vapp1 -c catalog1 -t template1 \\
                 --cpu 4 --memory 4096 --disk-size 20000 \\
                 --network net1 --ip-allocation-mode pool \\
                 --hostname myhost --vm-name vm1 --accept-all-eulas \\
                 --storage-profile '*'
            Instantiate a vApp with customized settings.
\b
        vcd vapp delete vapp1 --yes --force
            Delete a vApp.
\b
        vcd --no-wait vapp delete vapp1 --yes --force
            Delete a vApp without waiting for completion.
\b
        vcd vapp update-lease vapp1 7776000
            Set vApp lease to 90 days.
\b
        vcd vapp update-lease vapp1 0
            Set vApp lease to no expiration.
\b
        vcd vapp shutdown vapp1 --yes
            Gracefully shutdown a vApp.
\b
        vcd vapp power-off vapp1
            Power off a vApp.
\b
        vcd vapp power-off vapp1 vm1 vm2
            Power off vm1 and vm2 of vapp1.
\b
        vcd vapp reset vapp1 vm1 vm2
            Power reset vm1 and vm2 of vapp1.
\b
        vcd vapp deploy vapp1 vm1 vm2
            Deploy vm1 and vm2 of vapp1.
\b
        vcd vapp undeploy vapp1 vm1 vm2
            Undeploy vm1 and vm2 of vapp1.
\b
        vcd vapp delete vapp1 vm1 vm2
            Delete vm1 and vm2 from vapp1.
\b
        vcd vapp power-on vapp1
            Power on a vApp.
\b
        vcd vapp reset vapp1
            Power reset vapp1.
\b
        vcd vapp deploy vapp1
            Deploy vapp1.
\b
        vcd vapp power-on vapp1 vm1 vm2
            Power on vm1 and vm2 of vapp1.
\b
        vcd vapp capture vapp1 catalog1
            Capture a vApp as a template in a catalog.
\b
        vcd vapp attach vapp1 vm1 disk1
            Attach a disk to a VM in the given vApp.
\b
        vcd vapp detach vapp1 vm1 disk1
            Detach a disk from a VM in the given vApp.
\b
        vcd vapp add-disk vapp1 vm1 10000
            Add a disk of 10000 MB to a VM.
\b
        vcd vapp add-vm vapp1 template1.ova vm1 -c catalog1
            Add a VM to a vApp. Instantiate the source VM \'vm1\' that is in
            the \'template1.ova\' template in the \'catalog1\' catalog and
            place the new VM inside \'vapp1\' vApp.
    """
    # Group-level guard: every vapp sub-command needs an org vDC selected
    # (via 'vcd vdc use ...'); fail fast with a clear message otherwise.
    if ctx.invoked_subcommand is not None:
        try:
            restore_session(ctx)
            if not ctx.obj['profiles'].get('vdc_in_use') or \
                    not ctx.obj['profiles'].get('vdc_href'):
                raise Exception('select a virtual datacenter')
        except Exception as e:
            stderr(e, ctx)