def get_pe_account_uuid_using_pc_account_uuid_and_subnet_uuid(
    pc_account_uuid, subnet_uuid
):
    """Return the PE account uuid for a subnet registered under a PC account.

    Args:
        pc_account_uuid (str): uuid of the Nutanix PC account.
        subnet_uuid (str): uuid of the AHV subnet.

    Returns:
        str: uuid of the PE account backing the subnet's cluster, or ""
            when no matching cluster is found in the PC account cache.
    """

    subnet_cache_data = Cache.get_entity_data_using_uuid(
        entity_type=CACHE.ENTITY.AHV_SUBNET,
        uuid=subnet_uuid,
        account_uuid=pc_account_uuid,
    )
    if not subnet_cache_data:
        LOG.error(
            "AHV Subnet (uuid='{}') not found. Please check subnet or update cache".format(
                subnet_uuid
            )
        )
        sys.exit("Ahv Subnet {} not found".format(subnet_uuid))

    # As for nutanix accounts, cluster name is account name
    subnet_cluster_name = subnet_cache_data["cluster"]

    pc_account_cache = Cache.get_entity_data_using_uuid(
        entity_type=CACHE.ENTITY.ACCOUNT, uuid=pc_account_uuid
    )
    # BUGFIX: guard a stale/missing account cache entry; the original would
    # raise TypeError on `None["data"]` instead of a clear error
    if not pc_account_cache:
        LOG.error(
            "Account (uuid='{}') not found. Please update cache".format(
                pc_account_uuid
            )
        )
        sys.exit("Account {} not found".format(pc_account_uuid))

    pc_clusters = pc_account_cache["data"].get("clusters", {})

    # clusters map is {pe_account_uuid: cluster_name}; invert to look up by name
    pc_clusters_rev = {v: k for k, v in pc_clusters.items()}

    return pc_clusters_rev.get(subnet_cluster_name, "")
def get_pe_account_uuid_using_pc_account_uuid_and_nic_data(
    pc_account_uuid, subnet_name, cluster_name
):
    """Return the PE account uuid for a subnet looked up by name and cluster.

    Args:
        pc_account_uuid (str): uuid of the Nutanix PC account.
        subnet_name (str): name of the AHV subnet.
        cluster_name (str): cluster the subnet belongs to.

    Returns:
        str: uuid of the PE account backing the subnet's cluster, or ""
            when no matching cluster is found in the PC account cache.
    """

    subnet_cache_data = Cache.get_entity_data(
        entity_type=CACHE.ENTITY.AHV_SUBNET,
        name=subnet_name,
        cluster=cluster_name,
        account_uuid=pc_account_uuid,
    )
    if not subnet_cache_data:
        LOG.error(
            "Ahv Subnet (name = '{}') not found in registered Nutanix PC account (uuid = '{}') ".format(
                subnet_name, pc_account_uuid
            )
        )
        sys.exit("AHV Subnet {} not found".format(subnet_name))

    # As for nutanix accounts, cluster name is account name
    subnet_cluster_name = subnet_cache_data["cluster"]

    pc_account_cache = Cache.get_entity_data_using_uuid(
        entity_type=CACHE.ENTITY.ACCOUNT, uuid=pc_account_uuid
    )
    # BUGFIX: guard a stale/missing account cache entry; the original would
    # raise TypeError on `None["data"]` instead of a clear error
    if not pc_account_cache:
        LOG.error(
            "Account (uuid='{}') not found. Please update cache".format(
                pc_account_uuid
            )
        )
        sys.exit("Account {} not found".format(pc_account_uuid))

    pc_clusters = pc_account_cache["data"].get("clusters", {})

    # clusters map is {pe_account_uuid: cluster_name}; invert to look up by name
    pc_clusters_rev = {v: k for k, v in pc_clusters.items()}

    return pc_clusters_rev.get(subnet_cluster_name, "")
def get_profile_environment(cls):
    """Return the environment configuration attached to the profile of a
    brownfield vm, if any.

    Walks up to the enclosing ProfileType object and, when it carries an
    environment reference, replaces the reference with the full cached
    environment entity.

    Args:
        cls: entity whose parent profile is inspected.

    Returns:
        dict: cached environment data, or {} when the profile has none.
    """
    cls_profile = common_helper._walk_to_parent_with_given_type(cls, "ProfileType")
    # Removed the dead `environment = {}` pre-assignment; getattr's default
    # already covers the missing-attribute case
    environment = getattr(cls_profile, "environment", {})
    if environment:
        LOG.debug(
            "Found environment {} associated to app-profile {}".format(
                environment.get("name"), cls_profile
            )
        )
        # Swap the lightweight reference for the full cached entity
        environment = Cache.get_entity_data_using_uuid(
            entity_type=CACHE.ENTITY.ENVIRONMENT, uuid=environment["uuid"]
        )
    else:
        LOG.debug(
            "No environment associated to the app-profile {}".format(cls_profile)
        )

    return environment
def post_compile(cls, cdict):
    """Post-compile hook: force every substrate to use the account that the
    environment's infra inclusion list defines for its provider type.
    """
    cdict = super().post_compile(cdict)

    # Substrate should use account defined in the environment only.
    # PROVIDER_ACCOUNT_TYPE_MAP maps provider -> account type; invert it so
    # cached account rows (keyed by provider_type) map back to providers.
    provider_by_account_type = {v: k for k, v in PROVIDER_ACCOUNT_TYPE_MAP.items()}

    account_ref_by_provider = {}
    for entry in cdict.get("infra_inclusion_list", []):
        entry_uuid = entry["account_reference"].get("uuid")
        cache_row = Cache.get_entity_data_using_uuid(
            entity_type=CACHE.ENTITY.ACCOUNT, uuid=entry_uuid
        )
        provider = provider_by_account_type[cache_row.get("provider_type")]
        account_ref_by_provider[provider] = Ref.Account(cache_row.get("name"))

    if account_ref_by_provider:
        for substrate in cdict.get("substrate_definition_list", []):
            substrate.account = account_ref_by_provider[
                getattr(substrate, "provider_type")
            ]

    return cdict
def render_ahv_vm_nic(cls):
    """Render the jinja template for an AHV VM NIC chosen by nic type and
    network function nic type (from the compiled dict of the NIC object)."""
    nic_data = cls.get_dict()
    subnet_ref = nic_data["subnet_reference"]
    nic_type = nic_data["nic_type"]
    network_function_nic_type = nic_data["network_function_nic_type"]

    subnet_uuid = subnet_ref["uuid"]
    subnet_cache_data = Cache.get_entity_data_using_uuid(
        entity_type="ahv_subnet", uuid=subnet_uuid
    )
    if not subnet_cache_data:
        LOG.error("Subnet with uuid '{}' not found".format(subnet_uuid))
        sys.exit(-1)

    user_attrs = {
        "subnet_name": subnet_cache_data["name"],
        "cluster_name": subnet_cache_data["cluster"],
    }

    # nic_type -> (network_function_nic_type -> template) dispatch table
    schema_files = {
        "NORMAL_NIC": {
            "INGRESS": "ahv_normal_ingress_nic.py.jinja2",
            "EGRESS": "ahv_normal_egress_nic.py.jinja2",
            "TAP": "ahv_normal_tap_nic.py.jinja2",
        },
        "DIRECT_NIC": {
            "INGRESS": "ahv_direct_ingress_nic.py.jinja2",
            "EGRESS": "ahv_direct_egress_nic.py.jinja2",
            "TAP": "ahv_direct_tap_nic.py.jinja2",
        },
    }

    if nic_type not in schema_files:
        LOG.error("Unknown nic type '{}'".format(nic_type))
        sys.exit(-1)

    schema_file = schema_files[nic_type].get(network_function_nic_type)
    if not schema_file:
        LOG.error(
            "Unknown network function nic type '{}'".format(network_function_nic_type)
        )
        sys.exit(-1)

    text = render_template(schema_file=schema_file, obj=user_attrs)
    return text.strip()
def render_ahv_template(template, bp_name):
    """Render the AHV blueprint jinja template for the configured project.

    Resolves the current project from context, its registered nutanix_pc
    account and the first whitelisted subnet, then renders the template with
    the blueprint name, subnet name and cluster name.

    Args:
        template: jinja2 template object exposing .render().
        bp_name (str): blueprint name passed through to the template.

    Returns:
        str: rendered template text terminated by os.linesep.
    """
    ContextObj = get_context()
    project_config = ContextObj.get_project_config()
    project_name = project_config.get("name") or "default"
    project_cache_data = Cache.get_entity_data(
        entity_type=CACHE.ENTITY.PROJECT, name=project_name
    )
    if not project_cache_data:
        LOG.error(
            "Project {} not found. Please run: calm update cache".format(project_name)
        )
        sys.exit(-1)

    # Fetch Nutanix_PC account registered
    project_accounts = project_cache_data["accounts_data"]
    account_uuid = project_accounts.get("nutanix_pc", "")
    if not account_uuid:
        LOG.error(
            "No nutanix_pc account registered to project {}".format(project_name)
        )
        # BUGFIX: the original only logged and fell through with an empty
        # account_uuid, causing a misleading subnet-mismatch failure below
        sys.exit(-1)

    # Fetch whitelisted subnets
    project_subnets = project_cache_data["whitelisted_subnets"]
    if not project_subnets:
        LOG.error("No subnets registered to project {}".format(project_name))
        sys.exit(-1)

    # Fetch data for first subnet
    subnet_cache_data = Cache.get_entity_data_using_uuid(
        entity_type=CACHE.ENTITY.AHV_SUBNET,
        uuid=project_subnets[0],
        account_uuid=account_uuid,
    )
    if not subnet_cache_data:
        # Case when project have a subnet that is not available in subnets
        # from registered account
        context_data = {
            "Project Whitelisted Subnets": project_subnets,
            "Account UUID": account_uuid,
            "Project Name": project_name,
        }
        LOG.debug(
            "Context data: {}".format(
                json.dumps(context_data, indent=4, separators=(",", ": "))
            )
        )
        LOG.error(
            "Subnet configuration mismatch in registered account's subnets and whitelisted subnets in project"
        )
        sys.exit(-1)

    cluster_name = subnet_cache_data["cluster"]
    default_subnet = subnet_cache_data["name"]

    LOG.info("Rendering ahv template")
    text = template.render(
        bp_name=bp_name, subnet_name=default_subnet, cluster_name=cluster_name
    )

    return text.strip() + os.linesep
def create_blueprint_from_json(
    client, path_to_json, name=None, description=None, force_create=False
):
    """
    creates blueprint from the bp json supplied.
    NOTE: Project mentioned in the json file remains unchanged
    """
    with open(path_to_json, "r") as json_file:
        bp_payload = json.loads(json_file.read())

    context_obj = get_context()
    configured_project = context_obj.get_project_config()["name"]

    # Resolve the project name referenced inside the payload; blueprints
    # without an explicit project reference belong to the default project
    bp_project_name = "default"
    metadata = bp_payload.get("metadata") or {}
    project_ref = metadata.get("project_reference") or {}
    bp_project_uuid = project_ref.get("uuid")
    if bp_project_uuid:
        bp_project_data = Cache.get_entity_data_using_uuid(
            entity_type=CACHE.ENTITY.PROJECT, uuid=bp_project_uuid
        )
        if bp_project_data:
            bp_project_name = bp_project_data["name"]

    if bp_project_name != configured_project:
        LOG.warning(
            "Project in supplied json is different from configured project('{}')".format(
                configured_project
            )
        )

    return create_blueprint(
        client,
        bp_payload,
        name=name,
        description=description,
        force_create=force_create,
    )
def render_ahv_vm_nic(cls):
    """Render the jinja template for an AHV VM NIC chosen by nic type and
    network function nic type (read from class attributes)."""
    # Note cls.get_dict() may not contain subnet name and would fail, so
    # read the class attributes directly instead of the compiled dict
    subnet_ref = cls.subnet_reference
    if subnet_ref:
        subnet_ref = subnet_ref.get_dict()

    nic_type = cls.nic_type
    network_function_nic_type = cls.network_function_nic_type

    subnet_uuid = subnet_ref.get("uuid", "")
    subnet_cache_data = Cache.get_entity_data_using_uuid(
        entity_type="ahv_subnet", uuid=subnet_uuid
    )
    if not subnet_cache_data:
        LOG.error("Subnet with uuid '{}' not found".format(subnet_uuid))
        sys.exit(-1)

    user_attrs = {
        "subnet_name": subnet_cache_data["name"],
        "cluster_name": subnet_cache_data["cluster"],
    }

    # nic_type -> (network_function_nic_type -> template) dispatch table
    schema_files = {
        "NORMAL_NIC": {
            "INGRESS": "ahv_normal_ingress_nic.py.jinja2",
            "EGRESS": "ahv_normal_egress_nic.py.jinja2",
            "TAP": "ahv_normal_tap_nic.py.jinja2",
        },
        "DIRECT_NIC": {
            "INGRESS": "ahv_direct_ingress_nic.py.jinja2",
            "EGRESS": "ahv_direct_egress_nic.py.jinja2",
            "TAP": "ahv_direct_tap_nic.py.jinja2",
        },
    }

    if nic_type not in schema_files:
        LOG.error("Unknown nic type '{}'".format(nic_type))
        sys.exit(-1)

    schema_file = schema_files[nic_type].get(network_function_nic_type)
    if not schema_file:
        LOG.error(
            "Unknown network function nic type '{}'".format(network_function_nic_type)
        )
        sys.exit(-1)

    text = render_template(schema_file=schema_file, obj=user_attrs)
    return text.strip()
def render_ahv_vm_disk(cls, boot_config):
    """Render the jinja template for an AHV VM disk/cdrom entry.

    Determines an operation type (clone-from-image, clone-from-package,
    allocate-on-container or empty cdrom) from the disk's data source
    reference and device properties, then selects the matching jinja schema
    file for the device/adapter combination.

    Args:
        cls: AHV disk model object (provides data_source_reference,
            device_properties, disk_size_mib).
        boot_config (dict): boot device config; this disk is marked bootable
            when its adapter type/index matches the boot device address.

    Returns:
        str: rendered template text (stripped).
    """
    data_source_ref = cls.data_source_reference or {}
    if data_source_ref:
        data_source_ref = data_source_ref.get_dict()

    device_properties = cls.device_properties.get_dict()
    disk_size_mib = cls.disk_size_mib

    # find device type
    device_type = device_properties["device_type"]
    adapter_type = device_properties["disk_address"]["adapter_type"]
    adapter_index = device_properties["disk_address"]["device_index"]

    schema_file = ""
    user_attrs = {}

    # Atleast one disk should be bootable; mark this one when it matches the
    # configured boot device address
    if boot_config:
        if (
            adapter_type == boot_config["boot_device"]["disk_address"]["adapter_type"]
            and adapter_index
            == boot_config["boot_device"]["disk_address"]["device_index"]
        ):
            user_attrs["bootable"] = True

    # find operation_type
    if data_source_ref:
        if data_source_ref["kind"] == "app_package":
            user_attrs["name"] = data_source_ref.get("name")
            user_attrs["name"] = (
                get_package_name(user_attrs["name"]) or user_attrs["name"]
            )
            operation_type = "cloneFromVMDiskPackage"

        elif data_source_ref["kind"] == "image":
            operation_type = "cloneFromImageService"
            img_uuid = data_source_ref.get("uuid")
            disk_cache_data = (
                Cache.get_entity_data_using_uuid(
                    entity_type=CACHE.ENTITY.AHV_DISK_IMAGE, uuid=img_uuid
                )
                or {}
            )
            if not disk_cache_data:
                # Windows images may not be present
                LOG.warning("Image with uuid '{}' not found".format(img_uuid))
            user_attrs["name"] = disk_cache_data.get("name", "")

        else:
            LOG.error(
                "Unknown kind `{}` for data source reference in image".format(
                    data_source_ref["kind"]
                )
            )
            # BUGFIX: the original only logged here and fell through with
            # `operation_type` unbound, raising NameError below; fail fast
            sys.exit(-1)

    else:
        if device_type == "DISK":
            user_attrs["size"] = disk_size_mib // 1024
            operation_type = "allocateOnStorageContainer"
        elif device_type == "CDROM":
            operation_type = "emptyCdRom"
        else:
            LOG.error("Unknown device type")
            sys.exit(-1)

    # TODO add whitelisting from project via attached accounts
    if device_type == "DISK":
        if adapter_type == "SCSI":
            if operation_type == "cloneFromImageService":
                schema_file = "ahv_vm_disk_scsi_clone_from_image.py.jinja2"
            elif operation_type == "cloneFromVMDiskPackage":
                schema_file = "ahv_vm_disk_scsi_clone_from_pkg.py.jinja2"
            elif operation_type == "allocateOnStorageContainer":
                schema_file = "ahv_vm_disk_scsi_allocate_container.py.jinja2"
            else:
                LOG.error("Unknown operation type {}".format(operation_type))
                sys.exit(-1)

        elif adapter_type == "PCI":
            if operation_type == "cloneFromImageService":
                schema_file = "ahv_vm_disk_pci_clone_from_image.py.jinja2"
            elif operation_type == "cloneFromVMDiskPackage":
                schema_file = "ahv_vm_disk_pci_clone_from_pkg.py.jinja2"
            elif operation_type == "allocateOnStorageContainer":
                schema_file = "ahv_vm_disk_pci_allocate_container.py.jinja2"
            else:
                LOG.error("Unknown operation type {}".format(operation_type))
                sys.exit(-1)

        else:
            LOG.error("Unknown adapter type {}".format(adapter_type))
            sys.exit(-1)

    else:  # CD-ROM
        if adapter_type == "SATA":
            if operation_type == "cloneFromImageService":
                schema_file = "ahv_vm_cdrom_sata_clone_from_image.py.jinja2"
            elif operation_type == "cloneFromVMDiskPackage":
                schema_file = "ahv_vm_cdrom_sata_clone_from_pkg.py.jinja2"
            elif operation_type == "emptyCdRom":
                schema_file = "ahv_vm_cdrom_sata_empty_cdrom.py.jinja2"
            else:
                LOG.error("Unknown operation type {}".format(operation_type))
                sys.exit(-1)

        elif adapter_type == "IDE":
            if operation_type == "cloneFromImageService":
                schema_file = "ahv_vm_cdrom_ide_clone_from_image.py.jinja2"
            elif operation_type == "cloneFromVMDiskPackage":
                schema_file = "ahv_vm_cdrom_ide_clone_from_pkg.py.jinja2"
            elif operation_type == "emptyCdRom":
                schema_file = "ahv_vm_cdrom_ide_empty_cdrom.py.jinja2"
            else:
                LOG.error("Unknown operation type {}".format(operation_type))
                sys.exit(-1)

        else:
            LOG.error("Unknown adapter type {}".format(adapter_type))
            sys.exit(-1)

    text = render_template(schema_file=schema_file, obj=user_attrs)
    return text.strip()
def describe_project(project_name, out):
    """Print a human-readable summary of a project to stdout.

    Args:
        project_name (str): name of the project to describe.
        out (str): output format; "json" dumps the raw project payload and
            returns early, anything else prints the formatted summary.
    """
    client = get_api_client()
    project = get_project(project_name)

    # Raw json output mode: dump and stop
    if out == "json":
        click.echo(json.dumps(project, indent=4, separators=(",", ": ")))
        return

    click.echo("\n----Project Summary----\n")
    click.echo(
        "Name: "
        + highlight_text(project_name)
        + " (uuid: "
        + highlight_text(project["metadata"]["uuid"])
        + ")"
    )
    click.echo("Status: " + highlight_text(project["status"]["state"]))
    click.echo(
        "Owner: " + highlight_text(project["metadata"]["owner_reference"]["name"])
    )

    created_on = arrow.get(project["metadata"]["creation_time"])
    past = created_on.humanize()
    # NOTE(review): `created_on.timestamp` is passed uncalled — valid only if
    # arrow exposes it as a property (older arrow versions); confirm the
    # pinned arrow version, otherwise this prints a bound-method repr
    click.echo(
        "Created on: {} ({})".format(
            highlight_text(time.ctime(created_on.timestamp)), highlight_text(past)
        )
    )

    project_resources = project["status"].get("resources", {})
    environments = project_resources.get("environment_reference_list", [])
    click.echo("Environment Registered: ", nl=False)
    if not environments:
        click.echo(highlight_text("No"))
    else:
        # Handle Multiple Environments (currently only the first is shown)
        click.echo(
            "{} ( uuid: {} )".format(highlight_text("Yes"), environments[0]["uuid"])
        )

    # Registered users, resolved to names via a bulk uuid->name map
    users = project_resources.get("user_reference_list", [])
    if users:
        user_uuid_name_map = client.user.get_uuid_name_map({"length": 1000})
        click.echo("\nRegistered Users: \n--------------------")
        for user in users:
            click.echo("\t" + highlight_text(user_uuid_name_map[user["uuid"]]))

    # Registered external user groups, resolved the same way
    groups = project_resources.get("external_user_group_reference_list", [])
    if groups:
        usergroup_uuid_name_map = client.group.get_uuid_name_map({"length": 1000})
        click.echo("\nRegistered Groups: \n--------------------")
        for group in groups:
            click.echo("\t" + highlight_text(usergroup_uuid_name_map[group["uuid"]]))

    click.echo("\nInfrastructure: \n---------------")

    # Collect all whitelisted subnet uuids (project-local + external)
    subnets_list = []
    for subnet in project_resources["subnet_reference_list"]:
        subnets_list.append(subnet["uuid"])

    # Extending external subnet's list from remote account
    for subnet in project_resources.get("external_network_list", []):
        subnets_list.append(subnet["uuid"])

    accounts = project_resources["account_reference_list"]
    for account in accounts:
        account_uuid = account["uuid"]
        account_cache_data = Cache.get_entity_data_using_uuid(
            entity_type="account", uuid=account_uuid
        )
        if not account_cache_data:
            LOG.error(
                "Account (uuid={}) not found. Please update cache".format(account_uuid)
            )
            sys.exit(-1)

        account_type = account_cache_data["provider_type"]
        click.echo("\nAccount Type: " + highlight_text(account_type.upper()))
        click.echo(
            "Name: {} (uuid: {})".format(
                highlight_text(account_cache_data["name"]),
                highlight_text(account_cache_data["uuid"]),
            )
        )

        # For nutanix_pc accounts, list the whitelisted subnets via the AHV API
        if account_type == "nutanix_pc" and subnets_list:
            AhvVmProvider = get_provider("AHV_VM")
            AhvObj = AhvVmProvider.get_api_obj()

            # Build an "id in (...)" style filter over all collected subnet uuids
            filter_query = "(_entity_id_=={})".format(
                ",_entity_id_==".join(subnets_list)
            )
            nics = AhvObj.subnets(account_uuid=account_uuid, filter_query=filter_query)
            nics = nics["entities"]

            click.echo("\n\tWhitelisted Subnets:\n\t--------------------")
            for nic in nics:
                nic_name = nic["status"]["name"]
                vlan_id = nic["status"]["resources"]["vlan_id"]
                cluster_name = nic["status"]["cluster_reference"]["name"]
                nic_uuid = nic["metadata"]["uuid"]

                click.echo(
                    "\tName: {} (uuid: {})\tVLAN ID: {}\tCluster Name: {}".format(
                        highlight_text(nic_name),
                        highlight_text(nic_uuid),
                        highlight_text(vlan_id),
                        highlight_text(cluster_name),
                    )
                )

    if not accounts:
        click.echo(highlight_text("No provider's account registered"))

    # Resource-domain quotas, if any were configured on the project
    quota_resources = project_resources.get("resource_domain", {}).get("resources", [])
    if quota_resources:
        click.echo("\nQuotas: \n-------")
        for qr in quota_resources:
            qk = qr["resource_type"]
            qv = qr["limit"]
            if qr["units"] == "BYTES":
                # Convert raw bytes to GiB for display
                qv = qv // 1073741824
                qv = str(qv) + " (GiB)"

            click.echo("\t{} : {}".format(qk, highlight_text(qv)))
def render_single_vm_bp_ahv_template(template, bp_name):
    """Render the single-VM AHV blueprint jinja template for the configured
    project.

    Resolves the project, its nutanix_pc account, the first whitelisted
    subnet and the first DISK image on the account, then renders the
    template with those values.

    Args:
        template: jinja2 template object exposing .render().
        bp_name (str): blueprint name passed through to the template.

    Returns:
        str: rendered template text terminated by os.linesep.
    """
    ContextObj = get_context()
    project_config = ContextObj.get_project_config()
    project_name = project_config.get("name") or "default"
    project_cache_data = Cache.get_entity_data(
        entity_type=CACHE.ENTITY.PROJECT, name=project_name
    )
    if not project_cache_data:
        LOG.error(
            "Project {} not found. Please run: calm update cache".format(project_name)
        )
        sys.exit(-1)

    # Fetch Nutanix_PC account registered
    project_accounts = project_cache_data["accounts_data"]
    account_uuid = project_accounts.get("nutanix_pc", "")
    if not account_uuid:
        LOG.error(
            "No nutanix_pc account registered to project {}".format(project_name)
        )
        # BUGFIX: the original only logged and fell through with an empty
        # account_uuid, causing misleading subnet/image failures below
        sys.exit(-1)

    # Fetch whitelisted subnets
    project_subnets = project_cache_data["whitelisted_subnets"]
    if not project_subnets:
        LOG.error("No subnets registered to project {}".format(project_name))
        sys.exit(-1)

    # Fetch data for first subnet
    subnet_cache_data = Cache.get_entity_data_using_uuid(
        entity_type=CACHE.ENTITY.AHV_SUBNET,
        uuid=project_subnets[0],
        account_uuid=account_uuid,
    )
    if not subnet_cache_data:
        # Case when project have a subnet that is not available in subnets
        # from registered account
        context_data = {
            "Project Whitelisted Subnets": project_subnets,
            "Account UUID": account_uuid,
            "Project Name": project_name,
        }
        LOG.debug(
            "Context data: {}".format(
                json.dumps(context_data, indent=4, separators=(",", ": "))
            )
        )
        LOG.error(
            "Subnet configuration mismatch in registered account's subnets and whitelisted subnets in project"
        )
        sys.exit(-1)

    cluster_name = subnet_cache_data["cluster"]
    default_subnet = subnet_cache_data["name"]

    # Fetch image for vm
    AhvVmProvider = get_provider("AHV_VM")
    AhvObj = AhvVmProvider.get_api_obj()
    try:
        res = AhvObj.images(account_uuid=account_uuid)
    except Exception:
        LOG.error(
            "Unable to fetch images for Nutanix_PC Account(uuid={})".format(
                account_uuid
            )
        )
        sys.exit(-1)

    # NOTE: Make sure you use `DISK` image in your jinja template
    vm_image = None
    for entity in res["entities"]:
        name = entity["status"]["name"]
        image_type = entity["status"]["resources"].get("image_type", None) or ""
        if image_type == "DISK_IMAGE":
            vm_image = name
            break

    if not vm_image:
        LOG.error("No Disk image found on account(uuid='{}')".format(account_uuid))
        sys.exit(-1)

    LOG.info("Rendering ahv template")
    text = template.render(
        bp_name=bp_name,
        subnet_name=default_subnet,
        cluster_name=cluster_name,
        vm_image=vm_image,
    )

    return text.strip() + os.linesep
def create_acp(role, project, acp_users, acp_groups, name):
    """Create an access control policy (acp) on a project for a given role.

    Args:
        role (str): role name ("Project Admin", "Developer", "Consumer", ...).
        project (str): name of the project the acp is attached to.
        acp_users (list[str]): user names to add; must already be registered
            in the project.
        acp_groups (list[str]): group names to add; must already be
            registered in the project.
        name (str): optional acp name; autogenerated when falsy.

    Side effects:
        Updates the project through the projects_internal API, prints the
        execution context and polls the resulting task. Exits the process on
        any validation failure; returns (None, err) on API list errors.
    """
    import copy  # local import: only needed for the constant-copy fix below

    if not (acp_users or acp_groups):
        LOG.error("Atleast single user/group should be given")
        sys.exit(-1)

    client = get_api_client()
    acp_name = name or "nuCalmAcp-{}".format(str(uuid.uuid4()))

    # Check whether there is an existing acp with this name
    params = {"filter": "name=={}".format(acp_name)}
    res, err = client.acp.list(params=params)
    if err:
        return None, err

    response = res.json()
    entities = response.get("entities", None)
    if entities:
        LOG.error("ACP {} already exists.".format(acp_name))
        sys.exit(-1)

    params = {"length": 1000}
    project_name_uuid_map = client.project.get_name_uuid_map(params)

    project_uuid = project_name_uuid_map.get(project, "")
    if not project_uuid:
        LOG.error("Project '{}' not found".format(project))
        sys.exit(-1)

    LOG.info("Fetching project '{}' details".format(project))
    ProjectInternalObj = get_resource_api("projects_internal", client.connection)
    res, err = ProjectInternalObj.read(project_uuid)
    if err:
        LOG.error(err)
        sys.exit(-1)

    project_payload = res.json()
    project_payload.pop("status", None)
    project_resources = project_payload["spec"]["project_detail"].get("resources", "")

    # Check if users are present in project
    project_users = []
    for user in project_resources.get("user_reference_list", []):
        project_users.append(user["name"])

    if not set(acp_users).issubset(set(project_users)):
        LOG.error(
            "Users {} are not registered in project".format(
                set(acp_users).difference(set(project_users))
            )
        )
        sys.exit(-1)

    # Check if groups are present in project
    project_groups = []
    for group in project_resources.get("external_user_group_reference_list", []):
        project_groups.append(group["name"])

    if not set(acp_groups).issubset(set(project_groups)):
        LOG.error(
            "Groups {} are not registered in project".format(
                set(acp_groups).difference(set(project_groups))
            )
        )
        sys.exit(-1)

    role_cache_data = Cache.get_entity_data(entity_type="role", name=role)
    # BUGFIX: guard against a missing role cache entry (was a TypeError)
    if not role_cache_data:
        LOG.error("Role '{}' not found. Please update cache".format(role))
        sys.exit(-1)
    role_uuid = role_cache_data["uuid"]

    # Check if there is an existing acp with given (project-role) tuple
    params = {
        "length": 1000,
        "filter": "role_uuid=={};project_reference=={}".format(role_uuid, project_uuid),
    }
    res, err = client.acp.list(params)
    if err:
        return None, err

    response = res.json()
    entities = response.get("entities", None)
    if entities:
        LOG.error(
            "ACP {} already exists for given role in project".format(
                entities[0]["status"]["name"]
            )
        )
        sys.exit(-1)

    # Constructing ACP payload --------

    # Getting the cluster uuids for acp
    whitelisted_subnets = []
    for subnet in project_resources.get("subnet_reference_list", []):
        whitelisted_subnets.append(subnet["uuid"])

    for subnet in project_resources.get("external_network_list", []):
        whitelisted_subnets.append(subnet["uuid"])

    cluster_uuids = []
    for subnet_uuid in whitelisted_subnets:
        subnet_cache_data = Cache.get_entity_data_using_uuid(
            entity_type="ahv_subnet", uuid=subnet_uuid
        )
        cluster_uuids.append(subnet_cache_data["cluster_uuid"])

    # Default context for acp
    # BUGFIX: deep-copy the shared constant before mutating it; the original
    # mutated ACP.DEFAULT_CONTEXT in place, leaking state across calls
    default_context = copy.deepcopy(ACP.DEFAULT_CONTEXT)

    # Setting project uuid in default context
    default_context["scope_filter_expression_list"][0]["right_hand_side"][
        "uuid_list"
    ] = [project_uuid]

    # Role specific filters
    # BUGFIX: same shared-constant issue — the lists below were mutated
    # (index assignment / append) without copying, so repeated calls kept
    # stale project/cluster uuids in the module-level constants
    entity_filter_expression_list = []
    if role == "Project Admin":
        entity_filter_expression_list = copy.deepcopy(
            ACP.ENTITY_FILTER_EXPRESSION_LIST.PROJECT_ADMIN
        )  # TODO remove index bases searching
        entity_filter_expression_list[4]["right_hand_side"]["uuid_list"] = [
            project_uuid
        ]

    elif role == "Developer":
        entity_filter_expression_list = copy.deepcopy(
            ACP.ENTITY_FILTER_EXPRESSION_LIST.DEVELOPER
        )

    elif role == "Consumer":
        entity_filter_expression_list = copy.deepcopy(
            ACP.ENTITY_FILTER_EXPRESSION_LIST.CONSUMER
        )

    elif role == "Operator" and cluster_uuids:
        # NOTE(review): Operator reuses the CONSUMER filter list here —
        # confirm against the ACP constants whether an OPERATOR list exists
        entity_filter_expression_list = copy.deepcopy(
            ACP.ENTITY_FILTER_EXPRESSION_LIST.CONSUMER
        )

    if cluster_uuids:
        entity_filter_expression_list.append(
            {
                "operator": "IN",
                "left_hand_side": {"entity_type": "cluster"},
                "right_hand_side": {"uuid_list": cluster_uuids},
            }
        )

    # TODO check these users are not present in project's other acps
    user_references = []
    user_name_uuid_map = client.user.get_name_uuid_map({"length": 1000})
    for u in acp_users:
        user_references.append(
            {"kind": "user", "name": u, "uuid": user_name_uuid_map[u]}
        )

    usergroup_name_uuid_map = client.group.get_name_uuid_map({"length": 1000})
    group_references = []
    for g in acp_groups:
        group_references.append(
            {"kind": "user_group", "name": g, "uuid": usergroup_name_uuid_map[g]}
        )

    context_list = [default_context]
    if entity_filter_expression_list:
        context_list.append(
            {"entity_filter_expression_list": entity_filter_expression_list}
        )

    acp_payload = {
        "acp": {
            "name": acp_name,
            "resources": {
                "role_reference": Ref.Role(role),
                "user_reference_list": user_references,
                "user_group_reference_list": group_references,
                "filter_list": {"context_list": context_list},
            },
        },
        "metadata": {"kind": "access_control_policy"},
        "operation": "ADD",
    }

    # Appending acp payload to project
    acp_list = project_payload["spec"].get("access_control_policy_list", [])
    for _acp in acp_list:
        # Existing acps must be re-sent with operation UPDATE
        _acp["operation"] = "UPDATE"

    acp_list.append(acp_payload)
    project_payload["spec"]["access_control_policy_list"] = acp_list

    LOG.info("Creating acp {}".format(acp_name))
    res, err = ProjectInternalObj.update(project_uuid, project_payload)
    if err:
        LOG.error(err)
        sys.exit(-1)

    res = res.json()
    stdout_dict = {
        "name": acp_name,
        "execution_context": res["status"]["execution_context"],
    }
    click.echo(json.dumps(stdout_dict, indent=4, separators=(",", ": ")))
    LOG.info("Polling on acp creation task")
    watch_task(res["status"]["execution_context"]["task_uuid"])
def get_referenced_account_uuid(cls):
    """Resolve the provider account uuid a substrate should use.

    SUBSTRATE GIVEN UNDER BLUEPRINT
    If calm-version < v3.2.0:
        1. account_reference is not available at substrate-level, So need to
           read from project only
    If calm-version >= 3.2.0:
        1. account_reference is available at substrate-level
            1.a: If env is given at profile-level, then account must be
                 whitelisted in environment
            1.b: If env is not given at profile-level, then account must be
                 whitelisted in project
        2. If account_reference is not available at substrate-level
            2.a: If env is given at profile-level, return provider account in env
            2.b: If env is not given at profile-level, return provider account
                 in project

    SUBSTRATE GIVEN UNDER ENVIRONMENT
    If calm-version < v3.2.0:
        1. account_reference is not available at substrate-level, So need to
           read from project only
    If calm-version >= 3.2.0:
        1. account_reference is available at substrate-level
            1. account must be filtered at environment
        2. If account_reference is not available at substrate-level
            2.a: return provider account whitelisted in environment

    Returns:
        str: uuid of the resolved provider account ("" when the substrate's
            provider type has no account-type mapping).
    """
    # Account explicitly attached to the substrate (may be empty)
    provider_account = getattr(cls, "account", {})
    calm_version = Version.get_version("Calm")

    provider_type = getattr(cls, "provider_type")
    provider_account_type = PROVIDER_ACCOUNT_TYPE_MAP.get(provider_type, "")
    if not provider_account_type:
        # Provider has no associated account type (e.g. EXISTING_VM)
        return ""

    # Fetching project data
    project_cache_data = common_helper.get_cur_context_project()
    project_name = project_cache_data.get("name")
    project_accounts = project_cache_data.get("accounts_data", {}).get(
        provider_account_type, []
    )
    if not project_accounts:
        LOG.error(
            "No '{}' account registered to project '{}'".format(
                provider_account_type, project_name
            )
        )
        sys.exit(-1)

    # If substrate is defined in blueprint file
    cls_bp = common_helper._walk_to_parent_with_given_type(cls, "BlueprintType")
    if cls_bp:
        environment = {}
        # Find the profile whose deployment references this substrate, and
        # pick up that profile's environment (if any)
        for cls_profile in cls_bp.profiles:
            for cls_deployment in cls_profile.deployments:
                if cls_deployment.substrate.name != str(cls):
                    continue

                environment = getattr(cls_profile, "environment", {})
                if environment:
                    LOG.debug(
                        "Found environment {} associated to app-profile {}".format(
                            environment.get("name"), cls_profile
                        )
                    )
                break

        # If environment is given at profile level
        if environment:
            environment_cache_data = Cache.get_entity_data_using_uuid(
                entity_type=CACHE.ENTITY.ENVIRONMENT, uuid=environment["uuid"]
            )
            if not environment_cache_data:
                LOG.error(
                    "Environment {} not found. Please run: calm update cache".format(
                        environment["name"]
                    )
                )
                sys.exit(-1)

            accounts = environment_cache_data.get("accounts_data", {}).get(
                provider_account_type, []
            )
            if not accounts:
                LOG.error(
                    "Environment '{}' has no '{}' account.".format(
                        environment_cache_data.get("name", ""),
                        provider_account_type,
                    )
                )
                sys.exit(-1)

            # If account given at substrate, it should be whitelisted in environment
            if provider_account and provider_account["uuid"] != accounts[0]["uuid"]:
                LOG.error(
                    "Account '{}' not filtered in environment '{}'".format(
                        provider_account["name"],
                        environment_cache_data.get("name", ""),
                    )
                )
                sys.exit(-1)

            # If provider_account is not given, then fetch from env
            elif not provider_account:
                provider_account = {
                    "name": accounts[0]["name"],
                    "uuid": accounts[0]["uuid"],
                }

        # If environment is not given at profile level
        else:
            # if provider_account is given, it should be part of project
            if not project_accounts:
                # NOTE(review): unreachable — the same condition already
                # exited above; kept as-is (doc-only change)
                LOG.error(
                    "No '{}' account registered to project '{}'".format(
                        provider_account_type, project_name
                    )
                )
                sys.exit(-1)

            if (
                provider_account
                and provider_account["uuid"] not in project_accounts
            ):
                LOG.error(
                    "Account '{}' not filtered in project '{}'".format(
                        provider_account["name"], project_name
                    )
                )
                sys.exit(-1)

            # Else take first account in project
            elif not provider_account:
                provider_account = {"uuid": project_accounts[0], "kind": "account"}

    # If substrate defined inside environment
    cls_env = common_helper._walk_to_parent_with_given_type(cls, "EnvironmentType")
    if cls_env:
        infra = getattr(cls_env, "providers", [])
        whitelisted_account = {}
        # Locate the environment provider entry matching this provider type
        for _pdr in infra:
            if _pdr.type == PROVIDER_ACCOUNT_TYPE_MAP[provider_type]:
                whitelisted_account = _pdr.account_reference.get_dict()
                break

        # Account filtering at environment level exists only from Calm 3.2.0
        if LV(calm_version) >= LV("3.2.0"):
            if provider_account and provider_account[
                "uuid"
            ] != whitelisted_account.get("uuid", ""):
                LOG.error(
                    "Account '{}' not filtered in environment '{}'".format(
                        provider_account["name"], str(cls_env)
                    )
                )
                sys.exit(-1)

            elif not whitelisted_account:
                LOG.error(
                    "No account is filtered in environment '{}'".format(str(cls_env))
                )
                sys.exit(-1)

            elif not provider_account:
                provider_account = whitelisted_account

    # If version is less than 3.2.0, then it should use account from project
    # only, OR if no account is supplied, will take 0th account in project
    # (in both case of blueprint/environment)
    if not provider_account:
        provider_account = {"uuid": project_accounts[0], "kind": "account"}

    return provider_account["uuid"]
def compile(cls):
    """Compile the substrate into its API payload dict.

    Starting from the parent class's compiled dict, this:
      * normalises the readiness probe (compiling it if it is still an
        object, applying per-OS connection defaults and a per-provider
        default probe address),
      * injects a minimal default ``create_spec`` when none was given,
      * relocates editables into the payload's expected keys,
      * cross-checks the ``account_uuid`` embedded in a (yaml-supplied)
        provider spec against the account referenced on the substrate.

    Exits the process via ``sys.exit`` on any validation failure.

    Returns:
        dict: the finalised substrate payload.
    """
    cdict = super().compile()

    # Resolve the readiness probe to a plain dict; fall back to the
    # default readiness_probe() when none was supplied on the class.
    readiness_probe_dict = {}
    if "readiness_probe" in cdict and cdict["readiness_probe"]:
        readiness_probe_dict = cdict["readiness_probe"]
        if hasattr(readiness_probe_dict, "compile"):
            readiness_probe_dict = readiness_probe_dict.compile()
    else:
        readiness_probe_dict = readiness_probe().compile()

    # Fill out os specific details if not found
    if cdict["os_type"] == "Linux":
        if not readiness_probe_dict.get("connection_type", ""):
            readiness_probe_dict["connection_type"] = "SSH"
        if not readiness_probe_dict.get("connection_port", ""):
            readiness_probe_dict["connection_port"] = 22
        if not readiness_probe_dict.get("connection_protocol", ""):
            readiness_probe_dict["connection_protocol"] = ""
    else:
        # Non-Linux defaults to WinRM-style access (POWERSHELL over 5985/http).
        if not readiness_probe_dict.get("connection_type", ""):
            readiness_probe_dict["connection_type"] = "POWERSHELL"
        if not readiness_probe_dict.get("connection_port", ""):
            readiness_probe_dict["connection_port"] = 5985
        if not readiness_probe_dict.get("connection_protocol", ""):
            readiness_probe_dict["connection_protocol"] = "http"

    # Fill out address for readiness probe if not given
    # (each provider exposes its VM IP under a different macro)
    if cdict["type"] == "AHV_VM":
        if not readiness_probe_dict.get("address", ""):
            readiness_probe_dict[
                "address"
            ] = "@@{platform.status.resources.nic_list[0].ip_endpoint_list[0].ip}@@"

    elif cdict["type"] == "EXISTING_VM":
        if not readiness_probe_dict.get("address", ""):
            readiness_probe_dict["address"] = "@@{ip_address}@@"

    elif cdict["type"] == "AWS_VM":
        if not readiness_probe_dict.get("address", ""):
            readiness_probe_dict["address"] = "@@{public_ip_address}@@"

    elif cdict["type"] == "K8S_POD":  # Never used (Omit after discussion)
        readiness_probe_dict["address"] = ""
        cdict.pop("editables", None)

    elif cdict["type"] == "AZURE_VM":
        if not readiness_probe_dict.get("address", ""):
            readiness_probe_dict[
                "address"
            ] = "@@{platform.publicIPAddressList[0]}@@"

    elif cdict["type"] == "VMWARE_VM":
        if not readiness_probe_dict.get("address", ""):
            readiness_probe_dict["address"] = "@@{platform.ipAddressList[0]}@@"

    elif cdict["type"] == "GCP_VM":
        if not readiness_probe_dict.get("address", ""):
            readiness_probe_dict[
                "address"
            ] = "@@{platform.networkInterfaces[0].accessConfigs[0].natIP}@@"

    else:
        raise Exception("Un-supported vm type :{}".format(cdict["type"]))

    # Adding min defaults in vm spec required by each provider
    if not cdict.get("create_spec"):
        # TODO shift them to constants file
        provider_type_map = {
            "AWS_VM": "aws",
            "VMWARE_VM": "vmware",
            "AHV_VM": "nutanix_pc",  # Accounts of type nutanix are not used after 2.9
            "AZURE_VM": "azure",
            "GCP_VM": "gcp",
        }

        if cdict["type"] in provider_type_map:
            if cdict["type"] == "AHV_VM":
                # UI expects defaults. Jira: https://jira.nutanix.com/browse/CALM-20134
                if not cdict.get("create_spec"):
                    cdict["create_spec"] = {"resources": {"nic_list": []}}

            else:
                # Getting the account_uuid for each provider
                # Getting the metadata obj
                metadata_obj = get_metadata_obj()
                project_ref = metadata_obj.get("project_reference") or dict()

                # If project not found in metadata, it will take project from config
                ContextObj = get_context()
                project_config = ContextObj.get_project_config()
                project_name = project_ref.get("name", project_config["name"])

                project_cache_data = Cache.get_entity_data(
                    entity_type=CACHE.ENTITY.PROJECT, name=project_name
                )
                if not project_cache_data:
                    LOG.error(
                        "Project {} not found. Please run: calm update cache".format(
                            project_name
                        )
                    )
                    sys.exit(-1)

                # Registered accounts
                project_accounts = project_cache_data["accounts_data"]
                provider_type = provider_type_map[cdict["type"]]
                account_uuids = project_accounts.get(provider_type, [])
                if not account_uuids:
                    LOG.error(
                        "No {} account registered in project '{}'".format(
                            provider_type, project_name
                        )
                    )
                    sys.exit(-1)

                # Adding default spec (first registered account of this provider type)
                cdict["create_spec"] = {
                    "resources": {"account_uuid": account_uuids[0]}
                }

                # Template attribute should be present for vmware spec
                if cdict["type"] == "VMWARE_VM":
                    cdict["create_spec"]["template"] = ""

    # Modifying the editable object: provider-spec editables move under
    # editables.create_spec in the payload.
    provider_spec_editables = cdict.pop("editables", {})
    cdict["editables"] = {}

    if provider_spec_editables:
        cdict["editables"]["create_spec"] = provider_spec_editables

    # Popping out the editables from readiness_probe
    readiness_probe_editables = readiness_probe_dict.pop("editables_list", [])
    if readiness_probe_editables:
        cdict["editables"]["readiness_probe"] = {
            k: True for k in readiness_probe_editables
        }

    # In case we have read provider_spec from a yaml file, validate that we have consistent values for
    # Substrate.account (if present) and account_uuid in provider_spec (if present).
    # The account_uuid mentioned in provider_spec yaml should be a registered PE under the Substrate.account PC
    substrate_account_uuid = cls.get_referenced_account_uuid()
    spec_account_uuid = ""
    try:
        spec_account_uuid = cdict["create_spec"]["resources"]["account_uuid"]
    except (AttributeError, TypeError, KeyError):
        # create_spec may be absent, a non-dict, or lack an account_uuid
        pass

    if substrate_account_uuid:
        account_cache_data = Cache.get_entity_data_using_uuid(
            entity_type="account", uuid=substrate_account_uuid
        )
        if not account_cache_data:
            LOG.error(
                "Account (uuid={}) not found. Please update cache".format(
                    substrate_account_uuid
                )
            )
            sys.exit(-1)
        account_name = account_cache_data["name"]

        if spec_account_uuid:
            if cdict["type"] == "AHV_VM":
                # For AHV, the spec's account_uuid must be a PE cluster
                # registered under this PC account's cached clusters map.
                if (
                    not account_cache_data.get("data", {})
                    .get("clusters", {})
                    .get(spec_account_uuid)
                ):
                    LOG.error(
                        "cluster account_uuid (uuid={}) used in the provider spec is not found to be registered"
                        " under the Nutanix PC account {}. Please update cache".format(
                            spec_account_uuid, account_name
                        )
                    )
                    sys.exit(-1)
            elif cdict["type"] != "EXISTING_VM":
                # Non-AHV providers must reference the substrate account directly.
                if spec_account_uuid != substrate_account_uuid:
                    LOG.error(
                        "Account '{}'(uuid='{}') not matched with account_uuid used in provider-spec (uuid={})".format(
                            account_name, substrate_account_uuid, spec_account_uuid
                        )
                    )
                    sys.exit(-1)

        # Add account uuid for non-ahv providers
        if cdict["type"] not in ["EXISTING_VM", "AHV_VM", "K8S_POD"]:
            cdict["create_spec"]["resources"]["account_uuid"] = substrate_account_uuid

    cdict.pop("account_reference", None)
    cdict["readiness_probe"] = readiness_probe_dict
    return cdict
def update_project_using_cli_switches(
    project_name,
    add_user_list,
    add_group_list,
    add_account_list,
    remove_account_list,
    remove_user_list,
    remove_group_list,
):
    """Update a project's users, groups and accounts from CLI switches.

    Reads the current project payload, applies the requested add/remove
    lists, verifies the removals against current project usage, then
    pushes the update and polls its task. Users/groups removed from the
    project are afterwards also removed from the project's ACPs.

    Args:
        project_name (str): name of the project to update
        add_user_list / remove_user_list (list[str]): user names
        add_group_list / remove_group_list (list[str]): user-group names
        add_account_list / remove_account_list (list[str]): account names

    Exits the process on any lookup/validation/API failure.
    """
    client = get_api_client()

    LOG.info("Fetching project '{}' details".format(project_name))
    params = {"length": 1000, "filter": "name=={}".format(project_name)}
    project_name_uuid_map = client.project.get_name_uuid_map(params)
    project_uuid = project_name_uuid_map.get(project_name, "")
    if not project_uuid:
        LOG.error("Project {} not found.".format(project_name))
        sys.exit(-1)

    res, err = client.project.read(project_uuid)
    if err:
        LOG.error(err)
        sys.exit(-1)

    project_payload = res.json()
    # "status" must not be sent back on update
    project_payload.pop("status", None)

    # Usage is later queried only for the entities being removed
    project_usage_payload = {
        "filter": {"account_reference_list": [], "subnet_reference_list": []}
    }

    project_resources = project_payload["spec"]["resources"]
    project_users = []
    project_groups = []
    for user in project_resources.get("user_reference_list", []):
        project_users.append(user["name"])

    for group in project_resources.get("external_user_group_reference_list", []):
        project_groups.append(group["name"])

    # Checking remove users/groups are part of project or not
    if not set(remove_user_list).issubset(set(project_users)):
        LOG.error(
            "Users {} are not registered in project".format(
                set(remove_user_list).difference(set(project_users))
            )
        )
        sys.exit(-1)

    if not set(remove_group_list).issubset(set(project_groups)):
        LOG.error(
            "Groups {} are not registered in project".format(
                set(remove_group_list).difference(set(project_groups))
            )
        )
        sys.exit(-1)

    # Append users
    updated_user_reference_list = []
    updated_group_reference_list = []

    # Names collected here are stripped from the project ACPs after the update
    acp_remove_user_list = []
    acp_remove_group_list = []

    for user in project_resources.get("user_reference_list", []):
        if user["name"] not in remove_user_list:
            updated_user_reference_list.append(user)
        else:
            acp_remove_user_list.append(user["name"])

    for group in project_resources.get("external_user_group_reference_list", []):
        if group["name"] not in remove_group_list:
            updated_group_reference_list.append(group)
        else:
            acp_remove_group_list.append(group["name"])

    user_name_uuid_map = client.user.get_name_uuid_map({"length": 1000})
    for user in add_user_list:
        updated_user_reference_list.append(
            {"kind": "user", "name": user, "uuid": user_name_uuid_map[user]}
        )

    usergroup_name_uuid_map = client.group.get_name_uuid_map({"length": 1000})
    for group in add_group_list:
        updated_group_reference_list.append(
            {
                "kind": "user_group",
                "name": group,
                "uuid": usergroup_name_uuid_map[group],
            }
        )

    project_resources["user_reference_list"] = updated_user_reference_list
    project_resources[
        "external_user_group_reference_list"
    ] = updated_group_reference_list

    # Updating accounts data
    if not set(add_account_list).isdisjoint(set(remove_account_list)):
        LOG.error(
            "Same accounts found in both added and removing list {}".format(
                set(add_account_list).intersection(set(remove_account_list))
            )
        )
        sys.exit("Same accounts found in both added and removing list")

    project_accounts = project_resources.get("account_reference_list", [])
    updated_proj_accounts = []
    for _acc in project_accounts:
        _acc_uuid = _acc["uuid"]
        account_cache_data = Cache.get_entity_data_using_uuid(
            entity_type="account", uuid=_acc_uuid
        )
        if not account_cache_data:
            LOG.error(
                "Account (uuid={}) not found. Please update cache".format(_acc_uuid)
            )
            sys.exit("Account (uuid={}) not found".format(_acc_uuid))

        if account_cache_data["name"] not in remove_account_list:
            updated_proj_accounts.append(_acc)
        else:
            # Removed accounts are usage-checked before the update is allowed
            project_usage_payload["filter"]["account_reference_list"].append(_acc_uuid)

    project_account_uuids = [_e["uuid"] for _e in updated_proj_accounts]
    for _acc in add_account_list:
        account_cache_data = Cache.get_entity_data(entity_type="account", name=_acc)
        if not account_cache_data:
            LOG.error("Account (name={}) not found. Please update cache".format(_acc))
            sys.exit("Account (name={}) not found".format(_acc))

        # Account already present
        if account_cache_data["uuid"] in project_account_uuids:
            continue

        updated_proj_accounts.append(
            {"kind": "account", "name": _acc, "uuid": account_cache_data["uuid"]}
        )

    project_resources["account_reference_list"] = updated_proj_accounts

    LOG.info("Checking project usage")
    res, err = client.project.usage(project_uuid, project_usage_payload)
    if err:
        LOG.error(err)
        sys.exit(-1)

    project_usage = res.json()
    msg_list = []
    should_update_project = is_project_updation_allowed(project_usage, msg_list)
    if not should_update_project:
        LOG.error("Project updation failed")
        click.echo("\n".join(msg_list))
        click.echo(
            json.dumps(
                project_usage["status"].get("resources", {}),
                indent=4,
                separators=(",", ": "),
            )
        )
        sys.exit(-1)

    LOG.info("Updating project '{}'".format(project_name))
    res, err = client.project.update(project_uuid, project_payload)
    if err:
        LOG.error(err)
        sys.exit(-1)

    res = res.json()
    stdout_dict = {
        "name": res["spec"]["name"],
        "uuid": res["metadata"]["uuid"],
        "execution_context": res["status"]["execution_context"],
    }
    click.echo(json.dumps(stdout_dict, indent=4, separators=(",", ": ")))

    # Remove project removed user and groups from acps
    LOG.info("Polling on project updation task")
    task_state = watch_project_task(
        project_uuid,
        res["status"]["execution_context"]["task_uuid"],
        poll_interval=4,
    )
    if task_state not in PROJECT_TASK.FAILURE_STATES:
        if acp_remove_user_list or acp_remove_group_list:
            LOG.info("Updating project acps")
            remove_users_from_project_acps(
                project_uuid=project_uuid,
                remove_user_list=acp_remove_user_list,
                remove_group_list=acp_remove_group_list,
            )
    else:
        LOG.exception("Project updation task went to {} state".format(task_state))
        sys.exit(-1)
def is_project_updation_allowed(project_usage, msg_list):
    """
    Returns whether project update is allowed.
    Will also update project_usage dict to contain only associate entities
    Args:
        project_usage (dict): project usage details
    Returns:
        _eusage (bool): is updation allowed
    """

    def is_entity_used(e_usage):
        # Prune zero counters in place; report whether anything is in use.
        used = False

        def _keep_if_nonzero(key):
            # Pop the counter and re-insert only a non-zero value.
            cnt = e_usage.pop(key, 0)
            if cnt:
                e_usage[key] = cnt
            return bool(cnt)

        if _keep_if_nonzero("app"):
            used = True

        # Blueprint usage is nested: prune brownfield/greenfield separately
        # and drop the whole "blueprint" key when both are zero.
        bp_usage = e_usage.get("blueprint", {})
        brownfield_cnt = bp_usage.pop("brownfield", 0)
        greenfield_cnt = bp_usage.pop("greenfield", 0)
        if brownfield_cnt or greenfield_cnt:
            used = True
            if brownfield_cnt:
                e_usage["blueprint"]["brownfield"] = brownfield_cnt
            if greenfield_cnt:
                e_usage["blueprint"]["greenfield"] = greenfield_cnt
        else:
            e_usage.pop("blueprint", None)

        for key in ("endpoint", "environment", "runbook"):
            if _keep_if_nonzero(key):
                used = True

        return used

    updation_allowed = True
    resources = project_usage["status"]["resources"]

    for _account in resources.get("account_list", []):
        if not is_entity_used(_account["usage"]):
            continue
        updation_allowed = False
        cached_account = Cache.get_entity_data_using_uuid(
            entity_type="account", uuid=_account["uuid"]
        )
        msg_list.append(
            "Please disassociate the account '{}' (uuid='{}') references from existing entities".format(
                cached_account["name"], cached_account["uuid"]
            )
        )

    for _subnet in resources.get("subnet_list", []):
        if not is_entity_used(_subnet["usage"]):
            continue
        updation_allowed = False
        cached_subnet = Cache.get_entity_data_using_uuid(
            entity_type=CACHE.ENTITY.AHV_SUBNET, uuid=_subnet["uuid"]
        )
        msg_list.append(
            "Please disassociate the subnet '{}' (uuid='{}') references from existing entities".format(
                cached_subnet["name"], cached_subnet["uuid"]
            )
        )

    return updation_allowed
def compile(cls):
    """Compile an AHV NIC into its API payload dict.

    Resolves the subnet reference against the substrate's Nutanix PC
    account (or the project's first whitelisted PC account), validating
    that the subnet is whitelisted in the enclosing environment or the
    project. Runtime macro subnet names (``@@{...}@@``) are passed
    through unresolved. Also resolves any network function chain
    reference by name from the cache.

    Exits the process via ``sys.exit`` on any validation failure.

    Returns:
        dict: the finalised NIC payload.
    """
    cdict = super().compile()

    cls_substrate = common_helper._walk_to_parent_with_given_type(
        cls, "SubstrateType"
    )
    account_uuid = (
        cls_substrate.get_referenced_account_uuid() if cls_substrate else ""
    )

    # Fetch nutanix account in project
    project, project_whitelist = common_helper.get_project_with_pc_account()
    if not account_uuid:
        # No account on the substrate: fall back to the first whitelisted
        # PC account of the project.
        account_uuid = list(project_whitelist.keys())[0]

    subnet_ref = cdict.get("subnet_reference") or dict()
    subnet_name = subnet_ref.get("name", "") or ""

    if subnet_name.startswith("@@{") and subnet_name.endswith("}@@"):
        # Macro-valued subnet names resolve at runtime; keep the macro
        # itself as the uuid.
        cdict["subnet_reference"] = {
            "kind": "subnet",
            "uuid": subnet_name,
        }

    elif subnet_name:
        cluster_name = subnet_ref.get("cluster", "")
        subnet_cache_data = Cache.get_entity_data(
            entity_type=CACHE.ENTITY.AHV_SUBNET,
            name=subnet_name,
            cluster=cluster_name,
            account_uuid=account_uuid,
        )
        if not subnet_cache_data:
            LOG.debug(
                "Ahv Subnet (name = '{}') not found in registered Nutanix PC account (uuid = '{}') "
                "in project (name = '{}')".format(
                    subnet_name, account_uuid, project["name"]
                )
            )
            LOG.error(
                "AHV Subnet {} not found. Please run: calm update cache".format(
                    subnet_name
                )
            )
            sys.exit(-1)

        subnet_uuid = subnet_cache_data.get("uuid", "")

        # If substrate defined under environment model
        cls_env = common_helper._walk_to_parent_with_given_type(
            cls, "EnvironmentType"
        )
        if cls_env:
            # Subnet must be whitelisted by the environment's nutanix_pc provider
            infra = getattr(cls_env, "providers", [])
            for _pdr in infra:
                if _pdr.type == "nutanix_pc":
                    subnet_references = getattr(_pdr, "subnet_reference_list", [])
                    subnet_references.extend(
                        getattr(_pdr, "external_network_list", [])
                    )
                    sr_list = [_sr.get_dict()["uuid"] for _sr in subnet_references]
                    if subnet_uuid not in sr_list:
                        LOG.error(
                            "Subnet '{}' not whitelisted in environment '{}'".format(
                                subnet_name, str(cls_env)
                            )
                        )
                        sys.exit(-1)

        # If provider_spec is defined under substrate and substrate is defined under blueprint model
        elif cls_substrate:
            pfl_env = cls_substrate.get_profile_environment()
            if pfl_env:
                environment_cache_data = Cache.get_entity_data_using_uuid(
                    entity_type=CACHE.ENTITY.ENVIRONMENT, uuid=pfl_env["uuid"]
                )
                if not environment_cache_data:
                    LOG.error(
                        "Environment {} not found. Please run: calm update cache".format(
                            pfl_env["name"]
                        )
                    )
                    sys.exit(-1)

                # NOTE(review): accounts_data["nutanix_pc"] appears to map
                # account_uuid -> whitelisted subnet uuids — confirm schema.
                env_accounts = environment_cache_data.get("accounts_data", {}).get(
                    "nutanix_pc", []
                )
                if subnet_uuid not in env_accounts.get(account_uuid, []):
                    LOG.error(
                        "Subnet {} is not whitelisted in environment {}".format(
                            subnet_name, str(pfl_env)
                        )
                    )
                    sys.exit(-1)

            elif subnet_uuid not in project_whitelist.get(account_uuid, []):
                LOG.error(
                    "Subnet {} is not whitelisted in project {}".format(
                        subnet_name, project["name"]
                    )
                )
                sys.exit(-1)

        cdict["subnet_reference"] = {
            "kind": "subnet",
            "name": subnet_name,
            "uuid": subnet_uuid,
        }

    # Optional network function chain, resolved by name from the cache
    nfc_ref = cdict.get("network_function_chain_reference") or dict()
    nfc_name = nfc_ref.get("name", "")
    if nfc_name:
        nfc_cache_data = Cache.get_entity_data(
            entity_type=CACHE.ENTITY.AHV_NETWORK_FUNCTION_CHAIN, name=nfc_name
        )
        if not nfc_cache_data:
            LOG.debug(
                "Ahv Network Function Chain (name = '{}') not found in registered nutanix_pc account (uuid = '{}') in project (name = '{}')".format(
                    nfc_name, account_uuid, project["name"]
                )
            )
            LOG.error(
                "AHV Network Function Chain {} not found. Please run: calm update cache".format(
                    nfc_name
                )
            )
            sys.exit(-1)

        nfc_uuid = nfc_cache_data.get("uuid", "")
        cdict["network_function_chain_reference"] = {
            "name": nfc_name,
            "uuid": nfc_uuid,
            "kind": "network_function_chain",
        }

    return cdict
def describe_acp(acp_name, project_name, out):
    """Show the details of an ACP scoped to the given project.

    Resolves the project and ACP by name, then either dumps the raw ACP
    payload as JSON (out == "json") or prints a human-readable summary
    (state, role, users, groups). Exits the process on any failure.
    """
    client = get_api_client()

    # Resolve project name -> uuid
    name_uuid_map = client.project.get_name_uuid_map({"length": 1000})
    project_uuid = name_uuid_map.get(project_name, "")
    if not project_uuid:
        LOG.error("Project '{}' not found".format(project_name))
        sys.exit(-1)

    # Resolve acp name -> uuid within that project
    acp_filter = "(name=={});(project_reference=={})".format(acp_name, project_uuid)
    acp_name_uuid_map = client.acp.get_name_uuid_map(
        {"length": 1000, "filter": acp_filter}
    )
    acp_uuid = acp_name_uuid_map.get(acp_name, "")
    if not acp_uuid:
        LOG.error(
            "No ACP found with name '{}' and project '{}'".format(
                acp_name, project_name
            )
        )
        sys.exit(-1)

    LOG.info("Fetching acp {} details".format(acp_name))
    res, err = client.acp.read(acp_uuid)
    if err:
        LOG.error(err)
        sys.exit(-1)
    acp = res.json()

    # Raw JSON output short-circuits the summary
    if out == "json":
        click.echo(json.dumps(acp, indent=4, separators=(",", ": ")))
        return

    click.echo("\n----ACP Summary----\n")
    click.echo("Name: " + highlight_text(acp_name) + " (uuid: " + acp_uuid + ")")
    click.echo("Status: " + highlight_text(acp["status"]["state"]))
    click.echo("Project: " + highlight_text(project_name))

    acp_resources = acp["status"]["resources"]
    users = acp_resources.get("user_reference_list", [])
    groups = acp_resources.get("user_group_reference_list", [])
    role_ref = acp_resources.get("role_reference", [])

    if role_ref:
        role_data = Cache.get_entity_data_using_uuid(
            entity_type="role", uuid=role_ref["uuid"]
        )
        if not role_data:
            LOG.error(
                "Role ({}) details not present. Please update cache".format(
                    role_ref["uuid"]
                )
            )
            sys.exit(-1)
        click.echo("Role: " + highlight_text(role_data["name"]))

    if users:
        uuid_name_map = client.user.get_uuid_name_map({"length": 1000})
        click.echo("Users [{}]:".format(highlight_text(len(users))))
        for _user in users:
            click.echo("\t" + highlight_text(uuid_name_map[_user["uuid"]]))

    if groups:
        uuid_name_map = client.group.get_uuid_name_map({"length": 1000})
        click.echo("Groups [{}]:".format(highlight_text(len(groups))))
        for _group in groups:
            click.echo("\t" + highlight_text(uuid_name_map[_group["uuid"]]))