def share_def_entity(self, acl_entry: common_models.ClusterAclEntry):
    """Grant access to this cluster's defined entity via an ACL entry.

    Resolves the tenant org id from the defined entity (directly for
    native / TKG+ clusters, via the owning VDC's name for TKG-S), then
    POSTs the filtered ACL payload to the entity's access-controls
    endpoint with the tenant-context header set.

    :param common_models.ClusterAclEntry acl_entry: ACL entry to apply.

    :raises Exception: if the defined entity's kind is not a supported
        cluster kind.
    """
    # Older payloads nest the cluster spec under `.entity`; fall back to
    # the defined entity itself when that attribute is absent.
    cluster_entity = self.def_entity.entity \
        if hasattr(self.def_entity, 'entity') else self.def_entity
    ent_kind = cluster_entity.kind

    native_like_kinds = (
        shared_constants.ClusterEntityKind.NATIVE.value,
        shared_constants.ClusterEntityKind.TKG_PLUS.value,
    )
    if ent_kind in native_like_kinds:
        org_id = vcd_utils.extract_id(self.def_entity.org.id)
    elif ent_kind == shared_constants.ClusterEntityKind.TKG_S.value:
        # TKG-S entities don't carry the org id directly; derive it from
        # the VDC that hosts the cluster.
        org_id = vcd_utils.get_org_id_from_vdc_name(
            client=self._client,
            vdc_name=self.def_entity.metadata.virtualDataCenterName)
    else:
        raise Exception(f"Invalid entity kind: {ent_kind}")

    access_controls_path = (
        f'{cloudapi_constants.CloudApiResource.ENTITIES}/'
        f'{self._cluster_id}/{cloudapi_constants.CloudApiResource.ACL}'
    )
    payload = acl_entry.construct_filtered_dict(
        include=shared_constants.DEF_ENTITY_ACCESS_CONTROL_KEYS)
    self._cloudapi_client.do_request(
        method=shared_constants.RequestMethod.POST,
        cloudapi_version=cloudapi_constants.CloudApiVersion.VERSION_1_0_0,
        resource_url_relative_path=access_controls_path,
        additional_request_headers={
            server_constants.TENANT_CONTEXT_HEADER: org_id
        },  # noqa: E501
        payload=payload)
def _get_cse_ovdc_list(sysadmin_client: vcd_client.Client, ovdc_list: list):
    """Build k8s-runtime detail dicts for the given org VDC records.

    :param vcd_client.Client sysadmin_client: sysadmin client used for
        the per-VDC runtime lookups.
    :param list ovdc_list: VDC records (dict-like, with 'name' and 'id').

    :return: one detail dict per VDC.
    :rtype: list
    """
    config = server_utils.get_server_runtime_config()
    log_wire = utils.str_to_bool(config.get('service', {}).get('log_wire'))
    cpm = compute_policy_manager.ComputePolicyManager(sysadmin_client,
                                                      log_wire=log_wire)
    results = []
    for vdc_record in ovdc_list:
        vdc_name = vdc_record.get('name')
        vdc_id = vcd_utils.extract_id(vdc_record.get('id'))
        # obtain ovdc runtime details for the ovdc
        details = asdict(
            get_ovdc_k8s_runtime_details(sysadmin_client,
                                         ovdc_id=vdc_id,
                                         ovdc_name=vdc_name,
                                         cpm=cpm,
                                         log_wire=log_wire))
        # NOTE: For CSE 3.0, if the `enable_tkg_plus` flag in the config
        # is false, hide TKG+ by dropping it from the runtime list.
        if ClusterEntityKind.TKG_PLUS.value in details['k8s_runtime'] \
                and not server_utils.is_tkg_plus_enabled():  # noqa: E501
            details['k8s_runtime'].remove(ClusterEntityKind.TKG_PLUS.value)
        # TODO: Find a better way to remove remove_cp_from_vms_on_disable
        del details['remove_cp_from_vms_on_disable']
        results.append(details)
    return results
def get_ovdc_k8s_runtime_details(
        sysadmin_client: vcd_client.Client,
        ovdc_id=None,
        ovdc_name=None,
        org_name=None,
        cpm: Optional[compute_policy_manager.ComputePolicyManager] = None,  # noqa: E501
        log_wire=False) -> common_models.Ovdc:
    """Get k8s runtime details for an ovdc.

    At least (ovdc_id and ovdc_name) or (org_name and ovdc_name) must be
    provided; supplying both ovdc_id and ovdc_name skips an extra VDC
    lookup.

    :param vcd_client.Client sysadmin_client: vcd sysadmin client
    :param str ovdc_id:
    :param str ovdc_name:
    :param str org_name:
    :param compute_policy_manager.ComputePolicyManager cpm: reused if
        provided, otherwise created locally.
    :param bool log_wire:

    :return: Ovdc object with k8s runtimes
    :rtype: common_models.Ovdc

    :raises Exception: if neither an id nor an (org, name) pair is given.
    """
    vcd_utils.raise_error_if_user_not_from_system_org(sysadmin_client)
    if not cpm:
        cpm = compute_policy_manager.ComputePolicyManager(
            sysadmin_client, log_wire=log_wire)

    # Guard: need either the id or enough naming info to resolve it.
    if not (org_name and ovdc_name) and not ovdc_id:
        msg = "Unable to fetch OVDC k8 runtime details with the " \
              "provided parameters"
        logger.SERVER_LOGGER.error(msg)
        raise Exception(msg)

    if not ovdc_id or not ovdc_name:
        # Resolve whichever identifier is missing via a VDC lookup.
        vdc_obj = vcd_utils.get_vdc(client=sysadmin_client,
                                    vdc_id=ovdc_id,
                                    vdc_name=ovdc_name,
                                    org_name=org_name,
                                    is_admin_operation=True)
        vdc_resource = vdc_obj.get_resource()
        ovdc_id = vcd_utils.extract_id(vdc_resource.get('id'))
        ovdc_name = vdc_resource.get('name')

    # Each CSE placement policy on the VDC maps to a runtime display name.
    placement_policies = \
        compute_policy_manager.list_cse_placement_policies_on_vdc(cpm,
                                                                  ovdc_id)  # noqa: E501
    runtimes = [
        RUNTIME_INTERNAL_NAME_TO_DISPLAY_NAME_MAP[policy['display_name']]
        for policy in placement_policies
    ]
    return common_models.Ovdc(ovdc_name=ovdc_name,
                              ovdc_id=ovdc_id,
                              k8s_runtime=runtimes)  # noqa: E501
def _get_cse_ovdc_list(sysadmin_client: vcd_client.Client,
                       org_vdcs: list) -> list:
    """Summarize the k8s provider for each of the given org VDC records.

    :param vcd_client.Client sysadmin_client: sysadmin client used to
        read VDC metadata.
    :param list org_vdcs: VDC records (dict-like, with 'name', 'orgName'
        and 'id').

    :return: one info dict per VDC with name, org and k8s provider.
    :rtype: list
    """
    def _to_info_dict(record):
        # One summary dict per VDC record.
        vdc_name = record.get('name')
        org_name = record.get('orgName')
        vdc_id = vcd_utils.extract_id(record.get('id'))
        # the supported runtimes are stored in ovdc metadata
        k8s_metadata = ovdc_utils.get_ovdc_k8s_provider_metadata(
            sysadmin_client,
            ovdc_id=vdc_id,
            ovdc_name=vdc_name,
            org_name=org_name)
        return {
            OvdcInfoKey.OVDC_NAME: vdc_name,
            OvdcInfoKey.ORG_NAME: org_name,
            OvdcInfoKey.K8S_PROVIDER: k8s_metadata[K8S_PROVIDER_KEY]
        }

    return [_to_info_dict(record) for record in org_vdcs]
def init_rde_environment(config_filepath=BASE_CONFIG_FILEPATH, logger=NULL_LOGGER):  # noqa: E501
    """Set up module variables according to config dict.

    Logs in two sysadmin clients (one pinned to the max API version
    common to CSE and VCD), caches org/VDC hrefs and CLI login command
    strings in module globals, and — when SHOULD_INSTALL_PREREQUISITES is
    set — creates test roles and a sizing compute policy on the test VDC.

    :param str config_filepath:
    :param logging.Logger logger:
    """
    global CLIENT, ORG_HREF, VDC_HREF, \
        CATALOG_NAME, TEARDOWN_INSTALLATION, TEARDOWN_CLUSTERS, \
        TEST_ALL_TEMPLATES, SYS_ADMIN_LOGIN_CMD, \
        CLUSTER_ADMIN_LOGIN_CMD, CLUSTER_AUTHOR_LOGIN_CMD, \
        USERNAME_TO_LOGIN_CMD, USERNAME_TO_CLUSTER_NAME, TEST_ORG_HREF, \
        TEST_VDC_HREF, VCD_API_VERSION_TO_USE, VCD_SITE

    logger.debug("Setting RDE environement")
    config = testutils.yaml_to_dict(config_filepath)
    logger.debug(f"Config file used: {config}")

    # First sysadmin client: used only to discover supported API versions
    # and (later) for compute-policy management.
    sysadmin_client = Client(config['vcd']['host'],
                             verify_ssl_certs=config['vcd']['verify'])
    sysadmin_client.set_credentials(
        BasicLoginCredentials(config['vcd']['username'],
                              shared_constants.SYSTEM_ORG_NAME,
                              config['vcd']['password']))

    # Pick the highest API version supported by both VCD and CSE.
    vcd_supported_api_versions = \
        set(sysadmin_client.get_supported_versions_list())
    cse_supported_api_versions = set(
        shared_constants.SUPPORTED_VCD_API_VERSIONS)  # noqa: E501
    common_supported_api_versions = \
        list(cse_supported_api_versions.intersection(vcd_supported_api_versions))  # noqa: E501
    common_supported_api_versions.sort()
    max_api_version = get_max_api_version(common_supported_api_versions)

    # Second client, pinned to that negotiated API version; this is the
    # one cached in the module-level CLIENT global.
    CLIENT = Client(config['vcd']['host'],
                    api_version=max_api_version,
                    verify_ssl_certs=config['vcd']['verify'])
    credentials = BasicLoginCredentials(config['vcd']['username'],
                                        shared_constants.SYSTEM_ORG_NAME,
                                        config['vcd']['password'])
    CLIENT.set_credentials(credentials)
    VCD_API_VERSION_TO_USE = max_api_version
    logger.debug(f"Using VCD api version: {VCD_API_VERSION_TO_USE}")

    CATALOG_NAME = config['broker']['catalog']
    VCD_SITE = f"https://{config['vcd']['host']}"

    # Pre-built vcd-cli login command strings for each test persona.
    SYS_ADMIN_LOGIN_CMD = f"login {config['vcd']['host']} system " \
                          f"{config['vcd']['username']} " \
                          f"-iwp {config['vcd']['password']} " \
                          f"-V {VCD_API_VERSION_TO_USE}"
    CLUSTER_ADMIN_LOGIN_CMD = f"login {config['vcd']['host']} " \
                              f"{TEST_ORG}" \
                              f" {CLUSTER_ADMIN_NAME} " \
                              f"-iwp {CLUSTER_ADMIN_PASSWORD} " \
                              f"-V {VCD_API_VERSION_TO_USE}"
    CLUSTER_AUTHOR_LOGIN_CMD = f"login {config['vcd']['host']} " \
                               f"{TEST_ORG}" \
                               f" {CLUSTER_AUTHOR_NAME} " \
                               f"-iwp {CLUSTER_AUTHOR_PASSWORD} " \
                               f"-V {VCD_API_VERSION_TO_USE}"

    USERNAME_TO_LOGIN_CMD = {
        SYS_ADMIN_NAME: SYS_ADMIN_LOGIN_CMD,
        CLUSTER_ADMIN_NAME: CLUSTER_ADMIN_LOGIN_CMD,
        CLUSTER_AUTHOR_NAME: CLUSTER_AUTHOR_LOGIN_CMD
    }

    # hrefs for Org and VDC that hosts the catalog
    org = pyvcloud_utils.get_org(CLIENT, org_name=config['broker']['org'])
    vdc = pyvcloud_utils.get_vdc(CLIENT,
                                 vdc_name=config['broker']['vdc'],
                                 org=org)
    ORG_HREF = org.href
    VDC_HREF = vdc.href
    logger.debug(f"Using template org {org.get_name()} with href {ORG_HREF}")
    logger.debug(f"Using template vdc {vdc.name} with href {VDC_HREF}")

    # hrefs for Org and VDC that tests cluster operations
    test_org = pyvcloud_utils.get_org(CLIENT, org_name=TEST_ORG)
    test_vdc = pyvcloud_utils.get_vdc(CLIENT, vdc_name=TEST_VDC, org=test_org)
    TEST_ORG_HREF = test_org.href
    TEST_VDC_HREF = test_vdc.href
    logger.debug(f"Using test org {test_org.get_name()} "
                 f"with href {TEST_ORG_HREF}")
    logger.debug(f"Using test vdc {test_vdc.name} with href {TEST_VDC_HREF}")

    if SHOULD_INSTALL_PREREQUISITES:
        create_cluster_admin_role(config['vcd'], logger=logger)
        create_cluster_author_role(config['vcd'], logger=logger)

        # create and publish sizing class sc1 to TEST_VDC
        cpm = ComputePolicyManager(sysadmin_client=sysadmin_client,
                                   log_wire=True)
        created_policy = None
        try:
            created_policy = cpm.add_vdc_compute_policy(
                SIZING_CLASS_NAME,
                description=SIZING_CLASS_DESCRIPTION,
                cpu_count=2,
                memory_mb=2048)
        except HTTPError as err:
            # Tolerate reruns: reuse the policy if it already exists,
            # re-raise on any other failure.
            if 'already exists' in err.response.text:
                logger.debug(
                    f"Compute policy {SIZING_CLASS_NAME} already exists"
                )  # noqa: E501
                created_policy = cpm.get_vdc_compute_policy(SIZING_CLASS_NAME)
            else:
                logger.error(
                    f"Request to create sizing policy {SIZING_CLASS_NAME} failed."
                )  # noqa: E501
                raise
        try:
            cpm.add_compute_policy_to_vdc(
                pyvcloud_utils.extract_id(
                    test_vdc.get_resource_admin().get('id')),  # noqa: E501
                created_policy['id'])
        except Exception as err:
            # Best-effort publish: failure is logged but not fatal.
            logger.error(
                f"Error publishing sizing policy {SIZING_CLASS_NAME} to vdc {TEST_VDC}: {err}"
            )  # noqa: E501

        # NOTE(review): cluster admin/author roles were already created at
        # the top of this block; this second pair of calls looks redundant
        # — confirm whether it is intentional (e.g. idempotent re-check).
        create_cluster_admin_role(config['vcd'], logger=logger)
        create_cluster_author_role(config['vcd'], logger=logger)
def ovdc_list(request_data, op_ctx: ctx.OperationContext):
    """Request handler for ovdc list operation.

    Lists every org VDC with its k8s provider; when `list_pks_plans` is
    requested (sysadmin only), also resolves the PKS server and plans
    available for each PKS-backed VDC.

    :return: List of dictionaries with org VDC k8s provider metadata.

    :rtype: list
    """
    # NOTE: response sent out by this handler should not be paginated
    data = req_utils.flatten_request_data(
        request_data, [RequestKey.QUERY_PARAMS])

    # Merge caller-supplied params over defaults.
    defaults = {
        RequestKey.LIST_PKS_PLANS: False,
    }
    validated_data = {**defaults, **data}
    list_pks_plans = utils.str_to_bool(validated_data[RequestKey.LIST_PKS_PLANS])  # noqa: E501

    # Record telemetry data
    cse_params = copy.deepcopy(validated_data)
    cse_params[RequestKey.LIST_PKS_PLANS] = list_pks_plans
    cse_params[PayloadKey.SOURCE_DESCRIPTION] = thread_local_data.get_thread_local_data(ThreadLocalData.USER_AGENT)  # noqa: E501
    record_user_action_details(cse_operation=CseOperation.OVDC_LIST,
                               cse_params=cse_params)

    client_v33 = op_ctx.get_client(api_version=DEFAULT_API_VERSION)
    # PKS plan details are restricted to system administrators.
    if list_pks_plans and not client_v33.is_sysadmin():
        raise e.UnauthorizedRequestError(
            'Operation denied. Enterprise PKS plans visible only '
            'to System Administrators.')

    ovdcs = []
    org_vdcs = vcd_utils.get_all_ovdcs(client_v33)
    sysadmin_client_v33 = \
        op_ctx.get_sysadmin_client(api_version=DEFAULT_API_VERSION)
    for ovdc in org_vdcs:
        ovdc_name = ovdc.get('name')
        org_name = ovdc.get('orgName')
        ovdc_id = vcd_utils.extract_id(ovdc.get('id'))
        # The k8s provider for the VDC is stored in its metadata.
        k8s_metadata = ovdc_utils.get_ovdc_k8s_provider_metadata(
            sysadmin_client_v33,
            ovdc_id=ovdc_id,
            ovdc_name=ovdc_name,
            org_name=org_name)
        k8s_provider = k8s_metadata[K8S_PROVIDER_KEY]
        ovdc_dict = {
            OvdcInfoKey.OVDC_NAME: ovdc_name,
            OvdcInfoKey.ORG_NAME: org_name,
            OvdcInfoKey.K8S_PROVIDER: k8s_provider
        }
        if list_pks_plans:
            # Defaults when the VDC is not PKS-backed: empty strings.
            pks_plans = ''
            pks_server = ''
            if k8s_provider == K8sProvider.PKS:
                # vc name for vdc can only be found using typed query
                qfilter = f"name=={urllib.parse.quote(ovdc_name)};" \
                          f"orgName=={urllib.parse.quote(org_name)}"
                q = client_v33.get_typed_query(
                    vcd_client.ResourceType.ADMIN_ORG_VDC.value,
                    query_result_format=vcd_client.QueryResultFormat.RECORDS,  # noqa: E501
                    qfilter=qfilter)
                # should only ever be one element in the generator
                ovdc_records = list(q.execute())
                if len(ovdc_records) == 0:
                    raise vcd_e.EntityNotFoundException(
                        f"Org VDC {ovdc_name} not found in org {org_name}")
                ovdc_record = None
                # Take only the first record.
                for record in ovdc_records:
                    ovdc_record = pyvcd_utils.to_dict(
                        record,
                        resource_type=vcd_client.ResourceType.ADMIN_ORG_VDC.value)  # noqa: E501
                    break

                # Map each vCenter to its PKS plans/host, deduplicating
                # accounts that share a vCenter.
                vc_to_pks_plans_map = {}
                pks_contexts = pksbroker_manager.create_pks_context_for_all_accounts_in_org(op_ctx)  # noqa: E501

                for pks_context in pks_contexts:
                    if pks_context['vc'] in vc_to_pks_plans_map:
                        continue
                    pks_broker = pksbroker.PksBroker(pks_context, op_ctx)
                    plans = pks_broker.list_plans()
                    plan_names = [plan.get('name') for plan in plans]
                    vc_to_pks_plans_map[pks_context['vc']] = \
                        [plan_names, pks_context['host']]

                # Pick the plans/server for the vCenter backing this VDC.
                pks_plan_and_server_info = vc_to_pks_plans_map.get(
                    ovdc_record['vcName'], [])
                if len(pks_plan_and_server_info) > 0:
                    pks_plans = pks_plan_and_server_info[0]
                    pks_server = pks_plan_and_server_info[1]

            ovdc_dict[PKSOvdcInfoKey.PKS_API_SERVER] = pks_server
            ovdc_dict[PKSOvdcInfoKey.AVAILABLE_PKS_PLANS] = pks_plans
        ovdcs.append(ovdc_dict)

    return ovdcs
def remove_compute_policy_from_vdc_sync(self, vdc, compute_policy_href,
                                        force=False,
                                        is_placement_policy=False,
                                        task_resource=None):
    """Remove compute policy from vdc.

    This method makes use of an umbrella task which can be used for
    tracking progress. If the umbrella task is not specified, it is
    created.

    When `force` is True, the policy is first detached from every VM in
    the VDC that uses it (placement policies are removed; sizing policies
    are replaced with the system default), with a task status update and
    a synchronous wait per VM operation.

    :param pyvcloud.vcd.vdc.VDC vdc: VDC object
    :param str compute_policy_href: href of the compute policy to remove
    :param bool force: Force remove compute policy from vms in the VDC
        as well
    :param bool is_placement_policy: True if the policy being removed is
        a placement policy, False for a sizing policy.
    :param lxml.objectify.Element task_resource: Task resource for the
        umbrella task
    """
    user_name = self._session.get('user')
    task = Task(self._sysadmin_client)
    task_href = None
    is_umbrella_task = task_resource is not None
    # Create a task if not umbrella task
    if not is_umbrella_task:
        # TODO the following org will be associated with 'System' org.
        # task created should be associated with the corresponding org of
        # the vdc object.
        org = vcd_utils.get_org(self._sysadmin_client)
        org.reload()
        user_href = org.get_user(user_name).get('href')
        org_href = org.href
        # NOTE(review): message says "vdc id" but interpolates vdc.name —
        # confirm intent.
        task_resource = task.update(
            status=vcd_client.TaskStatus.RUNNING.value,
            namespace='vcloud.cse',
            operation=f"Removing compute policy (href: {compute_policy_href})"  # noqa: E501
                      f" from org VDC (vdc id: {vdc.name})",
            operation_name='Remove org VDC compute policy',
            details='',
            progress=None,
            owner_href=vdc.href,
            owner_name=vdc.name,
            owner_type=vcd_client.EntityType.VDC.value,
            user_href=user_href,
            user_name=user_name,
            org_href=org.href)
    else:
        # Reuse the provided umbrella task's identity/ownership.
        user_href = task_resource.User.get('href')
        org_href = task_resource.Organization.get('href')
        task_href = task_resource.get('href')
    try:
        # remove the compute policy from VMs if force is True
        if force:
            compute_policy_id = retrieve_compute_policy_id_from_href(
                compute_policy_href)  # noqa: E501
            vdc_id = vcd_utils.extract_id(vdc.get_resource().get('id'))
            vapps = vcd_utils.get_all_vapps_in_ovdc(
                client=self._sysadmin_client,
                ovdc_id=vdc_id)
            target_vms = []
            system_default_href = None
            operation_msg = None
            # Locate the system default policy; VMs losing a sizing
            # policy are switched to it.
            for cp_dict in self.list_compute_policies_on_vdc(vdc_id):
                if cp_dict['name'] == _SYSTEM_DEFAULT_COMPUTE_POLICY:
                    system_default_href = cp_dict['href']
                    break
            # Collect every VM in the VDC that currently uses the policy
            # being removed.
            if is_placement_policy:
                for vapp in vapps:
                    target_vms += \
                        [vm for vm in vapp.get_all_vms()
                         if self._get_vm_placement_policy_id(vm) == compute_policy_id]  # noqa: E501
                vm_names = [vm.get('name') for vm in target_vms]
                operation_msg = f"Removing placement policy from " \
                                f"{len(vm_names)} VMs. " \
                                f"Affected VMs: {vm_names}"
            else:
                for vapp in vapps:
                    target_vms += \
                        [vm for vm in vapp.get_all_vms()
                         if self._get_vm_sizing_policy_id(vm) == compute_policy_id]  # noqa: E501
                vm_names = [vm.get('name') for vm in target_vms]
                operation_msg = "Setting sizing policy to " \
                                f"'{_SYSTEM_DEFAULT_COMPUTE_POLICY}' on " \
                                f"{len(vm_names)} VMs. " \
                                f"Affected VMs: {vm_names}"

            task.update(status=vcd_client.TaskStatus.RUNNING.value,
                        namespace='vcloud.cse',
                        operation=operation_msg,
                        operation_name='Remove org VDC compute policy',
                        details='',
                        progress=None,
                        owner_href=vdc.href,
                        owner_name=vdc.name,
                        owner_type=vcd_client.EntityType.VDC.value,
                        user_href=user_href,
                        user_name=user_name,
                        task_href=task_href,
                        org_href=org_href)

            task_monitor = self._sysadmin_client.get_task_monitor()
            # Detach the policy VM by VM, waiting for each sub-task.
            for vm_resource in target_vms:
                vm = VM(self._sysadmin_client,
                        href=vm_resource.get('href'))
                _task = None
                operation_msg = None
                if is_placement_policy:
                    # If the VM has a compute policy section but no sizing
                    # policy, assign the system default sizing policy
                    # before stripping the placement policy.
                    if hasattr(vm_resource, 'ComputePolicy') and \
                            not hasattr(vm_resource.ComputePolicy, 'VmSizingPolicy'):  # noqa: E501
                        # Updating sizing policy for the VM
                        _task = vm.update_compute_policy(
                            compute_policy_href=system_default_href)
                        operation_msg = \
                            "Setting compute policy to " \
                            f"'{_SYSTEM_DEFAULT_COMPUTE_POLICY}' "\
                            f"on VM '{vm_resource.get('name')}'"
                        task.update(
                            status=vcd_client.TaskStatus.RUNNING.value,
                            namespace='vcloud.cse',
                            operation=operation_msg,
                            operation_name=f'Setting sizing policy to {_SYSTEM_DEFAULT_COMPUTE_POLICY}',  # noqa: E501
                            details='',
                            progress=None,
                            owner_href=vdc.href,
                            owner_name=vdc.name,
                            owner_type=vcd_client.EntityType.VDC.value,
                            user_href=user_href,
                            user_name=user_name,
                            task_href=task_href,
                            org_href=org_href)
                        task_monitor.wait_for_success(_task)

                    _task = vm.remove_placement_policy()
                    operation_msg = "Removing placement policy on VM " \
                                    f"'{vm_resource.get('name')}'"
                    task.update(
                        status=vcd_client.TaskStatus.RUNNING.value,
                        namespace='vcloud.cse',
                        operation=operation_msg,
                        operation_name='Remove org VDC compute policy',
                        details='',
                        progress=None,
                        owner_href=vdc.href,
                        owner_name=vdc.name,
                        owner_type=vcd_client.EntityType.VDC.value,
                        user_href=user_href,
                        user_name=user_name,
                        task_href=task_href,
                        org_href=org_href)
                    task_monitor.wait_for_success(_task)
                else:
                    # Sizing policy removal: replace with system default.
                    _task = vm.update_compute_policy(
                        compute_policy_href=system_default_href)
                    operation_msg = "Setting sizing policy to " \
                                    f"'{_SYSTEM_DEFAULT_COMPUTE_POLICY}' "\
                                    f"on VM '{vm_resource.get('name')}'"
                    task.update(
                        status=vcd_client.TaskStatus.RUNNING.value,
                        namespace='vcloud.cse',
                        operation=operation_msg,
                        operation_name='Remove org VDC compute policy',
                        details='',
                        progress=None,
                        owner_href=vdc.href,
                        owner_name=vdc.name,
                        owner_type=vcd_client.EntityType.VDC.value,
                        user_href=user_href,
                        user_name=user_name,
                        task_href=task_href,
                        org_href=org_href)
                    task_monitor.wait_for_success(_task)

        # An umbrella task stays RUNNING (the caller finishes it);
        # a standalone task is marked SUCCESS here.
        final_status = vcd_client.TaskStatus.RUNNING.value \
            if is_umbrella_task else vcd_client.TaskStatus.SUCCESS.value
        task.update(status=final_status,
                    namespace='vcloud.cse',
                    operation=f"Removing compute policy (href:"
                              f"{compute_policy_href}) from org VDC '{vdc.name}'",
                    operation_name='Remove org VDC compute policy',
                    details='',
                    progress=None,
                    owner_href=vdc.href,
                    owner_name=vdc.name,
                    owner_type=vcd_client.EntityType.VDC.value,
                    user_href=user_href,
                    user_name=user_name,
                    task_href=task_href,
                    org_href=org_href)

        vdc.remove_compute_policy(compute_policy_href)
    except Exception as err:
        logger.SERVER_LOGGER.error(err, exc_info=True)
        # Set task to error if not an umbrella task
        if not is_umbrella_task:
            msg = 'Failed to remove compute policy: ' \
                  f'{compute_policy_href} from the OVDC: {vdc.name}'
            task.update(status=vcd_client.TaskStatus.ERROR.value,
                        namespace='vcloud.cse',
                        operation=msg,
                        operation_name='Remove org VDC compute policy',
                        details='',
                        progress=None,
                        owner_href=vdc.href,
                        owner_name=vdc.name,
                        owner_type=vcd_client.EntityType.VDC.value,
                        user_href=user_href,
                        user_name=self._session.get('user'),
                        task_href=task_href,
                        org_href=org_href,
                        error_message=f"{err}",
                        stack_trace='')
        raise err
def org_vdc_list(request_data, op_ctx: ctx.OperationContext):
    """Request handler for ovdc list operation.

    Paginated variant: reads page number/size from the query params,
    fetches one page of org VDCs, annotates each with its k8s provider
    (and, for sysadmins requesting it, PKS server/plan info), and wraps
    the result in a paginated response envelope.

    :return: dictionary containing list of Org VDCs

    :rtype: dict
    """
    # NOTE: Response sent out by this handler should be paginated
    data = req_utils.flatten_request_data(request_data,
                                          [RequestKey.QUERY_PARAMS])

    # Merge caller-supplied params over defaults.
    defaults = {
        RequestKey.LIST_PKS_PLANS: False,
        PaginationKey.PAGE_NUMBER: CSE_PAGINATION_FIRST_PAGE_NUMBER,
        PaginationKey.PAGE_SIZE: CSE_PAGINATION_DEFAULT_PAGE_SIZE
    }
    validated_data = {**defaults, **data}
    page_number = int(validated_data[PaginationKey.PAGE_NUMBER])
    page_size = int(validated_data[PaginationKey.PAGE_SIZE])
    list_pks_plans = utils.str_to_bool(
        validated_data[RequestKey.LIST_PKS_PLANS])  # noqa: E501

    # Record telemetry data
    # TODO: enhance telemetry to record the page number and page size data.
    cse_params = copy.deepcopy(validated_data)
    cse_params[RequestKey.LIST_PKS_PLANS] = list_pks_plans
    cse_params[PayloadKey.
               SOURCE_DESCRIPTION] = thread_local_data.get_thread_local_data(
        ThreadLocalData.USER_AGENT)  # noqa: E501
    record_user_action_details(cse_operation=CseOperation.OVDC_LIST,
                               cse_params=cse_params)

    # PKS plan details are restricted to system administrators.
    if list_pks_plans and not op_ctx.client.is_sysadmin():
        raise e.UnauthorizedRequestError(
            'Operation denied. Enterprise PKS plans visible only '
            'to System Administrators.')

    ovdcs = []
    # Fetch one page of org VDCs plus pagination cursors.
    result = \
        vcd_utils.get_ovdcs_by_page(op_ctx.client,
                                    page=page_number,
                                    page_size=page_size)
    org_vdcs = result[PaginationKey.VALUES]
    result_total = result[PaginationKey.RESULT_TOTAL]
    next_page_uri = result.get(PaginationKey.NEXT_PAGE_URI)
    prev_page_uri = result.get(PaginationKey.PREV_PAGE_URI)
    for ovdc in org_vdcs:
        ovdc_name = ovdc.get('name')
        org_name = ovdc.get('orgName')
        ovdc_id = vcd_utils.extract_id(ovdc.get('id'))
        # The k8s provider for the VDC is stored in its metadata.
        k8s_metadata = ovdc_utils.get_ovdc_k8s_provider_metadata(
            op_ctx.sysadmin_client,
            ovdc_id=ovdc_id,
            ovdc_name=ovdc_name,
            org_name=org_name)
        k8s_provider = k8s_metadata[K8S_PROVIDER_KEY]
        ovdc_dict = {
            OvdcInfoKey.OVDC_NAME: ovdc_name,
            OvdcInfoKey.ORG_NAME: org_name,
            OvdcInfoKey.K8S_PROVIDER: k8s_provider
        }
        if list_pks_plans:
            # Defaults when the VDC is not PKS-backed: empty strings.
            pks_plans = ''
            pks_server = ''
            if k8s_provider == K8sProvider.PKS:
                # vc name for vdc can only be found using typed query
                qfilter = f"name=={urllib.parse.quote(ovdc_name)};" \
                          f"orgName=={urllib.parse.quote(org_name)}"
                q = op_ctx.client.get_typed_query(
                    vcd_client.ResourceType.ADMIN_ORG_VDC.value,
                    query_result_format=vcd_client.QueryResultFormat.
                    RECORDS,  # noqa: E501
                    qfilter=qfilter)
                # should only ever be one element in the generator
                ovdc_records = list(q.execute())
                if len(ovdc_records) == 0:
                    raise vcd_e.EntityNotFoundException(
                        f"Org VDC {ovdc_name} not found in org {org_name}")
                ovdc_record = None
                # Take only the first record.
                for record in ovdc_records:
                    ovdc_record = pyvcd_utils.to_dict(
                        record,
                        resource_type=vcd_client.ResourceType.ADMIN_ORG_VDC.
                        value)  # noqa: E501
                    break

                # Map each vCenter to its PKS plans/host, deduplicating
                # accounts that share a vCenter.
                vc_to_pks_plans_map = {}
                pks_contexts = pksbroker_manager.create_pks_context_for_all_accounts_in_org(
                    op_ctx)  # noqa: E501

                for pks_context in pks_contexts:
                    if pks_context['vc'] in vc_to_pks_plans_map:
                        continue
                    pks_broker = pksbroker.PksBroker(pks_context, op_ctx)
                    plans = pks_broker.list_plans()
                    plan_names = [plan.get('name') for plan in plans]
                    vc_to_pks_plans_map[pks_context['vc']] = \
                        [plan_names, pks_context['host']]

                # Pick the plans/server for the vCenter backing this VDC.
                pks_plan_and_server_info = vc_to_pks_plans_map.get(
                    ovdc_record['vcName'], [])
                if len(pks_plan_and_server_info) > 0:
                    pks_plans = pks_plan_and_server_info[0]
                    pks_server = pks_plan_and_server_info[1]

            ovdc_dict[PKSOvdcInfoKey.PKS_API_SERVER] = pks_server
            ovdc_dict[PKSOvdcInfoKey.AVAILABLE_PKS_PLANS] = pks_plans
        ovdcs.append(ovdc_dict)

    # Rewrite VCD pagination URIs into CSE-served endpoint URIs.
    api_path = CseServerOperationInfo.PKS_ORG_VDC_LIST.api_path_format
    next_page_uri = vcd_utils.create_cse_page_uri(op_ctx.client, api_path,
                                                  vcd_uri=next_page_uri)

    prev_page_uri = vcd_utils.create_cse_page_uri(op_ctx.client, api_path,
                                                  vcd_uri=prev_page_uri)

    return server_utils.construct_paginated_response(values=ovdcs,
                                                     result_total=result_total,
                                                     page_number=page_number,
                                                     page_size=page_size,
                                                     next_page_uri=next_page_uri,  # noqa: E501
                                                     prev_page_uri=prev_page_uri)  # noqa: E501