def native_update_vapp_access_settings(
        self, prev_user_id_to_acl_entry_dict,  # noqa: E501
        update_cluster_acl_entries: List[common_models.ClusterAclEntry]):
    def_entity_user_ids = {
        acl_entry.memberId
        for _, acl_entry in prev_user_id_to_acl_entry_dict.items()
    }
    total_vapp_access_settings = self.native_get_vapp_settings_only_vapp_shared(  # noqa: E501
        def_entity_user_ids)

    # Add updated access settings
    vapp_access_settings: lxml.objectify.ObjectifiedElement = \
        self.vapp.get_access_settings()
    api_uri = self._client.get_api_uri()
    system_user_names: Optional[set] = None
    if self._client.is_sysadmin():
        system_user_names = vcd_utils.get_org_user_names(
            client=self._client,
            org_name=shared_constants.SYSTEM_ORG_NAME)
    for acl_entry in update_cluster_acl_entries:
        user_name = acl_entry.username
        # Skip system users since sharing can't be outside an org
        if system_user_names and user_name in system_user_names:
            continue
        user_id = pyvcloud_utils.extract_id(acl_entry.memberId)
        access_level = pyvcloud_utils.extract_id(acl_entry.accessLevelId)
        # Use 'Change' instead of 'ReadWrite' for vApp access level
        if access_level == shared_constants.READ_WRITE:
            access_level = server_constants.CHANGE_ACCESS
        user_setting = form_vapp_access_setting_entry(
            access_level=access_level,
            name=user_name,
            href=f'{api_uri}{server_constants.ADMIN_USER_PATH}{user_id}',
            user_id=user_id)
        total_vapp_access_settings.append(user_setting)

    vapp_share_contents = {
        server_constants.VappAccessKey.IS_SHARED_TO_EVERYONE:
            bool(vapp_access_settings.IsSharedToEveryone),
        server_constants.VappAccessKey.ACCESS_SETTINGS: {
            server_constants.VappAccessKey.ACCESS_SETTING:
                total_vapp_access_settings
        }  # noqa: E501
    }
    org_id = pyvcloud_utils.extract_id(self.def_entity.org.id)
    org_name = self.def_entity.org.name
    extra_vapp_headers = {
        server_constants.TENANT_CONTEXT_HEADER: org_id,
        server_constants.AUTH_CONTEXT_HEADER: org_name,
        server_constants.VCLOUD_AUTHORIZATION_HEADER: org_name
    }
    self._client.post_resource(
        uri=f'{self.vapp.href}{def_constants.ACTION_CONTROL_ACCESS_PATH}',
        contents=vapp_share_contents,
        media_type='application/*+json',
        extra_headers=extra_vapp_headers)
def find_vm_in_vapp(ctx, vm_name=None, vm_id=None):
    result = []
    try:
        resource_type = 'vApp'
        query = ctx.client.get_typed_query(
            resource_type,
            query_result_format=QueryResultFormat.ID_RECORDS)
        records = list(query.execute())
        vdc_resource = ctx.vdc.get_resource()
        vdc_id = vdc_resource.get('id')
        vdc_name = vdc_resource.get('name')
        for curr_vapp in records:
            vapp_vdc = curr_vapp.get('vdc')
            if vdc_id != vapp_vdc:
                continue
            vapp_id = curr_vapp.get('id')
            vapp_name = curr_vapp.get('name')
            vapp_href = curr_vapp.get('href')
            the_vapp = ctx.vdc.get_vapp(vapp_name)
            for vm in the_vapp.Children.Vm:
                if vm.get('name') == vm_name or \
                        extract_id(vm.get('id')) == vm_id:
                    result.append({
                        'vdc': extract_id(vapp_vdc),
                        'vdc_name': vdc_name,
                        'vapp': extract_id(vapp_id),
                        'vapp_name': vapp_name,
                        'vapp_href': vapp_href,
                        'vm': extract_id(vm.get('id')),
                        'vm_name': vm.get('name'),
                        'vm_href': vm.get('href'),
                        'status': VCLOUD_STATUS_MAP.get(int(vm.get('status')))
                    })
                    break
        # Refresh session after Typed Query
        Client.login(session_id=ctx.token)
    except Exception:
        if ctx.config['debug']:
            raise
    return result
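# Usage sketch (an addition, not part of the original module): look up a VM
# by name across the vApps of the context's VDC.  `ctx` is assumed to carry a
# logged-in pyvcloud client, a VDC object, a session token and a `config`
# dict, exactly as find_vm_in_vapp above expects; the VM name 'web-01' is a
# placeholder.
def example_find_vm(ctx):
    matches = find_vm_in_vapp(ctx, vm_name='web-01')
    for match in matches:
        # Each match carries bare ids (the urn prefix is stripped by
        # extract_id).
        print(match['vapp_name'], match['vm_name'], match['status'])
    return matches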
def get_payload_for_v35_cluster_apply(def_entity: DefEntity):
    """Construct telemetry payload of v35 cluster apply.

    :param DefEntity def_entity: defined entity instance

    :return: json telemetry data for the operation

    :rtype: dict
    """
    return {
        PayloadKey.TYPE: CseOperation.V35_CLUSTER_APPLY.telemetry_table,
        PayloadKey.CLUSTER_ID: uuid_hash(pyvcd_utils.extract_id(def_entity.id)),  # noqa: E501
        PayloadKey.CLUSTER_KIND: def_entity.entity.kind,
        PayloadKey.TEMPLATE_NAME: def_entity.entity.spec.k8_distribution.template_name,  # noqa: E501
        PayloadKey.TEMPLATE_REVISION: def_entity.entity.spec.k8_distribution.template_revision,  # noqa: E501
        PayloadKey.NUMBER_OF_MASTER_NODES: def_entity.entity.spec.control_plane.count,  # noqa: E501
        PayloadKey.NUMBER_OF_WORKER_NODES: def_entity.entity.spec.workers.count,  # noqa: E501
        PayloadKey.NUMBER_OF_NFS_NODES: def_entity.entity.spec.nfs.count,  # noqa: E501
        PayloadKey.WAS_SSH_KEY_SPECIFIED: bool(def_entity.entity.spec.settings.ssh_key),  # noqa: E501
        PayloadKey.WAS_ROLLBACK_ENABLED: bool(def_entity.entity.spec.settings.rollback_on_failure)  # noqa: E501
    }
def list_disks(ctx):
    try:
        client = ctx.obj['client']
        vdc_href = ctx.obj['profiles'].get('vdc_href')
        vdc = VDC(client, href=vdc_href)
        disks = vdc.get_disks()
        result = []
        for disk in disks:
            attached_vms = ''
            if hasattr(disk, 'attached_vms') and \
                    hasattr(disk.attached_vms, 'VmReference'):
                attached_vms = disk.attached_vms.VmReference.get('name')
            result.append({
                'name': disk.get('name'),
                'id': extract_id(disk.get('id')),
                'owner': disk.Owner.User.get('name'),
                'size': humanfriendly.format_size(int(disk.get('size'))),
                'size_bytes': disk.get('size'),
                'status': VCLOUD_STATUS_MAP.get(int(disk.get('status'))),
                'vms_attached': attached_vms
            })
        stdout(result, ctx, show_id=True)
    except Exception as e:
        stderr(e, ctx)
def list_disks(ctx):
    try:
        restore_session(ctx, vdc_required=True)
        client = ctx.obj['client']
        vdc_href = ctx.obj['profiles'].get('vdc_href')
        vdc = VDC(client, href=vdc_href)
        disks = vdc.get_disks()
        result = []
        for disk in disks:
            attached_vms = ''
            if hasattr(disk, 'attached_vms') and \
                    hasattr(disk.attached_vms, 'VmReference'):
                attached_vms = disk.attached_vms.VmReference.get('name')
            result.append({
                'name': disk.get('name'),
                'id': extract_id(disk.get('id')),
                'owner': disk.Owner.User.get('name'),
                'size': humanfriendly.format_size(int(disk.get('size'))),
                'size_bytes': disk.get('size'),
                'status': VCLOUD_STATUS_MAP.get(int(disk.get('status'))),
                'vms_attached': attached_vms
            })
        stdout(result, ctx, show_id=True)
    except Exception as e:
        stderr(e, ctx)
def get_pvdc_id(ovdc):
    """Get id of pvdc backing an ovdc.

    :param pyvcloud.vcd.VDC ovdc: This ovdc object has to be created with a
        sys admin client.

    :return: pvdc id

    :rtype: str
    """
    client = None
    try:
        client = get_sys_admin_client()
        pvdc_element = ovdc.get_resource().ProviderVdcReference
        # To support <= VCD 9.1 where no 'id' is present in pvdc
        # element, it has to be extracted from href. Once VCD 9.1 support
        # is discontinued, this code is not required.
        if float(client.get_api_version()) < \
                float(ApiVersion.VERSION_31.value):
            pvdc_href = pvdc_element.get('href')
            return pvdc_href.split("/")[-1]
        else:
            pvdc_id = pvdc_element.get('id')
            return extract_id(pvdc_id)
    finally:
        if client:
            client.logout()
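# Illustration (an added sketch, not original code) of the extract_id
# behaviour that most helpers in this section rely on: pyvcloud's extract_id
# keeps only the last colon-separated segment of a vCloud urn.  The uuid
# below is a made-up placeholder.
def example_extract_pvdc_id():
    urn = 'urn:vcloud:providervdc:00000000-0000-0000-0000-000000000000'
    # returns '00000000-0000-0000-0000-000000000000'
    return extract_id(urn)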
def set_ovdc_container_provider_metadata(self,
                                         ovdc_name,
                                         ovdc_id=None,
                                         org_name=None,
                                         container_provider=None,
                                         pks_plans=''):
    """Set the container provider metadata of given ovdc.

    :param str ovdc_name: name of the ovdc
    :param str ovdc_id: unique id of the ovdc
    :param str org_name: specific org to use if @org is not given.
        If None, uses currently logged-in org from @client.
    :param str container_provider: name of container provider for which
        the ovdc is being enabled to deploy k8 clusters on.
    :param str pks_plans: PKS plans for deployment. If container provider
        is vCD or None, pks_plans are not applicable.
    """
    metadata = dict()
    org = get_org(self.client, org_name=org_name)
    if ovdc_id is None:
        ovdc = get_vdc(self.client, ovdc_name, org=org,
                       is_admin_operation=True)
        ovdc_id = utils.extract_id(ovdc.resource.get('id'))
    else:
        ovdc = self._get_vdc_by_id(ovdc_id)

    if container_provider != 'pks':
        LOGGER.debug(f'Remove metadata for ovdc:{ovdc_name}')
        self._remove_metadata(ovdc, self.__ovdc_metadata_keys)
        metadata['container_provider'] = container_provider or ''
    else:
        # Get resource pool
        resource_pool = f"{ovdc.name} ({ovdc_id})"

        # Get pvdc and pks information from pvdc cache
        org_name = org.resource.get('name')
        pvdc_element = ovdc.resource.ProviderVdcReference
        pvdc_id = pvdc_element.get('id')
        pvdc_info = self.pvdc_cache.get_pvdc_info(pvdc_id)
        pks_info = self.pvdc_cache.get_pks_info(org_name, pvdc_info['vc'])

        # construct ovdc metadata
        metadata['name'] = pvdc_info['name']
        metadata['vc'] = pvdc_info['vc']
        metadata['rp_path'] = ','.join(
            f'{rp_path}/{resource_pool}' for rp_path in pvdc_info['rp_path'])
        metadata['host'] = pks_info['host']
        metadata['port'] = pks_info['port']
        metadata['uaac_port'] = pks_info['uaac_port']
        metadata['pks_plans'] = pks_plans or ''
        metadata['container_provider'] = container_provider

        pks_compute_profile_name = f"{org_name}-{ovdc_name}-{ovdc_id}"
        metadata['pks_compute_profile_name'] = pks_compute_profile_name

    # set ovdc metadata into Vcd
    LOGGER.debug(f"Setting below metadata on ovdc {ovdc_name}:{metadata}")
    return ovdc.set_multiple_metadata(metadata, MetadataDomain.SYSTEM,
                                      MetadataVisibility.PRIVATE)
def disable_ovdc_for_k8s(self, ovdc_name, org_name=None):
    """Disable ovdc for k8s for the given container provider.

    :param str ovdc_name: Name of the ovdc to be disabled
    :param str org_name: Name of the organization that ovdc_name belongs to

    :return: response object

    :rtype: dict
    """
    method = 'PUT'
    ovdc = get_vdc(self.client, ovdc_name, org_name=org_name,
                   is_admin_operation=True)
    ovdc_id = utils.extract_id(ovdc.resource.get('id'))
    uri = f'{self._uri}/ovdc/{ovdc_id}/info'
    data = {
        'ovdc_id': ovdc_id,
        'ovdc_name': ovdc_name,
        K8S_PROVIDER_KEY: K8sProviders.NONE,
        'pks_plans': None,
        'org_name': org_name,
        'disable': True
    }
    response = self.client._do_request_prim(
        method,
        uri,
        self.client._session,
        contents=data,
        media_type='application/json',
        accept_type='application/*+json')
    return process_response(response)
def update_ovdc_compute_policies(self, ovdc_name, org_name,
                                 compute_policy_name, action):
    """Update an ovdc's compute policies.

    :param str ovdc_name: Name of org VDC to update
    :param str org_name: Name of org that @ovdc_name belongs to
    :param str compute_policy_name: Name of compute policy to add or remove
    :param ComputePolicyAction action: Whether to add or remove the
        compute policy

    :rtype: dict
    """
    method = RequestMethod.PUT
    ovdc = get_vdc(self.client,
                   vdc_name=ovdc_name,
                   org_name=org_name,
                   is_admin_operation=True)
    ovdc_id = utils.extract_id(ovdc.get_resource().get('id'))
    uri = f'{self._uri}/ovdc/{ovdc_id}/compute-policies'
    data = {
        RequestKey.OVDC_ID: ovdc_id,  # also exists in url
        RequestKey.COMPUTE_POLICY_NAME: compute_policy_name,
        RequestKey.COMPUTE_POLICY_ACTION: action
    }
    response = self.client._do_request_prim(
        method,
        uri,
        self.client._session,
        contents=data,
        media_type='application/json',
        accept_type='application/json')
    return process_response(response)
def info_ovdc_for_k8s(self, ovdc_name, org_name=None):
    """Get k8s info of the given ovdc.

    :param str ovdc_name: Name of the ovdc
    :param str org_name: Name of the organization that ovdc_name belongs to

    :return: response object

    :rtype: dict
    """
    method = 'GET'
    ovdc = get_vdc(self.client, ovdc_name, org_name=org_name,
                   is_admin_operation=True)
    ovdc_id = utils.extract_id(ovdc.resource.get('id'))
    uri = f'{self._uri}/ovdc/{ovdc_id}/info'
    response = self.client._do_request_prim(
        method,
        uri,
        self.client._session,
        contents=None,
        media_type=None,
        accept_type='application/*+json')
    return process_response(response)
def get_payload_for_v35_cluster_upgrade(params):
    """Construct telemetry payload of v35 cluster upgrade.

    :param dict params: defined entity instance, telemetry source_description

    :return: json telemetry data for the operation

    :rtype: dict
    """
    def_entity = params.get(CLUSTER_ENTITY)
    return {
        PayloadKey.TYPE: CseOperation.V35_CLUSTER_UPGRADE.telemetry_table,
        PayloadKey.CLUSTER_ID: uuid_hash(pyvcd_utils.extract_id(def_entity.id)),  # noqa: E501
        PayloadKey.CLUSTER_KIND: def_entity.entity.kind,
        PayloadKey.TEMPLATE_NAME: def_entity.entity.spec.k8_distribution.template_name,  # noqa: E501
        PayloadKey.TEMPLATE_REVISION: def_entity.entity.spec.k8_distribution.template_revision,  # noqa: E501
        PayloadKey.SOURCE_ID: SourceMap.get_source_id(params.get(
            PayloadKey.SOURCE_DESCRIPTION)),  # noqa: E501
        PayloadKey.SOURCE_DESCRIPTION: params.get(PayloadKey.SOURCE_DESCRIPTION)  # noqa: E501
    }
def get_payload_for_v36_cluster_config(params):
    """Construct telemetry payload of v36 cluster config.

    :param dict params: defined entity instance, telemetry source_description

    :return: json telemetry data for the operation

    :rtype: dict
    """
    def_entity = params.get(CLUSTER_ENTITY)
    native_entity: rde_2_x.NativeEntity = def_entity.entity
    return {
        PayloadKey.TYPE: CseOperation.V36_CLUSTER_CONFIG.telemetry_table,
        PayloadKey.CLUSTER_ID: uuid_hash(pyvcd_utils.extract_id(def_entity.id)),  # noqa: E501
        PayloadKey.CLUSTER_KIND: native_entity.kind,
        PayloadKey.TEMPLATE_NAME: native_entity.spec.distribution.template_name,  # noqa: E501
        PayloadKey.TEMPLATE_REVISION: native_entity.spec.distribution.template_revision,  # noqa: E501
        PayloadKey.SOURCE_ID: SourceMap.get_source_id(params.get(
            PayloadKey.SOURCE_DESCRIPTION)),  # noqa: E501
        PayloadKey.SOURCE_DESCRIPTION: params.get(PayloadKey.SOURCE_DESCRIPTION)  # noqa: E501
    }
def update_ovdc(self, enable, ovdc_name, org_name=None, pks_plan=None,
                pks_cluster_domain=None):
    """Enable/Disable ovdc for k8s for the given container provider.

    :param bool enable: If set to True will enable the vdc for the
        particular k8s_provider else if set to False, K8s support on
        the vdc will be disabled.
    :param str ovdc_name: Name of org VDC to update
    :param str org_name: Name of org that @ovdc_name belongs to
    :param str pks_plan: PKS plan
    :param str pks_cluster_domain: Suffix of the domain name, which will be
        used to construct FQDN of the clusters.

    :rtype: dict
    """
    ovdc = get_vdc(self.client, vdc_name=ovdc_name, org_name=org_name,
                   is_admin_operation=True)
    ovdc_id = utils.extract_id(ovdc.get_resource().get('id'))
    k8s_provider = server_constants.K8sProvider.PKS
    if not enable:
        k8s_provider = server_constants.K8sProvider.NONE
        pks_plan = None
        pks_cluster_domain = None
    return self._pks_ovdc_api.update_ovdc_by_ovdc_id(
        ovdc_id,
        k8s_provider,
        ovdc_name=ovdc_name,
        org_name=org_name,
        pks_plan=pks_plan,
        pks_cluster_domain=pks_cluster_domain)  # noqa: E501
def get_org_id_from_vdc_name(client: vcd_client.Client, vdc_name: str):
    """Return org id given vdc name.

    :param vcd_client.Client client: vcd client
    :param str vdc_name: vdc name

    :return: org id, with no prefix, e.g., '12345'

    :rtype: str
    """
    if client.is_sysadmin():
        resource_type = vcd_client.ResourceType.ADMIN_ORG_VDC.value
    else:
        resource_type = vcd_client.ResourceType.ORG_VDC.value
    query = client.get_typed_query(
        query_type_name=resource_type,
        query_result_format=vcd_client.QueryResultFormat.ID_RECORDS,
        equality_filter=('name', vdc_name))
    records = list(query.execute())
    if len(records) == 0:
        return None

    # Process org id
    if client.is_sysadmin():
        org_urn_id = records[0].attrib['org']
    else:
        org_name = records[0].attrib['orgName']
        org_resource = client.get_org_by_name(org_name)
        org_urn_id = org_resource.attrib['id']
    return extract_id(org_urn_id)
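# Usage sketch (an addition, with placeholder names): resolve the id of the
# org that owns a VDC.  Assumes `client` is a logged-in vcd_client.Client
# created elsewhere; 'my-ovdc' is a placeholder VDC name.
def example_resolve_org_id(client):
    org_id = get_org_id_from_vdc_name(client, 'my-ovdc')
    if org_id is None:
        print('no VDC named my-ovdc is visible to this session')
    else:
        print(f'org id: {org_id}')  # bare id, e.g. '12345', no urn prefix
    return org_id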
def find_disk(ctx, name):
    try:
        disk = ctx.vdc.get_disk(name)
        disk_id = extract_id(disk.get('id'))
        attached_vm = None
        if hasattr(disk, 'attached_vms') and \
                hasattr(disk.attached_vms, 'VmReference'):
            attached_vm = \
                disk.attached_vms.VmReference.get('href').split('/vm-')[-1]
        return [disk_id, attached_vm]
    except Exception:
        return [None, None]
def get_vm_disk_relation(ctx):
    result = []
    try:
        resource_type = 'vmDiskRelation'
        query = ctx.client.get_typed_query(
            resource_type,
            query_result_format=QueryResultFormat.ID_RECORDS)
        records = list(query.execute())
        for curr_disk in records:
            result.append({
                'disk': extract_id(curr_disk.get('disk')),
                'vdc': extract_id(curr_disk.get('vdc')),
                'vm': extract_id(curr_disk.get('vm'))
            })
    except Exception:
        if ctx.config['debug']:
            raise
    return result
def native_update_vapp_access_settings(
        self, prev_user_id_to_acl_entry_dict,  # noqa: E501
        update_cluster_acl_entries: List[common_models.ClusterAclEntry]):
    def_entity_user_ids = {
        acl_entry.memberId
        for _, acl_entry in prev_user_id_to_acl_entry_dict.items()
    }
    total_vapp_access_settings = self.native_get_vapp_settings_only_vapp_shared(  # noqa: E501
        def_entity_user_ids)

    # Add updated access settings
    vapp_access_settings: lxml.objectify.ObjectifiedElement = \
        self.vapp.get_access_settings()
    api_uri = self._client.get_api_uri()
    for acl_entry in update_cluster_acl_entries:
        user_id = pyvcloud_utils.extract_id(acl_entry.memberId)
        access_level = pyvcloud_utils.extract_id(acl_entry.accessLevelId)
        # Use 'Change' instead of 'ReadWrite' for vApp access level
        if access_level == shared_constants.READ_WRITE:
            access_level = server_constants.CHANGE_ACCESS
        user_setting = form_vapp_access_setting_entry(
            access_level=access_level,
            name=acl_entry.username,
            href=f'{api_uri}{server_constants.ADMIN_USER_PATH}{user_id}',
            user_id=user_id)
        total_vapp_access_settings.append(user_setting)

    vapp_share_contents = {
        server_constants.VappAccessKey.IS_SHARED_TO_EVERYONE:
            bool(vapp_access_settings.IsSharedToEveryone),
        server_constants.VappAccessKey.ACCESS_SETTINGS: {
            server_constants.VappAccessKey.ACCESS_SETTING:
                total_vapp_access_settings
        }  # noqa: E501
    }
    self._client.post_resource(
        uri=f'{self.vapp.href}{def_constants.ACTION_CONTROL_ACCESS_PATH}',
        contents=vapp_share_contents,
        media_type='application/*+json')
def get_pvdc_id(self, ovdc):
    pvdc_element = ovdc.resource.ProviderVdcReference
    # To support <= VCD 9.1 where no 'id' is present in pvdc
    # element, it has to be extracted from href. Once VCD 9.1 support
    # is discontinued, this code is not required.
    if float(self.client.get_api_version()) < \
            float(ApiVersion.VERSION_31.value):
        pvdc_href = pvdc_element.get('href')
        return pvdc_href.split("/")[-1]
    else:
        pvdc_id = pvdc_element.get('id')
        return utils.extract_id(pvdc_id)
def find_disk2(ctx, name):
    try:
        disk = ctx.vdc.get_disk(name)
        disk_id = extract_id(disk.get('id'))
        vm_id = None
        if hasattr(disk, 'attached_vms') and \
                hasattr(disk.attached_vms, 'VmReference'):
            vm_id = \
                disk.attached_vms.VmReference.get('href').split('/vm-')[-1]
        return disk_id, vm_id
    except Exception:
        return None, None
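# Usage sketch (an addition) contrasting the two helpers above: find_disk
# returns a two-element list while find_disk2 returns an (id, vm_id) tuple
# that unpacks cleanly.  `ctx` is assumed to carry a pyvcloud VDC object;
# 'data-disk' is a placeholder disk name.
def example_lookup_disk(ctx):
    disk_id, vm_id = find_disk2(ctx, 'data-disk')
    if disk_id is None:
        print('disk not found')
    elif vm_id is None:
        print(f'disk {disk_id} is not attached to any VM')
    else:
        print(f'disk {disk_id} is attached to VM {vm_id}')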
def info_ovdc(self, ovdc_name, org_name):
    """Get k8s info of the given org VDC.

    :param str ovdc_name: Name of the org VDC
    :param str org_name: Name of org that @ovdc_name belongs to

    :rtype: dict
    """
    ovdc = get_vdc(self.client, vdc_name=ovdc_name, org_name=org_name,
                   is_admin_operation=True)
    ovdc_id = utils.extract_id(ovdc.get_resource().get('id'))
    return self._pks_ovdc_api.get_ovdc(ovdc_id)
def handle_task(client, obj):
    """Track a task to completion.

    :param client: The client.
    :type client: Client
    :param obj: XML representation of an entity with a task in it
    :type obj: ObjectifiedElement
    """
    if 'task_href' in obj:
        obj = client.get_resource(obj.get('task_href'))
    if isinstance(obj, ObjectifiedElement):
        if obj.tag == '{' + NSMAP['vcloud'] + '}Task':
            # Track task output.
            obj = client.get_resource(obj.get('href'))
            task = client.get_task_monitor().wait_for_status(
                task=obj,
                timeout=60,
                poll_frequency=5,
                fail_on_statuses=None,
                expected_target_statuses=[
                    TaskStatus.SUCCESS, TaskStatus.ABORTED,
                    TaskStatus.ERROR, TaskStatus.CANCELED
                ],
                callback=_task_callback)
            if task.get('status') == TaskStatus.ERROR.value:
                text = 'task: %s, result: %s, message: %s' % \
                    (extract_id(task.get('id')), task.get('status'),
                     task.Error.get('message'))
                print(text)
                raise Exception("Operation failed!")
            else:
                text = 'task: %s, %s, result: %s' % \
                    (extract_id(task.get('id')), task.get('operation'),
                     task.get('status'))
                print(text)
        else:
            # No task to handle.
            pass
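# Usage sketch (an addition, based on typical pyvcloud usage rather than
# anything defined in this module): hand a task-producing operation to
# handle_task so it is tracked to completion.  That vapp.power_on() returns
# a Task element suitable for handle_task is an assumption.
def example_power_on(client, vapp):
    task_element = vapp.power_on()
    # Prints the final task status and raises if the task ends in ERROR.
    handle_task(client, task_element)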
def get_disks(ctx):
    result = []
    attached_vm = \
        lambda x, disk: next((i['vm'] for i in x if i['disk'] == disk), None)
    try:
        disks = ctx.vdc.get_disks()
        disks_relation = get_vm_disk_relation(ctx)
        for disk in disks:
            disk_id = extract_id(disk.get('id'))
            result.append({
                'name': disk.get('name'),
                'id': disk_id,
                'href': disk.get('href'),
                'bus_type': int(disk.get('busType')),
                'bus_sub_type': disk.get('busSubType'),
                'size_bytes': int(disk.get('size')),
                'size_human': bytes_to_size(int(disk.get('size'))),
                'status': VCLOUD_STATUS_MAP.get(int(disk.get('status'))),
                'attached_vm': attached_vm(disks_relation, disk_id),
                'vdc': extract_id(ctx.vdc.resource.get('id'))
            })
        # Refresh session after Typed Query
        Client.login(session_id=ctx.token)
    except Exception:
        if ctx.config['debug']:
            raise
    return result
def update_ovdc_for_k8s(self,
                        enable,
                        ovdc_name,
                        org_name=None,
                        container_provider=None,
                        pks_plan=None,
                        pks_cluster_domain=None):
    """Enable/Disable ovdc for k8s for the given container provider.

    :param bool enable: If set to True will enable the vdc for the
        particular container_provider else if set to False, K8s support on
        the vdc will be disabled.
    :param str ovdc_name: Name of the ovdc to be enabled
    :param str container_provider: Name of the container provider
    :param str pks_plan: PKS plan
    :param str pks_cluster_domain: Suffix of the domain name, which will be
        used to construct FQDN of the clusters.
    :param str org_name: Name of the organization that ovdc_name belongs to

    :return: response object

    :rtype: dict
    """
    method = RequestMethod.PUT
    ovdc = get_vdc(self.client, vdc_name=ovdc_name, org_name=org_name,
                   is_admin_operation=True)
    ovdc_id = utils.extract_id(ovdc.get_resource().get('id'))
    uri = f'{self._uri}/ovdc/{ovdc_id}'
    if not enable:
        container_provider = K8sProviders.NONE
        pks_plan = None
        pks_cluster_domain = None
    data = {
        RequestKey.OVDC_ID: ovdc_id,
        RequestKey.OVDC_NAME: ovdc_name,
        RequestKey.ORG_NAME: org_name,
        RequestKey.K8S_PROVIDER: container_provider,
        RequestKey.PKS_PLAN_NAME: pks_plan,
        RequestKey.PKS_CLUSTER_DOMAIN: pks_cluster_domain,
    }
    response = self.client._do_request_prim(
        method,
        uri,
        self.client._session,
        contents=data,
        media_type='application/json',
        accept_type='application/json')
    return process_response(response)
def get_pvdc_id(ovdc: VDC):
    """Get id of pvdc backing an ovdc.

    :param pyvcloud.vcd.VDC ovdc: This ovdc object has to be created with a
        sys admin client.

    :return: pvdc id

    :rtype: str
    """
    raise_error_if_user_not_from_system_org(ovdc.client)
    pvdc_element = ovdc.get_resource().ProviderVdcReference
    pvdc_id = pvdc_element.get('id')
    return extract_id(pvdc_id)
def list_ovdc_compute_policies(self, ovdc_name, org_name):
    """List an ovdc's compute policies.

    :param str ovdc_name: Name of the org VDC whose compute policies are
        listed
    :param str org_name: Name of org that @ovdc_name belongs to

    :rtype: dict
    """
    ovdc = get_vdc(self.client, vdc_name=ovdc_name, org_name=org_name,
                   is_admin_operation=True)
    ovdc_id = utils.extract_id(ovdc.get_resource().get('id'))
    return self._ovdc_api.list_ovdc_compute_policies(ovdc_id)
def get_disks(ctx):
    result = []
    try:
        disks = ctx.vdc.get_disks()
        for disk in disks:
            disk_id = extract_id(disk.get('id'))
            attached_vm = None
            attached_vm_href = None
            if hasattr(disk, 'attached_vms') and \
                    hasattr(disk.attached_vms, 'VmReference'):
                attached_vm = disk.attached_vms.VmReference.get('name')
                attached_vm_href = disk.attached_vms.VmReference.get('href')
            result.append({
                'name': disk.get('name'),
                'id': disk_id,
                'href': disk.get('href'),
                'storage_profile': disk.StorageProfile.get('name'),
                'storage_profile_href': disk.StorageProfile.get('href'),
                'bus_type': int(disk.get('busType')),
                'bus_sub_type': disk.get('busSubType'),
                'size_bytes': int(disk.get('size')),
                'size_human': bytes_to_size(int(disk.get('size'))),
                'status': VCLOUD_STATUS_MAP.get(int(disk.get('status'))),
                'attached_vm': attached_vm,
                'attached_vm_href': attached_vm_href,
                'vdc': extract_id(ctx.vdc.resource.get('id'))
            })
    except Exception:
        if ctx.config['debug']:
            raise
    return result
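# Usage sketch (an addition): list every independent disk in the context's
# VDC and report which VM, if any, it is attached to.  `ctx` is assumed to
# carry a pyvcloud VDC object and a `config` dict, as get_disks above
# expects.
def example_report_disks(ctx):
    for disk in get_disks(ctx):
        attachment = disk['attached_vm'] or 'unattached'
        print(f"{disk['name']} ({disk['size_human']}): {attachment}")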
def get_payload_for_v35_cluster_upgrade_plan(def_entity: DefEntity):
    """Construct telemetry payload of v35 cluster upgrade plan.

    :param DefEntity def_entity: defined entity instance

    :return: json telemetry data for the operation

    :rtype: dict
    """
    return {
        PayloadKey.TYPE: CseOperation.V35_CLUSTER_UPGRADE_PLAN.telemetry_table,
        PayloadKey.CLUSTER_ID: uuid_hash(pyvcd_utils.extract_id(def_entity.id)),  # noqa: E501
        PayloadKey.CLUSTER_KIND: def_entity.entity.kind
    }
def get_payload_for_v35_cluster_info(params):
    """Construct telemetry payload of v35 cluster info.

    :param dict params: parameters provided to the operation

    :return: json telemetry data for the operation

    :rtype: dict
    """
    return {
        PayloadKey.TYPE: CseOperation.V35_CLUSTER_INFO.telemetry_table,
        PayloadKey.CLUSTER_ID: uuid_hash(pyvcd_utils.extract_id(params.get(
            PayloadKey.CLUSTER_ID)))  # noqa: E501
    }
def update_ovdc(self, ovdc_name, k8s_runtime, enable=True, org_name=None,
                remove_cp_from_vms_on_disable=False):
    """Enable/Disable ovdc for k8s for the given k8s provider.

    :param str ovdc_name: Name of org VDC to update
    :param List[str] k8s_runtime: k8s_runtime of the k8s provider to
        enable / disable for the ovdc
    :param bool enable: If set to True will enable the vdc for the
        particular k8s_runtime else if set to False, K8s support on
        the vdc will be disabled.
    :param str org_name: Name of org that @ovdc_name belongs to
    :param bool remove_cp_from_vms_on_disable: If set to True and enable is
        False, then all the vms in the ovdc having policies for the
        k8s_runtime are deleted.

    :rtype: dict
    """
    ovdc = get_vdc(self.client, vdc_name=ovdc_name, org_name=org_name,
                   is_admin_operation=True)
    ovdc_id = utils.extract_id(ovdc.get_resource().get('id'))
    curr_ovdc = self._ovdc_api.get_ovdc(ovdc_id)
    runtimes = curr_ovdc.k8s_runtime
    for k in k8s_runtime:
        if enable:
            if k in runtimes:
                raise Exception(f"OVDC {ovdc_name} already enabled for {k}")
            runtimes.append(k)
        else:
            if k not in runtimes:
                raise Exception(f"OVDC {ovdc_name} already disabled for {k}")
            runtimes.remove(k)
    updated_ovdc = common_models.Ovdc(
        k8s_runtime=runtimes,
        org_name=org_name,
        remove_cp_from_vms_on_disable=remove_cp_from_vms_on_disable)
    return self._ovdc_api.update_ovdc(ovdc_id, updated_ovdc)
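# Usage sketch (an addition, with placeholder org/VDC names): enable the
# 'native' runtime on an org VDC through the helper above.  Treating 'native'
# as a valid k8s_runtime value and `client_obj` exposing update_ovdc as shown
# are assumptions.
def example_enable_native_runtime(client_obj):
    return client_obj.update_ovdc(ovdc_name='dev-ovdc',
                                  k8s_runtime=['native'],
                                  enable=True,
                                  org_name='dev-org')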
def enable_ovdc_for_k8s(self, ovdc_name, k8s_provider=None, pks_plan=None,
                        pks_cluster_domain=None, org_name=None):
    """Enable ovdc for k8s for the given container provider.

    :param str ovdc_name: Name of the ovdc to be enabled
    :param str k8s_provider: Name of the container provider
    :param str pks_plan: PKS plan
    :param str pks_cluster_domain: Suffix of the domain name, which will be
        used to construct FQDN of the clusters.
    :param str org_name: Name of the organization that ovdc_name belongs to

    :return: response object

    :rtype: dict
    """
    method = 'PUT'
    ovdc = get_vdc(self.client, ovdc_name, org_name=org_name,
                   is_admin_operation=True)
    ovdc_id = utils.extract_id(ovdc.resource.get('id'))
    uri = f'{self._uri}/ovdc/{ovdc_id}/info'
    data = {
        'ovdc_id': ovdc_id,
        'ovdc_name': ovdc_name,
        K8S_PROVIDER_KEY: k8s_provider,
        'pks_plans': pks_plan,
        'pks_cluster_domain': pks_cluster_domain,
        'org_name': org_name,
        'enable': True
    }
    response = self.client._do_request_prim(
        method,
        uri,
        self.client._session,
        contents=data,
        media_type='application/json',
        accept_type='application/*+json')
    return process_response(response)
def create_disk(ctx, name, size, storage_profile_name, bus_type=None,
                bus_sub_type=None):
    """Create an independent disk volume.

    :param name: (str): The name of the new disk
    :param size: (str): The size of the new disk in string format
        (e.g. 100Mi, 2Gi)
    :param storage_profile_name: (str): The name of the storage profile where
        a created disk will be attached

    :return (str): The disk identifier on success, or empty string on failure
    """
    try:
        size = size_to_bytes(size)
        if size == 0:
            # Invalid or zero size; bail out via the handler below.
            raise ValueError('disk size must be greater than zero')
        disk_resource = ctx.vdc.create_disk(
            name=name,
            size=size,
            storage_profile_name=storage_profile_name,
            bus_type=str(bus_type),
            bus_sub_type=bus_sub_type)
        task = ctx.client.get_task_monitor().wait_for_status(
            task=disk_resource.Tasks.Task[0],
            timeout=60,
            poll_frequency=2,
            fail_on_statuses=None,
            expected_target_statuses=[
                TaskStatus.SUCCESS, TaskStatus.ABORTED,
                TaskStatus.ERROR, TaskStatus.CANCELED
            ],
            callback=None)
        assert task.get('status') == TaskStatus.SUCCESS.value
        disk_id = extract_id(disk_resource[0].get('id'))
    except Exception:
        if ctx.config['debug']:
            raise
        return ""
    return disk_id
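# Usage sketch (an addition): create a 2Gi independent disk on a named
# storage profile and report the resulting id.  `ctx` is assumed to carry a
# pyvcloud VDC, a client and a `config` dict, as create_disk above expects;
# the disk and storage profile names are placeholders.
def example_create_disk(ctx):
    disk_id = create_disk(ctx, name='scratch-disk', size='2Gi',
                          storage_profile_name='Standard')
    if not disk_id:
        print('disk creation failed')
    else:
        print(f'created disk {disk_id}')
    return disk_id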
def stdout(obj, ctx=None, alt_text=None, show_id=False, sort_headers=True):
    global last_message
    last_message = ''
    o = obj
    if ctx is not None and \
            'json_output' in ctx.find_root().params and \
            ctx.find_root().params['json_output']:
        if isinstance(obj, str):
            o = {'message': obj}
        text = json.dumps(o, sort_keys=True, indent=4, separators=(',', ': '))
        if sys.version_info[0] < 3:
            text = str(text, 'utf-8')
        if ctx.find_root().params['is_colorized']:
            click.echo(
                highlight(text, lexers.JsonLexer(),
                          formatters.TerminalFormatter()))
        else:
            click.echo(text)
    else:
        if alt_text is not None:
            text = alt_text
        elif isinstance(obj, str):
            text = o
        else:
            if 'task_href' in obj:
                obj = ctx.obj['client'].get_resource(obj.get('task_href'))
            if isinstance(obj, ObjectifiedElement):
                if obj.tag == '{' + NSMAP['vcloud'] + '}Task':
                    if ctx is not None and \
                            'no_wait' in ctx.find_root().params and \
                            ctx.find_root().params['no_wait']:
                        text = as_prop_value_list(obj, show_id=show_id)
                    else:
                        client = ctx.obj['client']
                        task = client.get_task_monitor().wait_for_status(
                            task=obj,
                            timeout=60,
                            poll_frequency=5,
                            fail_on_statuses=None,
                            expected_target_statuses=[
                                TaskStatus.SUCCESS, TaskStatus.ABORTED,
                                TaskStatus.ERROR, TaskStatus.CANCELED
                            ],
                            callback=task_callback)
                        if task.get('status') == TaskStatus.ERROR.value:
                            text = 'task: %s, result: %s, message: %s' % \
                                (extract_id(task.get('id')),
                                 task.get('status'),
                                 task.Error.get('message'))
                            # TODO(should return != 0)
                        else:
                            text = 'task: %s, %s, result: %s' % \
                                (extract_id(task.get('id')),
                                 task.get('operation'),
                                 task.get('status'))
                elif ctx.command.name == 'list' and \
                        isinstance(obj, collections.Iterable):
                    text = as_table(obj)
                elif ctx.command.name == 'info':
                    text = as_table(
                        [{'property': k, 'value': v}
                         for k, v in sorted(to_dict(obj).items())],
                        show_id=show_id)
                else:
                    text = as_table(to_dict(obj), show_id=show_id)
            elif not isinstance(obj, list):
                obj1 = {}
                for k, v in obj.items():
                    if type(v) in [list, tuple]:
                        value = ''.join('%s\n' % x for x in v)
                    elif type(v) is dict:
                        value = ''.join(
                            '%s: %s\n' % (x, y) for x, y in v.items())
                    else:
                        value = v
                    obj1[k] = value
                text = as_prop_value_list(obj1, show_id=show_id)
            else:
                text = as_table(obj, show_id=show_id,
                                sort_headers=sort_headers)
        click.echo('\x1b[2K\r' + text)