def check_live_migration_config(self):
    """Ensure live migration is enabled and has networks configured.

    :raises exceptions.HyperVException: if live migration is disabled on
        this host, or no migration listener networks are configured.
    """
    migration_service = self._conn.Msvm_VirtualSystemMigrationService()[0]
    migration_settings = (
        self._conn.Msvm_VirtualSystemMigrationServiceSettingData()[0])
    if not migration_settings.EnableVirtualSystemMigration:
        raise exceptions.HyperVException(
            _('Live migration is not enabled on this host'))
    if not migration_service.MigrationServiceListenerIPAddressList:
        raise exceptions.HyperVException(
            _('Live migration networks are not configured on this host'))
def verify_host_remotefx_capability(self):
    """Check that the host GPU meets the RemoteFX requirements.

    :raises exceptions.HyperVRemoteFXException: if no DirectX 11 capable
        GPU is present, or the host GPUs do not support SLAT.
    """
    video_pool = self._conn.Msvm_Synth3dVideoPool()[0]
    if not video_pool.IsGpuCapable:
        raise exceptions.HyperVRemoteFXException(
            _("To enable RemoteFX on Hyper-V at least one GPU supporting "
              "DirectX 11 is required."))
    if not video_pool.IsSlatCapable:
        raise exceptions.HyperVRemoteFXException(
            _("To enable RemoteFX on Hyper-V it is required that the host "
              "GPUs support SLAT."))
def check_live_migration_config(self):
    """Ensure live migration is enabled and has networks configured.

    Uses a v2 virtualization namespace connection and resolves the
    migration service setting data through its WMI association.

    :raises exceptions.HyperVException: if live migration is disabled on
        this host, or no migration listener networks are configured.
    """
    conn_v2 = self._get_conn_v2()
    migration_service = conn_v2.Msvm_VirtualSystemMigrationService()[0]
    settings_list = migration_service.associators(
        wmi_association_class='Msvm_ElementSettingData',
        wmi_result_class='Msvm_VirtualSystemMigrationServiceSettingData')
    migration_settings = settings_list[0]
    if not migration_settings.EnableVirtualSystemMigration:
        raise exceptions.HyperVException(
            _('Live migration is not enabled on this host'))
    if not migration_service.MigrationServiceListenerIPAddressList:
        raise exceptions.HyperVException(
            _('Live migration networks are not configured on this host'))
def _get_conn_v2(self, host='localhost'):
    """Return a v2 virtualization WMI connection to the given host.

    :param host: hostname to connect to; defaults to the local host.
    :raises exceptions.HyperVException: with a message specific to the
        COM error encountered while connecting.
    """
    try:
        return self._get_wmi_obj(self._wmi_namespace % host)
    except exceptions.x_wmi as ex:
        LOG.exception(_LE('Get version 2 connection error'))
        # Map well-known COM HRESULTs to friendlier messages.
        hresult = ex.com_error.hresult
        if hresult == -2147217394:
            msg = (_('Live migration is not supported on target host "%s"')
                   % host)
        elif hresult == -2147023174:
            msg = (_('Target live migration host "%s" is unreachable')
                   % host)
        else:
            msg = _('Live migration failed: %s') % ex.message
        raise exceptions.HyperVException(msg)
def _get_conn_v2(self, host='localhost'):
    """Return a root/virtualization/v2 WMI connection to the given host.

    :param host: hostname to connect to; defaults to the local host.
    :raises exceptions.HyperVException: with a message specific to the
        COM error encountered while connecting.
    """
    try:
        return wmi.WMI(moniker='//%s/root/virtualization/v2' % host)
    except wmi.x_wmi as ex:
        LOG.exception(_LE('Get version 2 connection error'))
        # Map well-known COM HRESULTs to friendlier messages.
        hresult = ex.com_error.hresult
        if hresult == -2147217394:
            msg = (_('Live migration is not supported on target host "%s"')
                   % host)
        elif hresult == -2147023174:
            msg = (_('Target live migration host "%s" is unreachable')
                   % host)
        else:
            msg = _('Live migration failed: %s') % ex.message
        raise exceptions.HyperVException(msg)
def _wait_for_job(self, job_path):
    """Poll WMI job state and wait for completion.

    :param job_path: WMI object path of the job to wait for.
    :returns: the final WMI job object (completed or killed).
    :raises exceptions.HyperVException: if the job finishes in any state
        other than completed or killed, with error details when the job
        class exposes them.
    """
    # WMI monikers use forward slashes; the job path is returned with
    # backslashes.
    job_wmi_path = job_path.replace("\\", "/")
    job = wmi.WMI(moniker=job_wmi_path)
    # The job object is a snapshot; re-fetch it on every poll iteration
    # to observe state changes.
    while job.JobState == constants.WMI_JOB_STATE_RUNNING:
        time.sleep(0.1)
        job = wmi.WMI(moniker=job_wmi_path)
    # A killed job is returned as-is, not treated as a failure.
    if job.JobState == constants.JOB_STATE_KILLED:
        LOG.debug("WMI job killed with status %s.", job.JobState)
        return job
    if job.JobState != constants.WMI_JOB_STATE_COMPLETED:
        job_state = job.JobState
        # Msvm_ConcreteJob exposes error details directly as properties;
        # other job classes report them through GetError().
        if job.path().Class == "Msvm_ConcreteJob":
            err_sum_desc = job.ErrorSummaryDescription
            err_desc = job.ErrorDescription
            err_code = job.ErrorCode
            data = {
                "job_state": job_state,
                "err_sum_desc": err_sum_desc,
                "err_desc": err_desc,
                "err_code": err_code,
            }
            raise exceptions.HyperVException(
                _(
                    "WMI job failed with status %(job_state)d. "
                    "Error details: %(err_sum_desc)s - %(err_desc)s - "
                    "Error code: %(err_code)d"
                ) % data
            )
        else:
            (error, ret_val) = job.GetError()
            # GetError succeeded (ret_val falsy) and produced a message.
            if not ret_val and error:
                data = {"job_state": job_state, "error": error}
                raise exceptions.HyperVException(
                    _("WMI job failed with status %(job_state)d. "
                      "Error details: %(error)s") % data
                )
            else:
                raise exceptions.HyperVException(
                    _("WMI job failed with status %d. No error "
                      "description available") % job_state
                )
    desc = job.Description
    elap = job.ElapsedTime
    LOG.debug("WMI job succeeded: %(desc)s, Elapsed=%(elap)s",
              {"desc": desc, "elap": elap})
    return job
def _wait_for_job(self, job_path):
    """Poll WMI job state and wait for completion.

    :param job_path: WMI object path of the job to wait for.
    :returns: the final WMI job object (completed or killed).
    :raises exceptions.HyperVException: if the job finishes in any state
        other than completed or killed, with error details when the job
        class exposes them.
    """
    # WMI monikers use forward slashes; the job path is returned with
    # backslashes.
    job_wmi_path = job_path.replace('\\', '/')
    job = wmi.WMI(moniker=job_wmi_path)
    # The job object is a snapshot; re-fetch it on every poll iteration
    # to observe state changes.
    while job.JobState == constants.WMI_JOB_STATE_RUNNING:
        time.sleep(0.1)
        job = wmi.WMI(moniker=job_wmi_path)
    # A killed job is returned as-is, not treated as a failure.
    if job.JobState == constants.JOB_STATE_KILLED:
        LOG.debug("WMI job killed with status %s.", job.JobState)
        return job
    if job.JobState != constants.WMI_JOB_STATE_COMPLETED:
        job_state = job.JobState
        # Msvm_ConcreteJob exposes error details directly as properties;
        # other job classes report them through GetError().
        if job.path().Class == "Msvm_ConcreteJob":
            err_sum_desc = job.ErrorSummaryDescription
            err_desc = job.ErrorDescription
            err_code = job.ErrorCode
            data = {
                'job_state': job_state,
                'err_sum_desc': err_sum_desc,
                'err_desc': err_desc,
                'err_code': err_code
            }
            raise exceptions.HyperVException(
                _("WMI job failed with status %(job_state)d. "
                  "Error details: %(err_sum_desc)s - %(err_desc)s - "
                  "Error code: %(err_code)d") % data)
        else:
            (error, ret_val) = job.GetError()
            # GetError succeeded (ret_val falsy) and produced a message.
            if not ret_val and error:
                data = {'job_state': job_state, 'error': error}
                raise exceptions.HyperVException(
                    _("WMI job failed with status %(job_state)d. "
                      "Error details: %(error)s") % data)
            else:
                raise exceptions.HyperVException(
                    _("WMI job failed with status %d. No error "
                      "description available") % job_state)
    desc = job.Description
    elap = job.ElapsedTime
    LOG.debug("WMI job succeeded: %(desc)s, Elapsed=%(elap)s", {
        'desc': desc,
        'elap': elap
    })
    return job
def _get_vnic_settings(self, vnic_name):
    """Return the synthetic ethernet port settings for the given vNIC.

    :param vnic_name: the vNIC's ElementName.
    :raises exceptions.HyperVException: if no matching vNIC exists.
    """
    results = self._conn.Msvm_SyntheticEthernetPortSettingData(
        ElementName=vnic_name)
    if results:
        return results[0]
    raise exceptions.HyperVException(
        message=_('Vnic not found: %s') % vnic_name)
def check_ret_val(self, ret_val, job_path, success_values=(0,)):
    """Check a Hyper-V operation's return value, waiting on async jobs.

    :param ret_val: the operation's return value. If it indicates a
        started/running job, the job at job_path is waited on.
    :param job_path: WMI object path of the asynchronous job.
    :param success_values: return values considered successful.
    :returns: the completed job object when the operation was
        asynchronous, otherwise None.
    :raises exceptions.HyperVException: if ret_val is neither a
        job-in-progress value nor in success_values.
    """
    # NOTE: the default is a tuple rather than a list, avoiding the
    # shared-mutable-default-argument pitfall; only membership tests are
    # performed, so callers passing lists are unaffected.
    if ret_val in [constants.WMI_JOB_STATUS_STARTED,
                   constants.WMI_JOB_STATE_RUNNING]:
        return self._wait_for_job(job_path)
    elif ret_val not in success_values:
        raise exceptions.HyperVException(
            _('Operation failed with return value: %s') % ret_val)
def enable_remotefx_video_adapter(self, vm_name, monitor_count,
                                  max_resolution, vram_bytes=None):
    """Add a RemoteFX synthetic 3D display controller to the given VM.

    :param vm_name: name of the VM to configure.
    :param monitor_count: number of monitors to expose.
    :param max_resolution: resolution key, mapped through
        self._remote_fx_res_map to the value the controller expects.
    :param vram_bytes: optional VRAM amount forwarded to the 3D display
        controller; validated by _validate_remotefx_params.
    :raises exceptions.HyperVRemoteFXException: if RemoteFX is already
        configured for this VM, or (from _validate_remotefx_params) if
        the requested parameters are invalid.
    """
    vm = self._lookup_vm_check(vm_name, as_vssd=False)
    self._validate_remotefx_params(monitor_count, max_resolution,
                                   vram_bytes=vram_bytes)
    # All resource allocation setting data (RASD) entries of the VM.
    rasds = _wqlutils.get_element_associated_class(
        self._conn, self._CIM_RES_ALLOC_SETTING_DATA_CLASS,
        element_uuid=vm.Name)
    # An existing synthetic 3D display controller means RemoteFX was
    # already configured; refuse to configure it twice.
    if [r for r in rasds if r.ResourceSubType ==
            self._SYNTH_3D_DISP_CTRL_RES_SUB_TYPE]:
        raise exceptions.HyperVRemoteFXException(
            _("RemoteFX is already configured for this VM"))
    # The plain synthetic display controller is removed before adding
    # the 3D controller.
    synth_disp_ctrl_res_list = [r for r in rasds if r.ResourceSubType ==
                                self._SYNTH_DISP_CTRL_RES_SUB_TYPE]
    if synth_disp_ctrl_res_list:
        self._jobutils.remove_virt_resource(synth_disp_ctrl_res_list[0])
    max_res_value = self._remote_fx_res_map.get(max_resolution)
    self._add_3d_display_controller(vm, monitor_count, max_res_value,
                                    vram_bytes)
    if self._vm_has_s3_controller(vm.ElementName):
        # NOTE(review): the S3 controller's address is switched to the
        # DX11 address — presumably required for coexistence with the
        # RemoteFX adapter; confirm against Hyper-V documentation.
        s3_disp_ctrl_res = [r for r in rasds if r.ResourceSubType ==
                            self._S3_DISP_CTRL_RES_SUB_TYPE][0]
        s3_disp_ctrl_res.Address = self._DISP_CTRL_ADDRESS_DX_11
        self._jobutils.modify_virt_resource(s3_disp_ctrl_res)
def execute(self, *args, **kwargs):
    """Run an iscsicli command and verify that it succeeded.

    :returns: the command's stdout.
    :raises exceptions.HyperVException: if the output does not contain
        the iscsicli success message.
    """
    stdout_value, stderr_value = _utils.execute(*args, **kwargs)
    # iscsicli reports success via stdout text rather than exit codes;
    # use the idiomatic membership test instead of find() == -1.
    if 'The operation completed successfully' not in stdout_value:
        raise exceptions.HyperVException(
            _('An error has occurred when calling the iscsi initiator: %s')
            % stdout_value)
    return stdout_value
def get_element_associated_class(conn, class_name, element_instance_id=None,
                                 element_uuid=None, fields=None):
    """Returns the objects associated to an element as a list.

    :param conn: connection to be used to execute the query
    :param class_name: object's class type name to be retrieved
    :param element_instance_id: element class InstanceID
    :param element_uuid: UUID of the element
    :param fields: specific class attributes to be retrieved
    :raises exceptions.WqlException: if neither element_instance_id nor
        element_uuid is given.
    """
    # Require at least one way of identifying the element up front.
    if not element_instance_id and not element_uuid:
        err_msg = _("Could not get element associated class. Either element "
                    "instance id or element uuid must be specified.")
        raise exceptions.WqlException(err_msg)
    # An explicit instance id takes precedence over the uuid form.
    instance_id = element_instance_id or "Microsoft:%s" % element_uuid
    field_list = ", ".join(fields) if fields else "*"
    return conn.query(
        "SELECT %(fields)s FROM %(class_name)s WHERE InstanceID "
        "LIKE '%(instance_id)s%%'" % {
            'fields': field_list,
            'class_name': class_name,
            'instance_id': instance_id})
def _get_vswitch(self, vswitch_name):
    """Return the virtual ethernet switch with the given name.

    :param vswitch_name: the vswitch's ElementName.
    :raises exceptions.HyperVException: if no matching vswitch exists.
    """
    vswitch = self._conn.Msvm_VirtualEthernetSwitch(
        ElementName=vswitch_name)
    # Idiomatic emptiness test instead of `not len(vswitch)`.
    if not vswitch:
        raise exceptions.HyperVException(_('VSwitch not found: %s') %
                                         vswitch_name)
    return vswitch[0]
def set_vswitch_port_vlan_id(self, vlan_id, switch_port_name):
    """Set the access VLAN id on the given switch port.

    An existing VLAN feature is removed and recreated, then the setting
    is re-read to verify it was actually applied.

    :raises exceptions.HyperVException: if the VLAN setting cannot be
        found on the port after it was added.
    """
    port_alloc = self._get_switch_port_allocation(switch_port_name)[0]
    vlan_sd = self._get_vlan_setting_data_from_port_alloc(port_alloc)
    if vlan_sd:
        already_set = (
            vlan_sd.OperationMode == self._OPERATION_MODE_ACCESS and
            vlan_sd.AccessVlanId == vlan_id)
        if already_set:
            # VLAN already set to the correct value, no need to change it.
            return
        # Removing the feature because it cannot be modified
        # due to a wmi exception.
        self._jobutils.remove_virt_feature(vlan_sd)
        # remove from cache.
        self._vlan_sds.pop(port_alloc.InstanceID, None)
    vlan_sd = self._create_default_setting_data(
        self._PORT_VLAN_SET_DATA)
    vlan_sd.AccessVlanId = vlan_id
    vlan_sd.OperationMode = self._OPERATION_MODE_ACCESS
    self._jobutils.add_virt_feature(vlan_sd, port_alloc)
    # TODO(claudiub): This will help solve the missing VLAN issue, but it
    # comes with a performance cost. The root cause of the problem must
    # be solved.
    vlan_sd = self._get_vlan_setting_data_from_port_alloc(port_alloc)
    if not vlan_sd:
        raise exceptions.HyperVException(
            _('Port VLAN not found: %s') % switch_port_name)
def _check_handle_type(self, handle_type):
    """Validate that handle_type is an allowed cluster handle type.

    :raises exceptions.Invalid: if the handle type is not allowed.
    """
    if handle_type in self._HANDLE_TYPES:
        return
    err_msg = _("Invalid cluster handle type: %(handle_type)s. "
                "Allowed handle types: %(allowed_types)s.")
    raise exceptions.Invalid(
        err_msg % {'handle_type': handle_type,
                   'allowed_types': self._HANDLE_TYPES})
def login_storage_target(self, target_lun, target_iqn, target_portal,
                         auth_username=None, auth_password=None):
    """Ensure that the target is logged in.

    Retries the login until the target shows up in the iSCSI session
    list or the configured retry count is exhausted.

    :raises exceptions.HyperVException: if the target could not be
        logged in after all retries.
    """
    self._login_target_portal(target_portal)
    # Listing targets
    self.execute("iscsicli.exe", "ListTargets")
    retry_count = CONF.hyperv.volume_attach_retry_count
    # If the target is not connected, at least two iterations are needed:
    # one for performing the login and another one for checking if the
    # target was logged in successfully.
    if retry_count < 2:
        retry_count = 2
    for attempt in range(retry_count):
        try:
            session_info = self.execute("iscsicli.exe", "SessionList")
            # Idiomatic membership test instead of find() == -1.
            if target_iqn not in session_info:
                # Sending login
                self.execute("iscsicli.exe", "qlogintarget",
                             target_iqn, auth_username, auth_password)
            else:
                return
        except exceptions.HyperVException as exc:
            # Fixed "Exceptipn" typo in the log message.
            LOG.debug("Attempt %(attempt)d to connect to target "
                      "%(target_iqn)s failed. Retrying. "
                      "Exception: %(exc)s ",
                      {"target_iqn": target_iqn,
                       "exc": exc,
                       "attempt": attempt})
        time.sleep(CONF.hyperv.volume_attach_retry_interval)
    raise exceptions.HyperVException(_("Failed to login target %s")
                                     % target_iqn)
def set_vswitch_port_vlan_id(self, vlan_id, switch_port_name):
    """Set the access VLAN id on the given switch port.

    An existing VLAN feature is removed and recreated, since it cannot
    be modified in place.

    :raises exceptions.HyperVException: if the port allocation cannot
        be found.
    """
    port_alloc, found = self._get_switch_port_allocation(switch_port_name)
    if not found:
        raise exceptions.HyperVException(
            _('Port Allocation not found: %s') % switch_port_name)
    vlan_sd = self._get_vlan_setting_data_from_port_alloc(port_alloc)
    if vlan_sd:
        already_set = (
            vlan_sd.OperationMode == self._OPERATION_MODE_ACCESS and
            vlan_sd.AccessVlanId == vlan_id)
        if already_set:
            # VLAN already set to the correct value, no need to change it.
            return
        # Removing the feature because it cannot be modified
        # due to a wmi exception.
        self._jobutils.remove_virt_feature(vlan_sd)
        # remove from cache.
        self._vlan_sds.pop(port_alloc.InstanceID, None)
    vlan_sd = self._create_default_setting_data(
        self._PORT_VLAN_SET_DATA)
    vlan_sd.AccessVlanId = vlan_id
    vlan_sd.OperationMode = self._OPERATION_MODE_ACCESS
    self._jobutils.add_virt_feature(vlan_sd, port_alloc)
def get_device_number_from_device_name(self, device_name):
    """Extract the device number from a physical device name.

    :param device_name: device name to match against the physical
        device name regex.
    :raises exceptions.DiskNotFound: if no device number is found.
    """
    matches = self._phys_dev_name_regex.findall(device_name)
    if not matches:
        raise exceptions.DiskNotFound(
            _("Could not find device number for device: %s") % device_name)
    return matches[0]
def start(self):
    """Open the named pipe and start the reader/writer worker threads.

    Starts a reader thread, and additionally a writer thread when both
    an input queue and a connect event are configured. On any failure
    the handler is stopped before the error is re-raised.

    :raises exceptions.OSWinException: if the pipe handler fails to
        initialize.
    """
    try:
        self._open_pipe()
        if self._log_file_path:
            # NOTE(review): buffering=1 requests line buffering, but the
            # file is opened in binary ('ab') mode, where Python treats
            # it as (near-)unbuffered/full buffering — confirm intent.
            self._log_file_handle = open(self._log_file_path, 'ab', 1)
        jobs = [self._read_from_pipe]
        # The writer worker is only needed when there is input to relay.
        if self._input_queue and self._connect_event:
            jobs.append(self._write_to_pipe)
        for job in jobs:
            worker = threading.Thread(target=job)
            # Daemon threads do not block interpreter shutdown.
            worker.setDaemon(True)
            worker.start()
            self._workers.append(worker)
    except Exception as err:
        msg = (_("Named pipe handler failed to initialize. "
                 "Pipe Name: %(pipe_name)s "
                 "Error: %(err)s") % {'pipe_name': self._pipe_name,
                                      'err': err})
        LOG.error(msg)
        # Best-effort cleanup of anything already started.
        self.stop()
        raise exceptions.OSWinException(msg)
def _validate_remotefx_params(self, monitor_count, max_resolution,
                              vram_bytes=None):
    """Validate RemoteFX monitor count and resolution settings.

    :param monitor_count: requested number of monitors.
    :param max_resolution: requested resolution key.
    :param vram_bytes: accepted for interface compatibility; not
        validated by the checks visible here.
    :raises exceptions.HyperVRemoteFXException: if the resolution is
        unsupported, or the monitor count exceeds the maximum allowed
        for that resolution.
    """
    max_res_value = self._remote_fx_res_map.get(max_resolution)
    if max_res_value is None:
        raise exceptions.HyperVRemoteFXException(
            _("Unsupported RemoteFX resolution: %s") % max_resolution)
    # Hoist the repeated map lookup; also fixes the "Unsuported" typo
    # in the user-facing error message below.
    max_monitors = self._remotefx_max_monitors_map[max_resolution]
    if monitor_count > max_monitors:
        raise exceptions.HyperVRemoteFXException(
            _("Unsupported RemoteFX monitor count: %(count)s for "
              "this resolution %(res)s. Hyper-V supports a maximum "
              "of %(max_monitors)s monitors for this resolution.")
            % {'count': monitor_count,
               'res': max_resolution,
               'max_monitors': max_monitors})
def set_vswitch_port_vsid(self, vsid, switch_port_name):
    """Set the virtual subnet id (VSID) on the given switch port.

    An existing security feature is removed and recreated, then the
    setting is re-read to verify it was actually applied.

    :raises exceptions.HyperVException: if the VSID setting cannot be
        found on the port after it was added.
    """
    port_alloc = self._get_switch_port_allocation(switch_port_name)[0]
    sec_sd = self._get_security_setting_data_from_port_alloc(
        port_alloc)
    if sec_sd:
        if sec_sd.VirtualSubnetId == vsid:
            # VSID already added, no need to readd it.
            return
        # Removing the feature because it cannot be modified
        # due to a wmi exception.
        self._jobutils.remove_virt_feature(sec_sd)
        # remove from cache.
        self._vsid_sds.pop(port_alloc.InstanceID, None)
    sec_sd = self._create_default_setting_data(
        self._PORT_SECURITY_SET_DATA)
    sec_sd.VirtualSubnetId = vsid
    self._jobutils.add_virt_feature(sec_sd, port_alloc)
    # TODO(claudiub): This will help solve the missing VSID issue, but it
    # comes with a performance cost. The root cause of the problem must
    # be solved.
    sec_sd = self._get_security_setting_data_from_port_alloc(
        port_alloc)
    if not sec_sd:
        raise exceptions.HyperVException(
            _('Port VSID not found: %s') % switch_port_name)
def check_ret_val(self, ret_val, job_path, success_values=(0,)):
    """Checks that the job represented by the given arguments succeeded.

    Some Hyper-V operations are not atomic, and will return a reference
    to a job. In this case, this method will wait for the job's
    completion.

    :param ret_val: integer, representing the return value of the job.
        if the value is WMI_JOB_STATUS_STARTED or WMI_JOB_STATE_RUNNING,
        a job_path cannot be None.
    :param job_path: string representing the WMI object path of a
        Hyper-V job.
    :param success_values: sequence of return values that can be
        considered successful. WMI_JOB_STATUS_STARTED and
        WMI_JOB_STATE_RUNNING values are ignored. The default is a
        tuple rather than a list, avoiding the shared-mutable-default
        pitfall; only membership tests are performed, so list callers
        are unaffected.
    :raises exceptions.HyperVException: if the given ret_val is
        WMI_JOB_STATUS_STARTED or WMI_JOB_STATE_RUNNING and the state of
        job represented by the given job_path is not
        WMI_JOB_STATE_COMPLETED, or if the given ret_val is not in the
        list of given success_values.
    """
    if ret_val in [constants.WMI_JOB_STATUS_STARTED,
                   constants.WMI_JOB_STATE_RUNNING]:
        return self._wait_for_job(job_path)
    elif ret_val not in success_values:
        raise exceptions.HyperVException(
            _('Operation failed with return value: %s') % ret_val)
def _get_wt_snapshot(self, description, fail_if_not_found=True):
    """Return the WT Snapshot with the given description, if any.

    :param fail_if_not_found: when True, raise instead of returning
        None if no snapshot matches.
    :raises exceptions.ISCSITargetException: if no snapshot is found
        and fail_if_not_found is set.
    """
    wt_snapshots = self._conn_wmi.WT_Snapshot(Description=description)
    if wt_snapshots:
        return wt_snapshots[0]
    if fail_if_not_found:
        err_msg = _('Could not find WT Snapshot: %s')
        raise exceptions.ISCSITargetException(err_msg % description)
def _get_wt_disk(self, description, fail_if_not_found=True):
    """Return the WT Disk with the given description, if any.

    :param fail_if_not_found: when True, raise instead of returning
        None if no disk matches.
    :raises exceptions.ISCSITargetException: if no disk is found and
        fail_if_not_found is set.
    """
    # We can retrieve WT Disks only by description.
    wt_disks = self._conn_wmi.WT_Disk(Description=description)
    if wt_disks:
        return wt_disks[0]
    if fail_if_not_found:
        err_msg = _('Could not find WT Disk: %s')
        raise exceptions.ISCSITargetException(err_msg % description)
def _get_wt_host(self, target_name, fail_if_not_found=True):
    """Return the iSCSI target (WT Host) with the given name, if any.

    :param fail_if_not_found: when True, raise instead of returning
        None if no target matches.
    :raises exceptions.ISCSITargetException: if no target is found and
        fail_if_not_found is set.
    """
    hosts = self._conn_wmi.WT_Host(HostName=target_name)
    if hosts:
        return hosts[0]
    if fail_if_not_found:
        err_msg = _('Could not find iSCSI target %s')
        raise exceptions.ISCSITargetException(err_msg % target_name)
def get_free_controller_slot(self, scsi_controller_path):
    """Return the first unused slot on the given SCSI controller.

    :raises exceptions.HyperVException: if every slot is occupied.
    """
    attached_disks = self.get_attached_disks(scsi_controller_path)
    occupied = {int(disk.AddressOnParent) for disk in attached_disks}
    for candidate in range(constants.SCSI_CONTROLLER_SLOTS_NUMBER):
        if candidate not in occupied:
            return candidate
    raise exceptions.HyperVException(
        _("Exceeded the maximum number of slots"))
def remove_wt_disk(self, wtd_name):
    """Delete the WT disk with the given name, if it exists.

    :raises exceptions.ISCSITargetWMIException: if the WMI delete
        operation fails.
    """
    try:
        wt_disk = self._get_wt_disk(wtd_name, fail_if_not_found=False)
        if wt_disk is not None:
            wt_disk.Delete_()
    except wmi.x_wmi as wmi_exc:
        err_msg = _("Failed to remove WT disk: %s.")
        raise exceptions.ISCSITargetWMIException(err_msg % wtd_name,
                                                 wmi_exc=wmi_exc)
def change_wt_disk_status(self, wtd_name, enabled):
    """Enable or disable the WT disk with the given name.

    :param enabled: the new Enabled state for the disk.
    :raises exceptions.ISCSITargetWMIException: if the WMI update fails.
    """
    try:
        wt_disk = self._get_wt_disk(wtd_name)
        wt_disk.Enabled = enabled
        wt_disk.put()
    except wmi.x_wmi as wmi_exc:
        err_msg = _('Could not change disk status. WT Disk name: %s')
        raise exceptions.ISCSITargetWMIException(err_msg % wtd_name,
                                                 wmi_exc=wmi_exc)
def import_wt_disk(self, vhd_path, wtd_name):
    """Import a vhd/x image to be used by Windows iSCSI targets.

    :raises exceptions.ISCSITargetWMIException: if the WMI import fails.
    """
    try:
        self._conn_wmi.WT_Disk.ImportWTDisk(DevicePath=vhd_path,
                                            Description=wtd_name)
    except wmi.x_wmi as wmi_exc:
        err_msg = _("Failed to import WT disk: %s.")
        raise exceptions.ISCSITargetWMIException(err_msg % vhd_path,
                                                 wmi_exc=wmi_exc)
def _get_planned_vm(self, vm_name, conn_v2=None, fail_if_not_found=False):
    """Return the planned VM with the given name, or None.

    :param conn_v2: optional WMI connection; defaults to self._conn.
    :param fail_if_not_found: when True, raise instead of returning
        None if no planned VM matches.
    :raises exceptions.HyperVException: if no planned VM is found and
        fail_if_not_found is set.
    """
    conn = conn_v2 or self._conn
    planned_vms = conn.Msvm_PlannedComputerSystem(ElementName=vm_name)
    if planned_vms:
        return planned_vms[0]
    if fail_if_not_found:
        raise exceptions.HyperVException(
            _("Cannot find planned VM with name: %s") % vm_name)
    return None
def _get_vm(self, conn_v2, vm_name):
    """Return the single VM with the given name.

    :raises exceptions.HyperVVMNotFoundException: if no VM matches.
    :raises exceptions.HyperVException: if multiple VMs match.
    """
    vms = conn_v2.Msvm_ComputerSystem(ElementName=vm_name)
    if not vms:
        raise exceptions.HyperVVMNotFoundException(vm_name=vm_name)
    if len(vms) > 1:
        raise exceptions.HyperVException(_('Duplicate VM name found: %s')
                                         % vm_name)
    return vms[0]
def _unique_result(objects, resource_name):
    """Return the single object in the list.

    :raises exceptions.NotFound: if the list is empty.
    :raises exceptions.OSWinException: if more than one object matches.
    """
    count = len(objects)
    if count == 1:
        return objects[0]
    if count == 0:
        raise exceptions.NotFound(resource=resource_name)
    raise exceptions.OSWinException(
        _('Duplicate resource name found: %s') % resource_name)