def get_image_vm_generation(self, root_vhd_path, image_meta):
    """Return the Hyper-V VM generation required by the given image.

    :param root_vhd_path: path of the instance root disk, or a falsy
        value when there is no local root disk (boot from volume).
    :param image_meta: image metadata dict; the VM generation is read
        from its 'properties', falling back to the host default.
    :returns: one of the VM_GENERATIONS values.
    :raises vmutils.HyperVException: if the requested generation is not
        supported by the host, or a generation 2 VM is requested with a
        VHD (instead of VHDX) root disk.
    """
    image_props = image_meta['properties']
    default_vm_gen = self._hostutils.get_default_vm_generation()
    image_prop_vm = image_props.get(constants.IMAGE_PROP_VM_GEN,
                                    default_vm_gen)
    if image_prop_vm not in self._hostutils.get_supported_vm_types():
        # NOTE: the original message contained a double space
        # ("on  this OS."); fixed here.
        LOG.error(_LE('Requested VM Generation %s is not supported on '
                      'this OS.'), image_prop_vm)
        raise vmutils.HyperVException(
            _('Requested VM Generation %s is not supported on this '
              'OS.') % image_prop_vm)

    vm_gen = VM_GENERATIONS[image_prop_vm]

    # Generation 2 VMs cannot boot from the legacy VHD format.
    if (vm_gen != constants.VM_GEN_1 and root_vhd_path and
            self._vhdutils.get_vhd_format(root_vhd_path) ==
            constants.DISK_FORMAT_VHD):
        LOG.error(_LE('Requested VM Generation %s, but provided VHD '
                      'instead of VHDX.'), vm_gen)
        raise vmutils.HyperVException(
            _('Requested VM Generation %s, but provided VHD instead of '
              'VHDX.') % vm_gen)

    return vm_gen
def enable_remotefx_video_adapter(self, vm_name, monitor_count,
                                  max_resolution):
    """Add a RemoteFX synthetic 3D display controller to a VM.

    :param vm_name: name of the virtual machine to reconfigure.
    :param monitor_count: maximum number of monitors for the adapter.
    :param max_resolution: resolution key, mapped through
        self._remote_fx_res_map to the WMI resolution value.
    :raises vmutils.HyperVException: if the resolution is unsupported,
        the host lacks a DirectX 11 capable GPU or SLAT support, or
        RemoteFX is already configured on this VM.
    """
    vm = self._lookup_vm_check(vm_name)

    max_res_value = self._remote_fx_res_map.get(max_resolution)
    if max_res_value is None:
        raise vmutils.HyperVException(_("Unsupported RemoteFX resolution: "
                                        "%s") % max_resolution)

    # Host capability checks, reported by the synthetic 3D video pool.
    synth_3d_video_pool = self._conn.Msvm_Synth3dVideoPool()[0]
    if not synth_3d_video_pool.IsGpuCapable:
        raise vmutils.HyperVException(_("To enable RemoteFX on Hyper-V at "
                                        "least one GPU supporting DirectX "
                                        "11 is required"))
    if not synth_3d_video_pool.IsSlatCapable:
        raise vmutils.HyperVException(_("To enable RemoteFX on Hyper-V it "
                                        "is required that the host CPUs "
                                        "support SLAT"))

    vmsettings = vm.associators(
        wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
    rasds = vmsettings[0].associators(
        wmi_result_class=self._CIM_RES_ALLOC_SETTING_DATA_CLASS)

    # A synthetic 3D display controller already present means RemoteFX
    # was previously configured for this VM.
    if [r for r in rasds if r.ResourceSubType ==
            self._SYNTH_3D_DISP_CTRL_RES_SUB_TYPE]:
        raise vmutils.HyperVException(_("RemoteFX is already configured "
                                        "for this VM"))

    # Remove the regular synthetic display controller first; it is
    # replaced by the 3D one below.
    synth_disp_ctrl_res_list = [r for r in rasds if r.ResourceSubType ==
                                self._SYNTH_DISP_CTRL_RES_SUB_TYPE]
    if synth_disp_ctrl_res_list:
        self._remove_virt_resource(synth_disp_ctrl_res_list[0],
                                   vm.path_())

    synth_3d_disp_ctrl_res = self._get_new_resource_setting_data(
        self._SYNTH_3D_DISP_CTRL_RES_SUB_TYPE,
        self._SYNTH_3D_DISP_ALLOCATION_SETTING_DATA_CLASS)

    synth_3d_disp_ctrl_res.MaximumMonitors = monitor_count
    synth_3d_disp_ctrl_res.MaximumScreenResolution = max_res_value

    self._add_virt_resource(synth_3d_disp_ctrl_res, vm.path_())

    # NOTE(review): the S3 display controller address is set to the
    # DirectX 11 value — presumably required for RemoteFX operation;
    # confirm against the Hyper-V WMI documentation.
    s3_disp_ctrl_res = [r for r in rasds if r.ResourceSubType ==
                        self._S3_DISP_CTRL_RES_SUB_TYPE][0]

    s3_disp_ctrl_res.Address = self._DISP_CTRL_ADDRESS_DX_11

    self._modify_virt_resource(s3_disp_ctrl_res, vm.path_())
def check_live_migration_config(self):
    """Verify that this host is properly configured for live migration.

    :raises vmutils.HyperVException: if live migration is disabled or
        no migration networks are configured on the host.
    """
    conn_v2 = self._get_conn_v2()
    migration_svc = conn_v2.Msvm_VirtualSystemMigrationService()[0]

    # The service settings carry the "migration enabled" flag.
    settings = migration_svc.associators(
        wmi_association_class='Msvm_ElementSettingData',
        wmi_result_class='Msvm_VirtualSystemMigrationServiceSettingData')[0]

    if not settings.EnableVirtualSystemMigration:
        raise vmutils.HyperVException(
            _('Live migration is not enabled on this host'))

    if not migration_svc.MigrationServiceListenerIPAddressList:
        raise vmutils.HyperVException(
            _('Live migration networks are not configured on this host'))
def rescue_instance(self, context, instance, network_info, image_meta,
                    rescue_password):
    """Boot the instance from a rescue image.

    The rescue image becomes the boot (root) disk, while the original
    root disk is re-attached as a secondary SCSI drive so its data can
    be recovered.

    :raises vmutils.HyperVException: if the rescue image requires a
        different VM generation than the rescued instance, or if the
        instance root disk cannot be found (boot from volume is not
        supported for rescue).
    """
    rescue_image_id = image_meta.get('id') or instance.image_ref
    rescue_vhd_path = self._create_root_vhd(
        context, instance, rescue_image_id=rescue_image_id)

    # The rescue image must match the VM generation of the existing VM.
    rescue_vm_gen = self.get_image_vm_generation(rescue_vhd_path,
                                                 image_meta)
    vm_gen = self._vmutils.get_vm_gen(instance.name)
    if rescue_vm_gen != vm_gen:
        err_msg = _('The requested rescue image requires a different VM '
                    'generation than the actual rescued instance. '
                    'Rescue image VM generation: %(rescue_vm_gen)s. '
                    'Rescued instance VM generation: %(vm_gen)s.')
        raise vmutils.HyperVException(err_msg %
                                      {'rescue_vm_gen': rescue_vm_gen,
                                       'vm_gen': vm_gen})

    root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name)
    if not root_vhd_path:
        err_msg = _('Instance root disk image could not be found. '
                    'Rescuing instances booted from volume is '
                    'not supported.')
        raise vmutils.HyperVException(err_msg)

    controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]

    # Swap the boot disk: detach the original root disk, attach the
    # rescue image in its place, then expose the original root disk on
    # the SCSI controller so the user can recover its data.
    self._vmutils.detach_vm_disk(instance.name, root_vhd_path,
                                 is_physical=False)
    self._attach_drive(instance.name, rescue_vhd_path, 0,
                       self._ROOT_DISK_CTRL_ADDR, controller_type)
    self._vmutils.attach_scsi_drive(instance.name, root_vhd_path,
                                    drive_type=constants.DISK)

    if configdrive.required_by(instance):
        # Replace the config drive with one built for rescue mode
        # (admin password set to the rescue password).
        self._detach_config_drive(instance.name)
        rescue_configdrive_path = self._create_config_drive(
            instance,
            injected_files=None,
            admin_password=rescue_password,
            network_info=network_info,
            rescue=True)
        self.attach_config_drive(instance, rescue_configdrive_path,
                                 vm_gen)

    self.power_on(instance)
def parse_disk_qos_specs(self, qos_specs):
    """Parse disk QoS specs, normalizing byte-based values to IOPS.

    :param qos_specs: mapping of QoS spec names to values. When
        'total_iops_sec' / 'min_iops_sec' are absent, they are derived
        from 'total_bytes_sec' / 'min_bytes_sec' respectively.
    :returns: tuple (min_iops, total_iops) as ints.
    :raises vmutils.HyperVException: if the minimum requested IOPS
        exceeds the (non-zero) maximum.
    """
    total_bytes_sec = int(qos_specs.get('total_bytes_sec', 0))
    min_bytes_sec = int(qos_specs.get('min_bytes_sec', 0))

    total_iops = int(qos_specs.get(
        'total_iops_sec',
        self._bytes_per_sec_to_iops(total_bytes_sec)))
    min_iops = int(qos_specs.get(
        'min_iops_sec',
        self._bytes_per_sec_to_iops(min_bytes_sec)))

    if total_iops and total_iops < min_iops:
        err_msg = (_("Invalid QoS specs: minimum IOPS cannot be greater "
                     "than maximum IOPS. "
                     "Requested minimum IOPS: %(min_iops)s "
                     "Requested maximum IOPS: %(total_iops)s.") %
                   {'min_iops': min_iops,
                    'total_iops': total_iops})
        raise vmutils.HyperVException(err_msg)

    # Unknown spec names are ignored with a warning instead of failing.
    unsupported_specs = [spec for spec in qos_specs
                         if spec not in self._SUPPORTED_QOS_SPECS]
    if unsupported_specs:
        # Fixed placeholder typo: 'supported_qos_speces' ->
        # 'supported_qos_specs' (template and mapping key together).
        LOG.warn(_LW('Ignoring unsupported qos specs: '
                     '%(unsupported_specs)s. '
                     'Supported qos specs: %(supported_qos_specs)s'),
                 {'unsupported_specs': unsupported_specs,
                  'supported_qos_specs': self._SUPPORTED_QOS_SPECS})

    return min_iops, total_iops
def execute(self, *args, **kwargs):
    """Run an iscsicli command and verify it reported success.

    :returns: the command's stdout.
    :raises vmutils.HyperVException: if the output does not contain the
        iscsicli success marker.
    """
    stdout_value, stderr_value = utils.execute(*args, **kwargs)
    # Idiomatic membership test instead of str.find(...) == -1.
    if 'The operation completed successfully' not in stdout_value:
        raise vmutils.HyperVException(_('An error has occurred when '
                                        'calling the iscsi initiator: %s')
                                      % stdout_value)
    return stdout_value
def start(self):
    """Start the named pipe handler worker threads.

    Opens the pipe (and the log file, when configured) and spawns one
    daemon thread per job: always a pipe reader, plus a pipe writer
    when both an input queue and a connect event were provided.

    :raises vmutils.HyperVException: if initialization fails; stop() is
        called first so partially acquired resources are released.
    """
    try:
        self._open_pipe()

        if self._log_file_path:
            # Line-buffered append so output reaches disk promptly.
            self._log_file_handle = open(self._log_file_path, 'ab', 1)

        jobs = [self._read_from_pipe]
        if self._input_queue and self._connect_event:
            jobs.append(self._write_to_pipe)

        for job in jobs:
            worker = threading.Thread(target=job)
            # Prefer the daemon attribute over the deprecated
            # setDaemon() helper.
            worker.daemon = True
            worker.start()
            self._workers.append(worker)
    except Exception as err:
        msg = (_("Named pipe handler failed to initialize. "
                 "Pipe Name: %(pipe_name)s "
                 "Error: %(err)s") % {'pipe_name': self._pipe_name,
                                      'err': err})
        LOG.error(msg)
        self.stop()
        raise vmutils.HyperVException(msg)
def _get_vm_serial_port_mapping(self):
    """Map serial port types (RO/RW) to the VM's named pipe paths.

    :returns: dict mapping a serial port type constant to a pipe path.
    :raises vmutils.HyperVException: if the VM exposes no serial port
        pipes at all.
    """
    pipe_paths = self._vmutils.get_vm_serial_port_connections(
        self._instance_name)
    if not pipe_paths:
        raise vmutils.HyperVException(
            _("No suitable serial port pipe was found "
              "for instance %(instance_name)s")
            % {'instance_name': self._instance_name})

    # At the moment, we tag the pipes by using a pipe path suffix
    # as we can't use the serial port ElementName attribute because of
    # a Hyper-V bug.
    mapping = {}
    tagged_types = (constants.SERIAL_PORT_TYPE_RO,
                    constants.SERIAL_PORT_TYPE_RW)
    for pipe_path in pipe_paths:
        suffix = pipe_path[-2:]
        # Untagged pipes default to the read-write port type.
        port_type = (suffix if suffix in tagged_types
                     else constants.SERIAL_PORT_TYPE_RW)
        mapping[port_type] = pipe_path

    return mapping
def login_storage_target(self, connection_info):
    """Log in to the iSCSI target described by *connection_info*.

    Skips the login if the target/LUN is already attached; otherwise
    performs the login and waits until the disk shows up.

    :raises vmutils.HyperVException: on unsupported (non-CHAP) iSCSI
        authentication methods.
    """
    data = connection_info['data']
    target_lun = data['target_lun']
    target_iqn = data['target_iqn']
    target_portal = data['target_portal']
    auth_method = data.get('auth_method')
    auth_username = data.get('auth_username')
    auth_password = data.get('auth_password')

    # Only CHAP (or no auth at all) is supported.
    if auth_method and auth_method.upper() != 'CHAP':
        raise vmutils.HyperVException(
            _("Cannot log in target %(target_iqn)s. Unsupported iSCSI "
              "authentication method: %(auth_method)s.") %
            {'target_iqn': target_iqn,
             'auth_method': auth_method})

    log_ctx = {'target_portal': target_portal,
               'target_iqn': target_iqn,
               'target_lun': target_lun}

    # Check if we already logged in
    if self._volutils.get_device_number_for_target(target_iqn,
                                                   target_lun):
        LOG.debug("Already logged in on storage target. No need to "
                  "login. Portal: %(target_portal)s, "
                  "IQN: %(target_iqn)s, LUN: %(target_lun)s", log_ctx)
        return

    LOG.debug("Logging in on storage target. Portal: "
              "%(target_portal)s, IQN: %(target_iqn)s, "
              "LUN: %(target_lun)s", log_ctx)
    self._volutils.login_storage_target(target_lun, target_iqn,
                                        target_portal, auth_username,
                                        auth_password)
    # Wait for the target to be mounted
    self._get_mounted_disk_from_lun(target_iqn, target_lun, True)
def unrescue_instance(self, instance):
    """Restore an instance to its pre-rescue disk layout.

    Detaches the rescue image (deleting it), re-attaches the original
    root disk as the boot disk and restores the original config drive.
    """
    self.power_off(instance)

    root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name)
    rescue_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name,
                                                           rescue=True)

    # Only fail when the instance is actually in RESCUED state and a
    # required image is missing.
    if (instance.vm_state == vm_states.RESCUED and
            not (rescue_vhd_path and root_vhd_path)):
        err_msg = _('Missing instance root and/or rescue image. '
                    'The instance cannot be unrescued.')
        raise vmutils.HyperVException(err_msg)

    vm_gen = self._vmutils.get_vm_gen(instance.name)
    controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]

    # NOTE(review): when vm_state != RESCUED, root_vhd_path may be None
    # here and is still passed to detach/attach — confirm whether the
    # callers guarantee a root disk exists in that case.
    self._vmutils.detach_vm_disk(instance.name, root_vhd_path,
                                 is_physical=False)
    if rescue_vhd_path:
        self._vmutils.detach_vm_disk(instance.name, rescue_vhd_path,
                                     is_physical=False)
        fileutils.delete_if_exists(rescue_vhd_path)
    self._attach_drive(instance.name, root_vhd_path, 0,
                       self._ROOT_DISK_CTRL_ADDR, controller_type)

    self._detach_config_drive(instance.name, rescue=True, delete=True)

    # Reattach the configdrive, if exists.
    configdrive_path = self._pathutils.lookup_configdrive_path(
        instance.name)
    if configdrive_path:
        self.attach_config_drive(instance, configdrive_path, vm_gen)

    self.power_on(instance)
def create_instance(self, instance, network_info, block_device_info,
                    root_vhd_path, eph_vhd_path, vm_gen, image_meta):
    """Create and fully configure a Hyper-V VM for *instance*.

    Creates the VM, optional RemoteFX adapter, disk controllers and
    drives, volumes, serial port pipes, QoS settings, NICs, and metrics
    collection.

    :param root_vhd_path: local root disk path, or None when booting
        from volume.
    :param eph_vhd_path: ephemeral disk path, or None.
    :raises vmutils.HyperVException: if RemoteFX is requested on a
        generation 2 VM.
    """
    instance_name = instance.name
    instance_path = os.path.join(CONF.instances_path, instance_name)

    self._vmutils.create_vm(instance_name,
                            instance.memory_mb,
                            instance.vcpus,
                            CONF.hyperv.limit_cpu_features,
                            CONF.hyperv.dynamic_memory_ratio,
                            vm_gen,
                            instance_path,
                            [instance.uuid])

    flavor_extra_specs = instance.flavor.extra_specs
    remote_fx_config = flavor_extra_specs.get(
        constants.FLAVOR_REMOTE_FX_EXTRA_SPEC_KEY)
    if remote_fx_config:
        if vm_gen == constants.VM_GEN_2:
            raise vmutils.HyperVException(_("RemoteFX is not supported "
                                            "on generation 2 virtual "
                                            "machines."))
        else:
            self._configure_remotefx(instance, remote_fx_config)

    self._vmutils.create_scsi_controller(instance_name)
    controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]

    ctrl_disk_addr = 0
    if root_vhd_path:
        self._attach_drive(instance_name, root_vhd_path, 0,
                           ctrl_disk_addr, controller_type)
        ctrl_disk_addr += 1

    if eph_vhd_path:
        self._attach_drive(instance_name, eph_vhd_path, 0,
                           ctrl_disk_addr, controller_type)

    # If ebs_root is False, the first volume will be attached to SCSI
    # controller. Generation 2 VMs only has a SCSI controller.
    # NOTE: use value equality, not identity ('is not'), to compare
    # the generation constant.
    ebs_root = vm_gen != constants.VM_GEN_2 and root_vhd_path is None
    self._volumeops.attach_volumes(block_device_info,
                                   instance_name,
                                   ebs_root)

    serial_ports = self._get_image_serial_port_settings(image_meta)
    self._create_vm_com_port_pipes(instance, serial_ports)
    self._set_instance_disk_qos_specs(instance)

    for vif in network_info:
        LOG.debug('Creating nic for instance', instance=instance)
        self._vmutils.create_nic(instance_name,
                                 vif['id'],
                                 vif['address'])
        vif_driver = self._get_vif_driver(vif.get('type'))
        vif_driver.plug(instance, vif)

    if CONF.hyperv.enable_instance_metrics_collection:
        self._vmutils.enable_vm_metrics_collection(instance_name)
def _get_vm(self, conn_v2, vm_name):
    """Look up a VM by name, requiring exactly one match.

    :raises exception.NotFound: if no VM has that name.
    :raises vmutils.HyperVException: if the name is ambiguous.
    """
    vms = conn_v2.Msvm_ComputerSystem(ElementName=vm_name)
    if not vms:
        raise exception.NotFound(_('VM not found: %s') % vm_name)
    if len(vms) > 1:
        raise vmutils.HyperVException(_('Duplicate VM name found: %s')
                                      % vm_name)
    return vms[0]
def create_dynamic_vhd(self, path, max_internal_size, format):
    """Create a dynamically expanding virtual disk (VHD or VHDX).

    :param format: disk format name, validated against
        self._vhd_format_map.
    :raises vmutils.HyperVException: for unsupported formats.
    """
    fmt_value = self._vhd_format_map.get(format)
    if not fmt_value:
        raise vmutils.HyperVException(_("Unsupported disk format: %s")
                                      % format)

    self._create_vhd(self._VHD_TYPE_DYNAMIC, fmt_value, path,
                     max_internal_size=max_internal_size)
def create_dynamic_vhd(self, path, max_internal_size, format):
    """Create a dynamically expanding disk (legacy VHD format only).

    :raises vmutils.HyperVException: for any format other than VHD.
    """
    if format != constants.DISK_FORMAT_VHD:
        raise vmutils.HyperVException(_("Unsupported disk format: %s")
                                      % format)

    image_man_svc = self._conn.Msvm_ImageManagementService()[0]

    job_path, ret_val = image_man_svc.CreateDynamicVirtualHardDisk(
        Path=path, MaxInternalSize=max_internal_size)
    self._vmutils.check_ret_val(ret_val, job_path)
def _check_and_attach_config_drive(self, instance, vm_gen):
    """Attach the instance's config drive when one is required.

    :raises vmutils.HyperVException: if the instance requires a config
        drive but none can be found on disk.
    """
    if not configdrive.required_by(instance):
        return

    configdrive_path = self._pathutils.lookup_configdrive_path(
        instance.name)
    if not configdrive_path:
        raise vmutils.HyperVException(
            _("Config drive is required by instance: %s, "
              "but it does not exist.") % instance.name)

    self._vmops.attach_config_drive(instance, configdrive_path, vm_gen)
def _get_virt_utils_class(v1_class, v2_class):
    """Select the v1 or v2 virtualization utils class.

    :raises vmutils.HyperVException: if v1 is forced on an OS where the
        v1 WMI namespace no longer exists.
    """
    # The "root/virtualization" WMI namespace is no longer supported on
    # Windows Server / Hyper-V Server 2012 R2 / Windows 8.1
    # (kernel version 6.3) or above.
    force_v1 = CONF.hyperv.force_hyperv_utils_v1
    if force_v1 and get_hostutils().check_min_windows_version(6, 3):
        raise vmutils.HyperVException(
            _('The "force_hyperv_utils_v1" option cannot be set to "True" '
              'on Windows Server / Hyper-V Server 2012 R2 or above as the WMI '
              '"root/virtualization" namespace is no longer supported.'))
    return _get_class(v1_class, v2_class, force_v1)
def _get_vhd_dynamic_blk_size(self, vhd_path):
    """Read the block size field from a dynamic VHD file.

    :returns: the block size as a big-endian signed 32-bit int.
    :raises vmutils.HyperVException: if the file cannot be read.
    """
    try:
        with open(vhd_path, "rb") as f:
            f.seek(VHD_BLK_SIZE_OFFSET)
            raw_blk_size = f.read(4)
    except IOError:
        raise vmutils.HyperVException(_("Unable to obtain block size from"
                                        " VHD %(vhd_path)s") %
                                      {"vhd_path": vhd_path})

    return struct.unpack('>i', raw_blk_size)[0]
def mount_smb_share(self, smbfs_share, username=None, password=None):
    """Mount an SMB share via the Msft_SmbMapping WMI class.

    :param smbfs_share: UNC path of the share to mount.
    :param username: optional credentials for the mapping.
    :param password: optional credentials for the mapping.
    :raises vmutils.HyperVException: if the WMI call fails.
    """
    try:
        LOG.debug('Mounting share: %s', smbfs_share)
        self._smb_conn.Msft_SmbMapping.Create(RemotePath=smbfs_share,
                                              UserName=username,
                                              Password=password)
    except wmi.x_wmi as exc:
        # Apply the % interpolation: the original built a
        # (format, values) tuple, so the raised message was never
        # formatted.
        err_msg = (_('Unable to mount SMBFS share: %(smbfs_share)s '
                     'WMI exception: %(wmi_exc)s') %
                   {'smbfs_share': smbfs_share,
                    'wmi_exc': exc})
        raise vmutils.HyperVException(err_msg)
def get_external_vswitch(self, vswitch_name):
    """Return the WMI path of an external virtual switch.

    :param vswitch_name: switch name to look up; when falsy, the switch
        bound to the first physical NIC is used instead.
    :raises vmutils.HyperVException: if no matching switch exists.
    """
    if vswitch_name:
        vswitches = self._conn.Msvm_VirtualEthernetSwitch(
            ElementName=vswitch_name)
        if not vswitches:
            raise vmutils.HyperVException(_('vswitch "%s" not found')
                                          % vswitch_name)
    else:
        # Find the vswitch that is connected to the first physical nic.
        ext_port = self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')[0]
        lan_endpoint = ext_port.associators(
            wmi_result_class='Msvm_LANEndpoint')[0]
        peer_endpoint = lan_endpoint.associators(
            wmi_result_class='Msvm_LANEndpoint')[0]
        switch_port = peer_endpoint.associators(
            wmi_result_class='Msvm_EthernetSwitchPort')[0]
        vswitches = switch_port.associators(
            wmi_result_class='Msvm_VirtualEthernetSwitch')
        if not vswitches:
            raise vmutils.HyperVException(_('No external vswitch found'))

    return vswitches[0].path_()
def _configure_remotefx(self, instance, config):
    """Enable RemoteFX for an instance from its flavor extra spec.

    :param config: flavor value of the form
        "<max_resolution>,<monitor_count>".
    :raises vmutils.HyperVException: if RemoteFX is disabled in the
        configuration or the RDS-Virtualization feature is missing.
    """
    if not CONF.hyperv.enable_remotefx:
        raise vmutils.HyperVException(
            _("enable_remotefx configuration option needs to be set to "
              "True in order to use RemoteFX"))

    if not self._hostutils.check_server_feature(
            self._hostutils.FEATURE_RDS_VIRTUALIZATION):
        raise vmutils.HyperVException(
            _("The RDS-Virtualization feature must be installed "
              "in order to use RemoteFX"))

    instance_name = instance.name
    LOG.debug('Configuring RemoteFX for instance: %s', instance_name)

    max_resolution, monitor_count = config.split(',')

    self._vmutils.enable_remotefx_video_adapter(instance_name,
                                                int(monitor_count),
                                                max_resolution)
def finish_migration(self, context, migration, instance, disk_info,
                     network_info, image_meta, resize_instance=False,
                     block_device_info=None, power_on=True):
    """Finish a migration/resize on the destination host.

    Validates the migrated disks (resizing them when needed), recreates
    the VM and reattaches the config drive before optionally powering
    the instance back on.

    :raises vmutils.HyperVException: if the boot VHD cannot be found
        for a non boot-from-volume instance.
    """
    LOG.debug("finish_migration called", instance=instance)

    instance_name = instance.name

    if self._volumeops.ebs_root_in_block_devices(block_device_info):
        # Boot from volume: there is no local root disk to process.
        root_vhd_path = None
    else:
        root_vhd_path = self._pathutils.lookup_root_vhd_path(
            instance_name)
        if not root_vhd_path:
            raise vmutils.HyperVException(_("Cannot find boot VHD "
                                            "file for instance: %s") %
                                          instance_name)

        # A migrated differencing disk must still point at a valid
        # base image on this host.
        root_vhd_info = self._vhdutils.get_vhd_info(root_vhd_path)
        src_base_disk_path = root_vhd_info.get("ParentPath")
        if src_base_disk_path:
            self._check_base_disk(context, instance, root_vhd_path,
                                  src_base_disk_path)

        if resize_instance:
            new_size = instance.root_gb * units.Gi
            self._check_resize_vhd(root_vhd_path, root_vhd_info,
                                   new_size)

    eph_vhd_path = self._pathutils.lookup_ephemeral_vhd_path(
        instance_name)
    if resize_instance:
        new_size = instance.get('ephemeral_gb', 0) * units.Gi
        if not eph_vhd_path:
            # The new flavor requests an ephemeral disk the instance
            # did not previously have.
            if new_size:
                eph_vhd_path = self._vmops.create_ephemeral_vhd(instance)
        else:
            eph_vhd_info = self._vhdutils.get_vhd_info(eph_vhd_path)
            self._check_resize_vhd(eph_vhd_path, eph_vhd_info, new_size)

    vm_gen = self._vmops.get_image_vm_generation(root_vhd_path,
                                                 image_meta)
    self._vmops.create_instance(instance, network_info,
                                block_device_info, root_vhd_path,
                                eph_vhd_path, vm_gen, image_meta)

    self._check_and_attach_config_drive(instance, vm_gen)

    if power_on:
        self._vmops.power_on(instance)
def _resize_and_cache_vhd(self, instance, vhd_path):
    """Return a cached copy of *vhd_path* resized to the flavor size.

    When the flavor's root disk is larger than the cached image, a
    resized copy named '<base>_<size_gb><ext>' is created (once, under
    a lock) and its path returned. Returns None when no resize is
    needed.

    :raises vmutils.HyperVException: if the flavor disk is smaller than
        the image's max internal size.
    """
    vhd_info = self._vhdutils.get_vhd_info(vhd_path)
    vhd_size = vhd_info['MaxInternalSize']

    root_vhd_size_gb = self._get_root_vhd_size_gb(instance)
    root_vhd_size = root_vhd_size_gb * units.Gi

    # The usable internal size for the requested file size accounts
    # for VHD metadata overhead.
    root_vhd_internal_size = (
        self._vhdutils.get_internal_vhd_size_by_file_size(
            vhd_path, root_vhd_size))

    if root_vhd_internal_size < vhd_size:
        raise vmutils.HyperVException(
            _("Cannot resize the image to a size smaller than the VHD "
              "max. internal size: %(vhd_size)s. Requested disk size: "
              "%(root_vhd_size)s") % {'vhd_size': vhd_size,
                                      'root_vhd_size': root_vhd_size})

    if root_vhd_internal_size > vhd_size:
        path_parts = os.path.splitext(vhd_path)
        resized_vhd_path = '%s_%s%s' % (path_parts[0],
                                        root_vhd_size_gb,
                                        path_parts[1])

        # Lock on the target path so concurrent spawns of the same
        # flavor/image only copy and resize once.
        @utils.synchronized(resized_vhd_path)
        def copy_and_resize_vhd():
            if not self._pathutils.exists(resized_vhd_path):
                try:
                    LOG.debug("Copying VHD %(vhd_path)s to "
                              "%(resized_vhd_path)s",
                              {'vhd_path': vhd_path,
                               'resized_vhd_path': resized_vhd_path})
                    self._pathutils.copyfile(vhd_path, resized_vhd_path)
                    LOG.debug("Resizing VHD %(resized_vhd_path)s to new "
                              "size %(root_vhd_size)s",
                              {'resized_vhd_path': resized_vhd_path,
                               'root_vhd_size': root_vhd_size})
                    self._vhdutils.resize_vhd(resized_vhd_path,
                                              root_vhd_internal_size,
                                              is_file_max_size=False)
                except Exception:
                    # Don't leave a half-copied/half-resized file in
                    # the cache; re-raise the original error.
                    with excutils.save_and_reraise_exception():
                        if self._pathutils.exists(resized_vhd_path):
                            self._pathutils.remove(resized_vhd_path)

        copy_and_resize_vhd()
        return resized_vhd_path
def login_storage_target(self, target_lun, target_iqn, target_portal,
                         auth_username=None, auth_password=None):
    """Ensure that the target is logged in.

    Connects to the iSCSI target via the MSFT_iSCSITarget WMI class,
    retrying up to the configured attach retry count. A successful
    login is only observed on the *next* loop iteration, when the
    target reports IsConnected.

    :raises vmutils.HyperVException: if the target is still not
        connected after all retries.
    """
    self._login_target_portal(target_portal)

    retry_count = CONF.hyperv.volume_attach_retry_count

    # If the target is not connected, at least two iterations are
    # needed: one for performing the login and another one for
    # checking if the target was logged in successfully.
    if retry_count < 2:
        retry_count = 2

    # NOTE: xrange — this module targets Python 2.
    for attempt in xrange(retry_count):
        target = self._conn_storage.query("SELECT * FROM MSFT_iSCSITarget "
                                          "WHERE NodeAddress='%s' " %
                                          target_iqn)
        if target and target[0].IsConnected:
            if attempt == 0:
                # The target was already connected but an update may be
                # required
                target[0].Update()
            return
        try:
            target = self._conn_storage.MSFT_iSCSITarget
            auth = {}
            if auth_username and auth_password:
                auth['AuthenticationType'] = self._CHAP_AUTH_TYPE
                auth['ChapUsername'] = auth_username
                auth['ChapSecret'] = auth_password
            target.Connect(NodeAddress=target_iqn,
                           IsPersistent=True, **auth)
            # Give the connection time to establish before the next
            # iteration re-checks IsConnected.
            time.sleep(CONF.hyperv.volume_attach_retry_interval)
        except wmi.x_wmi as exc:
            LOG.debug("Attempt %(attempt)d to connect to target "
                      "%(target_iqn)s failed. Retrying. "
                      "WMI exception: %(exc)s " %
                      {'target_iqn': target_iqn,
                       'exc': exc,
                       'attempt': attempt})
    raise vmutils.HyperVException(_('Failed to login target %s') %
                                  target_iqn)
def get_internal_vhd_size_by_file_size(self, vhd_path,
                                       new_vhd_file_size):
    """VHDX Size = Header (1 MB)
                    + Log
                    + Metadata Region
                    + BAT
                    + Payload Blocks
    Chunk size = maximum number of bytes described by a SB block
               = 2 ** 23 * LogicalSectorSize
    """
    vhd_format = self.get_vhd_format(vhd_path)
    if vhd_format == constants.DISK_FORMAT_VHD:
        # Plain VHD files are handled by the v1 implementation.
        return super(VHDUtilsV2,
                     self).get_internal_vhd_size_by_file_size(
                         vhd_path, new_vhd_file_size)
    else:
        vhd_info = self.get_vhd_info(vhd_path)
        vhd_type = vhd_info['Type']
        if vhd_type == self._VHD_TYPE_DIFFERENCING:
            # For differencing disks, the size is determined by the
            # parent image.
            vhd_parent = self.get_vhd_parent_path(vhd_path)
            return self.get_internal_vhd_size_by_file_size(
                vhd_parent, new_vhd_file_size)
        else:
            try:
                with open(vhd_path, 'rb') as f:
                    # hs: header section size, bes: BAT entry size,
                    # lss: logical sector size, bs: block size,
                    # ls: log size, ms: metadata region size.
                    hs = VHDX_HEADER_SECTION_SIZE
                    bes = VHDX_BAT_ENTRY_SIZE
                    lss = vhd_info['LogicalSectorSize']
                    bs = self._get_vhdx_block_size(f)
                    ls = self._get_vhdx_log_size(f)
                    ms = self._get_vhdx_metadata_size_and_offset(f)[0]
                    # NOTE(review): this arithmetic relies on Python 2
                    # integer division ('/' truncating); under Python 3
                    # it would produce floats — confirm before porting.
                    chunk_ratio = (1 << 23) * lss / bs
                    size = new_vhd_file_size
                    # Solve the VHDX size equation above for the
                    # payload (internal) size, then round down to a
                    # whole number of blocks.
                    max_internal_size = (
                        bs * chunk_ratio * (size - hs - ls - ms -
                                            bes - bes / chunk_ratio) /
                        (bs * chunk_ratio + bes * chunk_ratio + bes))
                    return max_internal_size - (max_internal_size % bs)
            except IOError as ex:
                raise vmutils.HyperVException(_("Unable to obtain "
                                                "internal size from VHDX: "
                                                "%(vhd_path)s. Exception: "
                                                "%(ex)s") %
                                              {"vhd_path": vhd_path,
                                               "ex": ex})
def _get_conn_v2(self, host='localhost'):
    """Open a v2 virtualization WMI connection to *host*.

    :raises vmutils.HyperVException: with a live-migration specific
        message for known HRESULTs, or a generic failure otherwise.
    """
    try:
        return wmi.WMI(moniker='//%s/root/virtualization/v2' % host)
    except wmi.x_wmi as ex:
        LOG.exception(_LE('Get version 2 connection error'))
        hresult = ex.com_error.hresult
        if hresult == -2147217394:
            # Namespace not found: the target lacks the v2 namespace.
            msg = (_('Live migration is not supported on target host "%s"')
                   % host)
        elif hresult == -2147023174:
            # RPC server unavailable.
            msg = (_('Target live migration host "%s" is unreachable')
                   % host)
        else:
            msg = _('Live migration failed: %s') % ex.message
        raise vmutils.HyperVException(msg)
def get_vhd_format(self, path):
    """Detect the virtual disk format of the file at *path*.

    :returns: constants.DISK_FORMAT_VHDX or constants.DISK_FORMAT_VHD.
    :raises vmutils.HyperVException: if neither signature matches.
    """
    with open(path, 'rb') as f:
        # VHDX files carry their signature at the start of the header.
        if f.read(8) == VHDX_SIGNATURE:
            return constants.DISK_FORMAT_VHDX

        # Legacy VHD files carry theirs in the 512-byte footer.
        f.seek(0, 2)
        if f.tell() >= 512:
            f.seek(-512, 2)
            if f.read(8) == VHD_SIGNATURE:
                return constants.DISK_FORMAT_VHD

    raise vmutils.HyperVException(_('Unsupported virtual disk format'))
def create_vswitch_port(self, vswitch_path, port_name):
    """Create a port on the given virtual switch.

    :returns: the newly created switch port.
    :raises vmutils.HyperVException: if the WMI call reports failure.
    """
    switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0]

    # Create a port on the vswitch.
    port_id = str(uuid.uuid4())
    new_port, ret_val = switch_svc.CreateSwitchPort(
        Name=port_id,
        FriendlyName=port_name,
        ScopeOfResidence="",
        VirtualSwitch=vswitch_path)
    if ret_val != 0:
        raise vmutils.HyperVException(_("Failed to create vswitch port "
                                        "%(port_name)s on switch "
                                        "%(vswitch_path)s") %
                                      {'port_name': port_name,
                                       'vswitch_path': vswitch_path})
    return new_port
def _setup_socket(self):
    """Create, bind and start listening on the serial proxy socket.

    :raises vmutils.HyperVException: if the socket cannot be set up;
        the socket is closed before raising.
    """
    try:
        self._sock = socket.socket(socket.AF_INET,
                                   socket.SOCK_STREAM)
        # Allow quick rebinds after restarts.
        self._sock.setsockopt(socket.SOL_SOCKET,
                              socket.SO_REUSEADDR,
                              1)
        self._sock.bind((self._addr, self._port))
        self._sock.listen(1)
    except socket.error as err:
        self._sock.close()
        # Fixed missing space: the message used to render as
        # "proxy on<addr>:<port>".
        msg = (_('Failed to initialize serial proxy on '
                 '%(addr)s:%(port)s, handling connections '
                 'to instance %(instance_name)s. Error: %(error)s') %
               {'addr': self._addr,
                'port': self._port,
                'instance_name': self._instance_name,
                'error': err})
        raise vmutils.HyperVException(msg)
def get_console_output(self, instance_name):
    """Return the concatenated console log of an instance.

    Log files are concatenated oldest-first so the output reads
    chronologically.

    :raises vmutils.HyperVException: if a log file cannot be read.
    """
    console_log_paths = self._pathutils.get_vm_console_log_paths(
        instance_name)
    try:
        # Start with the oldest console log file.
        chunks = []
        for log_path in reversed(console_log_paths):
            if os.path.exists(log_path):
                with open(log_path, 'rb') as fp:
                    chunks.append(fp.read())
        return ''.join(chunks)
    except IOError as err:
        msg = (_("Could not get instance %(instance_name)s "
                 "console output. Error: %(err)s") %
               {'instance_name': instance_name, 'err': err})
        raise vmutils.HyperVException(msg)
def _verify_rescue_image(self, instance, rescue_image_id,
                         rescue_image_path):
    """Check that the rescue image fits within the flavor's root disk.

    :raises vmutils.HyperVException: if the rescue image's max internal
        size exceeds the flavor disk size.
    """
    image_info = self._vhdutils.get_vhd_info(rescue_image_path)
    rescue_image_size = image_info['MaxInternalSize']
    flavor_disk_size = instance.root_gb * units.Gi

    if rescue_image_size <= flavor_disk_size:
        return

    err_msg = _('Using a rescue image bigger than the instance '
                'flavor disk size is not allowed. '
                'Rescue image size: %(rescue_image_size)s. '
                'Flavor disk size:%(flavor_disk_size)s. '
                'Rescue image id %(rescue_image_id)s.')
    raise vmutils.HyperVException(
        err_msg % {'rescue_image_size': rescue_image_size,
                   'flavor_disk_size': flavor_disk_size,
                   'rescue_image_id': rescue_image_id})