def get_image_vm_generation(self, root_vhd_path, image_meta):
    """Return the VM generation required by the given image.

    :param root_vhd_path: path to the instance root disk, if any.
    :param image_meta: image metadata dict exposing a 'properties' key.
    :returns: one of the constants.VM_GEN_* values.
    :raises vmutils.HyperVException: if the requested generation is not
        supported by this host, or if a non Gen-1 VM is requested while
        the root disk uses the legacy VHD format.
    """
    image_props = image_meta['properties']
    default_vm_gen = self._hostutils.get_default_vm_generation()
    image_prop_vm = image_props.get(constants.IMAGE_PROP_VM_GEN,
                                    default_vm_gen)
    if image_prop_vm not in self._hostutils.get_supported_vm_types():
        # NOTE: fixed a double space in the log message ("on  this OS.")
        # so it matches the exception text below.
        LOG.error(_LE('Requested VM Generation %s is not supported on '
                      'this OS.'), image_prop_vm)
        raise vmutils.HyperVException(
            _('Requested VM Generation %s is not supported on this '
              'OS.') % image_prop_vm)

    vm_gen = VM_GENERATIONS[image_prop_vm]

    # Generation 2 VMs can only boot from VHDX format disks.
    if (vm_gen != constants.VM_GEN_1 and root_vhd_path and
            self._vhdutils.get_vhd_format(
                root_vhd_path) == constants.DISK_FORMAT_VHD):
        LOG.error(_LE('Requested VM Generation %s, but provided VHD '
                      'instead of VHDX.'), vm_gen)
        raise vmutils.HyperVException(
            _('Requested VM Generation %s, but provided VHD instead of '
              'VHDX.') % vm_gen)

    return vm_gen
def _check_hotplug_is_available(self, instance):
    """Tell whether a vNIC can be hot plugged into this instance.

    :returns: False when the VM is powered off (cold attach suffices).
    :raises exception.InterfaceAttachFailed: when the host OS or the
        VM generation cannot hot plug vNICs.
    """
    if self._get_vm_state(instance.name) == (
            constants.HYPERV_VM_STATE_DISABLED):
        # A stopped VM does not require hot plug support at all.
        return False

    # vNIC hot plugging needs Windows kernel 6.4+ (Server 2016).
    host_supported = self._hostutils.check_min_windows_version(6, 4)
    if not host_supported:
        LOG.error(_LE("This version of Windows does not support vNIC "
                      "hot plugging."))
        raise exception.InterfaceAttachFailed(instance_uuid=instance.uuid)

    # Generation 1 VMs cannot receive NICs while running.
    vm_gen = self._vmutils.get_vm_gen(instance.name)
    if vm_gen == constants.VM_GEN_1:
        LOG.error(_LE("Cannot hot plug vNIC to a first generation "
                      "VM."))
        raise exception.InterfaceAttachFailed(instance_uuid=instance.uuid)

    return True
def destroy(self, instance, network_info=None, block_device_info=None,
            destroy_disks=True):
    """Destroy an instance: VM, attached volumes, disks and VIFs.

    :param destroy_disks: when True, also remove the instance's disk
        files from the host.
    """
    name = instance.name
    LOG.info(_LI("Got request to destroy instance"), instance=instance)
    try:
        if not self._vmutils.vm_exists(name):
            LOG.debug("Instance not found", instance=instance)
        else:
            # Cancel any pending WMI jobs before powering off.
            self._vmutils.stop_vm_jobs(name)
            self.power_off(instance)
            self._vmutils.destroy_vm(name)
            self._volumeops.disconnect_volumes(block_device_info)

        if destroy_disks:
            self._delete_disk_files(name)

        # Unplug each VIF using the driver matching its type.
        for vif in network_info or []:
            driver = self._get_vif_driver(vif.get('type'))
            driver.unplug(instance, vif)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Failed to destroy instance: %s'), name)
def attach_volume(self, connection_info, instance_name,
                  disk_bus=constants.CTRL_TYPE_SCSI):
    """Attach a share-backed disk to the instance's IDE or SCSI bus.

    :raises exception.VolumeAttachFailed: when the drive cannot be
        attached to the requested controller.
    """
    self.ensure_share_mounted(connection_info)
    disk_path = self.get_mounted_disk_path_from_volume(connection_info)
    try:
        if disk_bus == constants.CTRL_TYPE_IDE:
            # IDE disks always use the first slot of controller 0.
            ctrller_path = self._vmutils.get_vm_ide_controller(
                instance_name, 0)
            slot = 0
        else:
            ctrller_path = self._vmutils.get_vm_scsi_controller(
                instance_name)
            slot = self._vmutils.get_free_controller_slot(ctrller_path)

        self._vmutils.attach_drive(instance_name, disk_path,
                                   ctrller_path, slot)
    except vmutils.HyperVException as exn:
        LOG.exception(_LE('Attach volume failed to %(instance_name)s: '
                          '%(exn)s'),
                      {'instance_name': instance_name, 'exn': exn})
        raise exception.VolumeAttachFailed(
            volume_id=connection_info['data']['volume_id'],
            reason=exn.message)
def _check_hotplug_is_available(self, instance):
    """Return True when a vNIC may be hot plugged into the instance.

    A powered-off VM returns False (attach can happen cold); an
    unsupported host OS or a Gen-1 VM raises InterfaceAttachFailed.
    """
    state = self._get_vm_state(instance.name)
    if state == constants.HYPERV_VM_STATE_DISABLED:
        return False

    # Hot plugging vNICs requires at least Windows kernel 6.4.
    if not self._hostutils.check_min_windows_version(6, 4):
        LOG.error(
            _LE("This version of Windows does not support vNIC "
                "hot plugging."))
        raise exception.InterfaceAttachFailed(instance_uuid=instance.uuid)

    # First generation VMs only support cold NIC attach.
    if self._vmutils.get_vm_gen(instance.name) == constants.VM_GEN_1:
        LOG.error(_LE("Cannot hot plug vNIC to a first generation "
                      "VM."))
        raise exception.InterfaceAttachFailed(instance_uuid=instance.uuid)

    return True
def login_storage_target(self, connection_info):
    """Ensure an iSCSI session exists to the target in connection_info.

    Logs in only if no session is already established, then waits for
    the backing disk to become available.

    :raises exception.UnsupportedBDMVolumeAuthMethod: for any
        authentication method other than CHAP.
    """
    data = connection_info['data']
    target_lun = data['target_lun']
    target_iqn = data['target_iqn']
    target_portal = data['target_portal']
    auth_method = data.get('auth_method')
    auth_username = data.get('auth_username')
    auth_password = data.get('auth_password')

    # Only CHAP (or no authentication) is supported.
    if auth_method and auth_method.upper() != 'CHAP':
        LOG.error(_LE("Cannot log in target %(target_iqn)s. Unsupported "
                      "iSCSI authentication method: %(auth_method)s."),
                  {'target_iqn': target_iqn,
                   'auth_method': auth_method})
        raise exception.UnsupportedBDMVolumeAuthMethod(
            auth_method=auth_method)

    log_args = {'target_portal': target_portal,
                'target_iqn': target_iqn,
                'target_lun': target_lun}
    # Skip the login when a session to this target already exists.
    if self._volutils.get_device_number_for_target(target_iqn,
                                                   target_lun):
        LOG.debug("Already logged in on storage target. No need to "
                  "login. Portal: %(target_portal)s, "
                  "IQN: %(target_iqn)s, LUN: %(target_lun)s", log_args)
        return

    LOG.debug("Logging in on storage target. Portal: "
              "%(target_portal)s, IQN: %(target_iqn)s, "
              "LUN: %(target_lun)s", log_args)
    self._volutils.login_storage_target(target_lun, target_iqn,
                                        target_portal, auth_username,
                                        auth_password)
    # Wait until the LUN is surfaced as a mounted disk.
    self._get_mounted_disk_from_lun(target_iqn, target_lun, True)
def remove_from_cluster(self, instance):
    """Remove the instance's VM from the Hyper-V cluster (best effort).

    Failures are logged and swallowed so teardown can proceed.
    """
    try:
        self._clustutils.delete(instance.name)
        # Drop our name -> uuid mapping; ignore if absent.
        self._instance_map.pop(instance.name, None)
    except os_win_exc.HyperVClusterException:
        LOG.exception(_LE('Removing instance from cluster failed.'),
                      instance=instance)
def attach_volume(self, connection_info, instance_name,
                  disk_bus=constants.CTRL_TYPE_SCSI):
    """Connect and attach a volume, applying QoS specs when present.

    On failure, any partially completed work is rolled back before the
    original exception is re-raised.
    """
    volume_driver = self._get_volume_driver(connection_info)
    volume_connected = False
    try:
        volume_driver.connect_volume(connection_info)
        volume_connected = True

        volume_driver.attach_volume(connection_info, instance_name,
                                    disk_bus=disk_bus)

        # Apply min/max IOPS limits if the volume carries QoS specs.
        qos_specs = connection_info['data'].get('qos_specs') or {}
        min_iops, max_iops = self.parse_disk_qos_specs(qos_specs)
        if min_iops or max_iops:
            volume_driver.set_disk_qos_specs(connection_info,
                                             min_iops, max_iops)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Unable to attach volume to instance %s'),
                          instance_name)
            # Even if the attach failed, some cleanup may be needed.
            # If the volume could not be connected, it surely is not
            # attached.
            if volume_connected:
                volume_driver.detach_volume(connection_info,
                                            instance_name)
                volume_driver.disconnect_volume(connection_info)
def _migrate_vm(self, ctxt, vm_name, host):
    """Move a clustered VM off this host via live migration or resize.

    VMs not created by nova (no instance uuid) are skipped. Any
    failure is converted into a MigrationError.
    """
    try:
        instance_uuid = self._vmutils.get_instance_uuid(vm_name)
        if not instance_uuid:
            LOG.info(_LI('VM "%s" running on this host was not created by '
                         'nova. Skip migrating this vm to a new host.'),
                     vm_name)
            return

        instance = objects.Instance.get_by_uuid(ctxt, instance_uuid)
        if instance.vm_state == vm_states.ACTIVE:
            # Running instances can be moved without downtime.
            self._api.live_migrate(ctxt, instance, block_migration=False,
                                   disk_over_commit=False, host_name=None)
        else:
            # Non-active instances are moved via a same-flavor resize.
            self._api.resize(ctxt, instance, flavor_id=None,
                             clean_shutdown=True)
        self._wait_for_instance_pending_task(ctxt, instance_uuid)
    except Exception as e:
        LOG.error(_LE('Migrating vm failed with error: %s '), e)
        raise exception.MigrationError(
            reason='Unable to migrate %s.' % vm_name)
def _looper():
    """Watch for VM failovers and trigger migration; never raise.

    Any exception is logged so the surrounding loop keeps running.
    """
    try:
        self._clustutils.monitor_vm_failover(self._failover_migrate)
    except Exception:
        # NOTE: fixed typo in log message ("occured" -> "occurred").
        LOG.exception(
            _LE('Exception occurred during failover '
                'observation / migration.'))
def destroy(self, instance, network_info=None, block_device_info=None,
            destroy_disks=True):
    """Destroy an instance's VM, volumes, disk files and VIFs.

    :param destroy_disks: when True, remove the instance disk files.
    """
    name = instance.name
    LOG.info(_LI("Got request to destroy instance"), instance=instance)
    try:
        if self._vmutils.vm_exists(name):
            # Stop any pending jobs, then power off and delete the VM.
            self._vmutils.stop_vm_jobs(name)
            self.power_off(instance)
            self._vmutils.destroy_vm(name)
            self._volumeops.disconnect_volumes(block_device_info)
        else:
            LOG.debug("Instance not found", instance=instance)

        if destroy_disks:
            self._delete_disk_files(name)

        self.unplug_vifs(instance, network_info)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Failed to destroy instance: %s'), name)
def add_to_cluster(self, instance):
    """Register the instance's VM with the Hyper-V cluster (best effort).

    A cluster failure is logged and swallowed; the instance keeps
    running unclustered.
    """
    try:
        self._clustutils.add_vm_to_cluster(instance.name)
        self._instance_map[instance.name] = instance.uuid
    except os_win_exc.HyperVClusterException:
        LOG.exception(_LE('Adding instance to cluster failed.'),
                      instance=instance)
def destroy(self, instance, network_info=None, block_device_info=None,
            destroy_disks=True):
    """Power off and delete an instance, its disks and its VIFs."""
    name = instance.name
    LOG.info(_LI("Got request to destroy instance"), instance=instance)
    try:
        if not self._vmutils.vm_exists(name):
            LOG.debug("Instance not found", instance=instance)
        else:
            self.power_off(instance)
            self._vmutils.destroy_vm(name)
            self._volumeops.disconnect_volumes(block_device_info)

        if destroy_disks:
            self._delete_disk_files(name)

        # Unplug every VIF through its type-specific driver.
        for vif in network_info or []:
            driver = self._get_vif_driver(vif.get('type'))
            driver.unplug(instance, vif)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Failed to destroy instance: %s'), name)
def _ovs_vsctl(args):
    """Run ovs-vsctl with the configured timeout and the given args.

    :returns: the result of utils.execute for the full command.
    :raises exception.AgentError: when the command fails.
    """
    cmd = ['ovs-vsctl',
           '--timeout=%s' % CONF.ovs_vsctl_timeout] + args
    try:
        return utils.execute(*cmd)
    except Exception as e:
        LOG.error(_LE("Unable to execute %(cmd)s. Exception: "
                      "%(exception)s"),
                  {'cmd': cmd, 'exception': e})
        raise exception.AgentError(method=cmd)
def check_vm_image_type(self, instance_id, vm_gen, root_vhd_path):
    """Reject non Gen-1 VMs whose root disk uses the legacy VHD format.

    Generation 2 Hyper-V VMs can only boot from VHDX images.

    :raises exception.InstanceUnacceptable: when a VHD root disk is
        used with a VM generation other than Gen-1.
    """
    if (vm_gen != constants.VM_GEN_1 and root_vhd_path and
            self._vhdutils.get_vhd_format(
                root_vhd_path) == constants.DISK_FORMAT_VHD):
        # NOTE: fixed the error text — this check is about the disk
        # format (VHD vs VHDX), not about host OS support.
        reason = _LE('Requested VM Generation %s, but provided VHD '
                     'instead of VHDX.') % vm_gen
        raise exception.InstanceUnacceptable(instance_id=instance_id,
                                             reason=reason)
def check_vm_image_type(self, instance_id, vm_gen, root_vhd_path):
    """Ensure the root disk format is compatible with the VM generation.

    Only Generation 1 VMs may boot from the legacy VHD format.

    :raises exception.InstanceUnacceptable: for a VHD root disk on a
        non Gen-1 VM.
    """
    if (vm_gen != constants.VM_GEN_1 and root_vhd_path and
            self._vhdutils.get_vhd_format(root_vhd_path) ==
            constants.DISK_FORMAT_VHD):
        # NOTE: fixed the error text — the failing condition is a VHD
        # disk on a Gen-2 VM, not an unsupported generation.
        reason = _LE('Requested VM Generation %s, but provided VHD '
                     'instead of VHDX.') % vm_gen
        raise exception.InstanceUnacceptable(instance_id=instance_id,
                                             reason=reason)
def connect_volume(self, connection_info):
    """Establish iSCSI session(s) to the volume's target.

    With multipath IO enabled, a session is attempted over every
    available path; otherwise the first successful login is used.

    :raises exception.UnsupportedBDMVolumeAuthMethod: for non-CHAP auth.
    :raises exception.VolumeAttachFailed: if no path could be connected.
    """
    connection_properties = connection_info['data']
    auth_method = connection_properties.get('auth_method')

    if auth_method and auth_method.upper() != 'CHAP':
        LOG.error(
            _LE("Unsupported iSCSI authentication "
                "method: %(auth_method)s."),
            dict(auth_method=auth_method))
        raise exception.UnsupportedBDMVolumeAuthMethod(
            auth_method=auth_method)

    volume_connected = False
    for (initiator_name, target_portal, target_iqn,
            target_lun) in self._get_all_paths(connection_properties):
        try:
            # NOTE: fixed typos in the log message
            # ("estabilish" -> "establish", "acessing" -> "accessing").
            msg = _LI("Attempting to establish an iSCSI session to "
                      "target %(target_iqn)s on portal %(target_portal)s "
                      "accessing LUN %(target_lun)s using initiator "
                      "%(initiator_name)s.")
            LOG.info(msg, dict(target_portal=target_portal,
                               target_iqn=target_iqn,
                               target_lun=target_lun,
                               initiator_name=initiator_name))
            self._iscsi_utils.login_storage_target(
                target_lun=target_lun,
                target_iqn=target_iqn,
                target_portal=target_portal,
                auth_username=connection_properties.get('auth_username'),
                auth_password=connection_properties.get('auth_password'),
                mpio_enabled=CONF.hyperv.use_multipath_io,
                initiator_name=initiator_name)
            volume_connected = True
            if not CONF.hyperv.use_multipath_io:
                # One path is enough when multipath IO is disabled.
                break
        except os_win_exc.OSWinException:
            LOG.exception(_LE("Could not connect iSCSI target %s."),
                          target_iqn)

    if not volume_connected:
        raise exception.VolumeAttachFailed(
            _("Could not connect volume %s.") %
            connection_properties['volume_id'])
def connect_volume(self, connection_info):
    """Log in to the volume's iSCSI target over one or all paths.

    When CONF.hyperv.use_multipath_io is set, every available path is
    attempted; otherwise the loop stops at the first successful login.

    :raises exception.UnsupportedBDMVolumeAuthMethod: for non-CHAP auth.
    :raises exception.VolumeAttachFailed: if no session was established.
    """
    connection_properties = connection_info['data']
    auth_method = connection_properties.get('auth_method')

    if auth_method and auth_method.upper() != 'CHAP':
        LOG.error(_LE("Unsupported iSCSI authentication "
                      "method: %(auth_method)s."),
                  dict(auth_method=auth_method))
        raise exception.UnsupportedBDMVolumeAuthMethod(
            auth_method=auth_method)

    volume_connected = False
    for (initiator_name, target_portal, target_iqn,
            target_lun) in self._get_all_paths(connection_properties):
        try:
            # NOTE: fixed typos in the log message
            # ("estabilish" -> "establish", "acessing" -> "accessing").
            msg = _LI("Attempting to establish an iSCSI session to "
                      "target %(target_iqn)s on portal %(target_portal)s "
                      "accessing LUN %(target_lun)s using initiator "
                      "%(initiator_name)s.")
            LOG.info(msg, dict(target_portal=target_portal,
                               target_iqn=target_iqn,
                               target_lun=target_lun,
                               initiator_name=initiator_name))
            self._iscsi_utils.login_storage_target(
                target_lun=target_lun,
                target_iqn=target_iqn,
                target_portal=target_portal,
                auth_username=connection_properties.get('auth_username'),
                auth_password=connection_properties.get('auth_password'),
                mpio_enabled=CONF.hyperv.use_multipath_io,
                initiator_name=initiator_name)
            volume_connected = True
            if not CONF.hyperv.use_multipath_io:
                break
        except os_win_exc.OSWinException:
            LOG.exception(_LE("Could not connect iSCSI target %s."),
                          target_iqn)

    if not volume_connected:
        raise exception.VolumeAttachFailed(
            _("Could not connect volume %s.") %
            connection_properties['volume_id'])
def _verify_setup(self):
    """Validate host prerequisites for the configured options.

    :raises exception.ServiceUnavailable: when multipath IO is
        requested but the MPIO Windows feature is not enabled.
    """
    if not CONF.hyperv.use_multipath_io:
        return

    mpio_enabled = self._hostutils.check_server_feature(
        self._hostutils.FEATURE_MPIO)
    if not mpio_enabled:
        err_msg = _LE(
            "Using multipath connections for iSCSI and FC disks "
            "requires the Multipath IO Windows feature to be "
            "enabled. MPIO must be configured to claim such devices.")
        raise exception.ServiceUnavailable(err_msg)
def _check_minimum_windows_version(self):
    """Refuse to start on Windows versions older than 6.2.

    :raises exception.HypervisorTooOld: when the host runs a Windows
        kernel older than 6.2 (Windows / Hyper-V Server 2012).
    """
    if not utilsfactory.get_hostutils().check_min_windows_version(6, 2):
        # NOTE: fixed the comment — kernel 6.2 is Windows Server 2012
        # (not R2), matching the log message below.
        # The host is older than Windows / Hyper-V Server 2012. Log an
        # error, letting users know that this version is not supported
        # any longer.
        LOG.error(_LE('You are running nova-compute on an unsupported '
                      'version of Windows (older than Windows / Hyper-V '
                      'Server 2012). The support for this version of '
                      'Windows has been removed in Mitaka.'))
        raise exception.HypervisorTooOld(version='6.2')
def _create_config_drive(self, instance, injected_files, admin_password,
                         network_info, rescue=False):
    """Build a config drive for the instance and return its path.

    An ISO is always generated first; unless config_drive_cdrom is
    set, it is converted to a VHD via qemu-img and the ISO removed.

    :raises vmutils.UnsupportedConfigDriveFormatException: for any
        configured format other than iso9660.
    """
    if CONF.config_drive_format != 'iso9660':
        raise vmutils.UnsupportedConfigDriveFormatException(
            _('Invalid config_drive_format "%s"') %
            CONF.config_drive_format)

    LOG.info(_LI('Using config drive for instance'), instance=instance)

    extra_md = {}
    # Only embed the admin password when explicitly enabled.
    if admin_password and CONF.hyperv.config_drive_inject_password:
        extra_md['admin_pass'] = admin_password

    inst_md = instance_metadata.InstanceMetadata(
        instance, content=injected_files, extra_md=extra_md,
        network_info=network_info)

    configdrive_path_iso = self._pathutils.get_configdrive_path(
        instance.name, constants.DVD_FORMAT, rescue=rescue)
    LOG.info(_LI('Creating config drive at %(path)s'),
             {'path': configdrive_path_iso}, instance=instance)

    with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
        try:
            cdb.make_drive(configdrive_path_iso)
        except processutils.ProcessExecutionError as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Creating config drive failed with '
                              'error: %s'), e, instance=instance)

    if CONF.hyperv.config_drive_cdrom:
        # Attach the ISO directly as a virtual CD-ROM.
        return configdrive_path_iso

    # Convert the ISO to a raw VHD and drop the intermediate image.
    configdrive_path = self._pathutils.get_configdrive_path(
        instance.name, constants.DISK_FORMAT_VHD, rescue=rescue)
    utils.execute(CONF.hyperv.qemu_img_cmd,
                  'convert', '-f', 'raw', '-O', 'vpc',
                  configdrive_path_iso, configdrive_path,
                  attempts=1)
    self._pathutils.remove(configdrive_path_iso)

    return configdrive_path
def _cleanup_failed_disk_migration(self, instance_path, revert_path,
                                   dest_path):
    """Best-effort rollback of a failed disk migration.

    Removes the partially-created destination directory and restores
    the original files from the revert location. Any error here is
    logged and ignored so the caller's error handling proceeds.
    """
    try:
        if dest_path and self._pathutils.exists(dest_path):
            self._pathutils.rmtree(dest_path)
        if self._pathutils.exists(revert_path):
            self._pathutils.move_folder_files(revert_path, instance_path)
    except Exception as ex:
        # Log and ignore this exception
        LOG.exception(ex)
        LOG.error(_LE("Cannot cleanup migration files"))
def _get_conn_v2(self, host="localhost"):
    """Open a WMI v2 virtualization namespace connection to *host*.

    :raises vmutils.HyperVException: with a message mapped from the
        underlying COM error (unsupported target, unreachable host,
        or a generic live migration failure).
    """
    try:
        return wmi.WMI(moniker="//%s/root/virtualization/v2" % host)
    except wmi.x_wmi as ex:
        LOG.exception(_LE("Get version 2 connection error"))
        hresult = ex.com_error.hresult
        if hresult == -2147217394:
            # WBEM_E_INVALID_NAMESPACE: v2 namespace missing on target.
            msg = (_('Live migration is not supported on target host '
                     '"%s"') % host)
        elif hresult == -2147023174:
            # RPC_S_SERVER_UNAVAILABLE: host cannot be reached.
            msg = (_('Target live migration host "%s" is unreachable')
                   % host)
        else:
            msg = _("Live migration failed: %s") % ex.message
        raise vmutils.HyperVException(msg)
def get_image_vm_generation(self, instance_id, image_meta):
    """Resolve the VM generation requested by the image metadata.

    Falls back to the host's default generation when the image does
    not specify one.

    :returns: one of the constants.VM_GEN_* values.
    :raises exception.InstanceUnacceptable: when the requested
        generation is not supported by this host.
    """
    image_props = image_meta['properties']
    default_vm_gen = self._hostutils.get_default_vm_generation()
    image_prop_vm = image_props.get(constants.IMAGE_PROP_VM_GEN,
                                    default_vm_gen)

    supported = self._hostutils.get_supported_vm_types()
    if image_prop_vm not in supported:
        reason = _LE('Requested VM Generation %s is not supported on '
                     'this OS.') % image_prop_vm
        raise exception.InstanceUnacceptable(instance_id=instance_id,
                                             reason=reason)

    return VM_GENERATIONS[image_prop_vm]
def get_image_vm_generation(self, root_vhd_path, image_meta):
    """Determine and validate the VM generation for the given image.

    :param root_vhd_path: path to the instance root disk, if any.
    :param image_meta: image metadata dict exposing a "properties" key.
    :returns: one of the constants.VM_GEN_* values.
    :raises vmutils.HyperVException: when the generation is unsupported
        by the host, or a non Gen-1 VM uses a VHD-format root disk.
    """
    image_props = image_meta["properties"]
    default_vm_gen = self._hostutils.get_default_vm_generation()
    image_prop_vm = image_props.get(constants.IMAGE_PROP_VM_GEN,
                                    default_vm_gen)
    if image_prop_vm not in self._hostutils.get_supported_vm_types():
        # NOTE: fixed a double space in the log message ("on  this OS.")
        # so it matches the exception text below.
        LOG.error(_LE("Requested VM Generation %s is not supported on "
                      "this OS."), image_prop_vm)
        raise vmutils.HyperVException(
            _("Requested VM Generation %s is not supported on this "
              "OS.") % image_prop_vm)

    vm_gen = VM_GENERATIONS[image_prop_vm]

    # Generation 2 VMs require VHDX format root disks.
    if (vm_gen != constants.VM_GEN_1 and root_vhd_path and
            self._vhdutils.get_vhd_format(root_vhd_path) ==
            constants.DISK_FORMAT_VHD):
        LOG.error(_LE("Requested VM Generation %s, but provided VHD "
                      "instead of VHDX."), vm_gen)
        raise vmutils.HyperVException(
            _("Requested VM Generation %s, but provided VHD instead of "
              "VHDX.") % vm_gen)

    return vm_gen
def _get_conn_v2(self, host='localhost'):
    """Return a WMI connection to the v2 virtualization namespace.

    :raises vmutils.HyperVException: translating the COM error into a
        live-migration specific message where possible.
    """
    moniker = '//%s/root/virtualization/v2' % host
    try:
        return wmi.WMI(moniker=moniker)
    except wmi.x_wmi as ex:
        LOG.exception(_LE('Get version 2 connection error'))
        hresult = ex.com_error.hresult
        if hresult == -2147217394:
            # The v2 namespace does not exist on the target host.
            msg = (
                _('Live migration is not supported on target host "%s"')
                % host)
        elif hresult == -2147023174:
            # The target host could not be contacted.
            msg = (_('Target live migration host "%s" is unreachable')
                   % host)
        else:
            msg = _('Live migration failed: %s') % ex.message
        raise vmutils.HyperVException(msg)
def _set_vm_state(self, instance, req_state):
    """Transition the instance's VM into the requested power state.

    Failures are logged and re-raised unchanged.
    """
    name = instance.name
    try:
        self._vmutils.set_vm_state(name, req_state)
        LOG.debug("Successfully changed state of VM %(instance_name)s"
                  " to: %(req_state)s",
                  {'instance_name': name, 'req_state': req_state})
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Failed to change vm state of %(instance_name)s"
                          " to %(req_state)s"),
                      {'instance_name': name, 'req_state': req_state})
def _migrate_vm(self, ctxt, vm_name, host):
    """Evacuate one VM from this host using nova's public API.

    Active instances are live migrated; others are resized in place.
    VMs without a nova instance uuid are ignored.

    :raises exception.MigrationError: wrapping any failure.
    """
    try:
        instance_uuid = self._vmutils.get_instance_uuid(vm_name)
        if not instance_uuid:
            LOG.info(_LI('VM "%s" running on this host was not created by '
                         'nova. Skip migrating this vm to a new host.'),
                     vm_name)
            return

        instance = objects.Instance.get_by_uuid(ctxt, instance_uuid)
        is_active = instance.vm_state == vm_states.ACTIVE
        if is_active:
            self._api.live_migrate(ctxt, instance,
                                   block_migration=False,
                                   disk_over_commit=False,
                                   host_name=None)
        else:
            self._api.resize(ctxt, instance, flavor_id=None,
                             clean_shutdown=True)
        self._wait_for_instance_pending_task(ctxt, instance_uuid)
    except Exception as e:
        LOG.error(_LE('Migrating vm failed with error: %s '), e)
        raise exception.MigrationError(
            reason='Unable to migrate %s.' % vm_name)
def attach_volume(self, connection_info, instance_name, ebs_root=False):
    """Attach a volume to the SCSI controller or to the IDE controller
    if ebs_root is True.

    On failure, the iSCSI session is logged out before re-raising.
    """
    target_iqn = None
    LOG.debug("Attach_volume: %(connection_info)s to %(instance_name)s",
              {'connection_info': connection_info,
               'instance_name': instance_name})
    try:
        self.login_storage_target(connection_info)

        data = connection_info['data']
        target_lun = data['target_lun']
        target_iqn = data['target_iqn']

        # Resolve the mounted disk backing this LUN.
        mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn,
                                                            target_lun)
        if ebs_root:
            # Boot volumes go into slot 0 of the first IDE controller.
            ctrller_path = self._vmutils.get_vm_ide_controller(
                instance_name, 0)
            slot = 0
        else:
            # Data volumes use the first free SCSI controller slot.
            ctrller_path = self._vmutils.get_vm_scsi_controller(
                instance_name)
            slot = self._vmutils.get_free_controller_slot(ctrller_path)
        self._vmutils.attach_volume_to_controller(instance_name,
                                                  ctrller_path,
                                                  slot,
                                                  mounted_disk_path)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE('Unable to attach volume to instance %s'),
                      instance_name)
            if target_iqn:
                self.logout_storage_target(target_iqn)
def attach_volume(self, connection_info, instance_name,
                  disk_bus=constants.CTRL_TYPE_SCSI):
    """Attach a volume to the SCSI controller or to the IDE controller
    if ebs_root is True.

    On failure, the iSCSI session is logged out (when a target IQN is
    known) before the original exception is re-raised.
    """
    target_iqn = None
    LOG.debug("Attach_volume: %(connection_info)s to %(instance_name)s",
              {'connection_info': connection_info,
               'instance_name': instance_name})
    try:
        self.login_storage_target(connection_info)

        mounted_disk_path = self.get_mounted_disk_path_from_volume(
            connection_info)

        if disk_bus == constants.CTRL_TYPE_IDE:
            # Find the IDE controller for the vm.
            ctrller_path = self._vmutils.get_vm_ide_controller(
                instance_name, 0)
            # Attaching to the first slot
            slot = 0
        else:
            # Find the SCSI controller for the vm
            ctrller_path = self._vmutils.get_vm_scsi_controller(
                instance_name)
            slot = self._vmutils.get_free_controller_slot(ctrller_path)
        self._vmutils.attach_volume_to_controller(instance_name,
                                                  ctrller_path,
                                                  slot,
                                                  mounted_disk_path)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE('Unable to attach volume to instance %s'),
                      instance_name)
            # NOTE: use .get() so a missing 'target_iqn' key cannot
            # raise inside the cleanup block, which would replace the
            # original exception being re-raised.
            target_iqn = connection_info['data'].get('target_iqn')
            if target_iqn:
                self.logout_storage_target(target_iqn)
def attach_volume(self, connection_info, instance_name, ebs_root=False):
    """Attach a share-backed disk to the instance's IDE or SCSI bus.

    :param ebs_root: when True, attach as the boot disk on IDE
        controller 0, slot 0; otherwise use a free SCSI slot.
    :raises vmutils.HyperVException: when the attach fails.
    """
    self.ensure_share_mounted(connection_info)
    disk_path = self._get_disk_path(connection_info)
    try:
        if ebs_root:
            controller = self._vmutils.get_vm_ide_controller(
                instance_name, 0)
            slot = 0
        else:
            controller = self._vmutils.get_vm_scsi_controller(
                instance_name)
            slot = self._vmutils.get_free_controller_slot(controller)

        self._vmutils.attach_drive(instance_name, disk_path,
                                   controller, slot)
    except vmutils.HyperVException as exn:
        LOG.exception(_LE('Attach volume failed: %s'), exn)
        raise vmutils.HyperVException(_('Unable to attach volume '
                                        'to instance %s') % instance_name)
def _neutron_failed_callback(self, event_name, instance):
    """Handle a neutron VIF plugging failure event.

    :raises exception.VirtualInterfaceCreateException: only when
        CONF.vif_plugging_is_fatal is set; otherwise the failure is
        merely logged.
    """
    LOG.error(_LE('Neutron Reported failure on event %s'),
              event_name, instance=instance)
    if CONF.vif_plugging_is_fatal:
        raise exception.VirtualInterfaceCreateException()
def _snapshot(self, context, instance, image_id, update_task_state):
    """Create snapshot from a running VM instance."""
    instance_name = instance.name
    LOG.debug("Creating snapshot for instance %s", instance_name)
    # Take a Hyper-V checkpoint so the disks can be copied consistently.
    snapshot_path = self._vmutils.take_vm_snapshot(instance_name)
    update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)

    export_dir = None

    try:
        src_vhd_path = self._pathutils.lookup_root_vhd_path(instance_name)

        LOG.debug("Getting info for VHD %s", src_vhd_path)
        # A non-empty parent path means the root disk is a differencing
        # disk layered on a base image.
        src_base_disk_path = self._vhdutils.get_vhd_parent_path(
            src_vhd_path)

        export_dir = self._pathutils.get_export_dir(instance_name)

        # Copy the (possibly differencing) root disk into the export dir.
        dest_vhd_path = os.path.join(export_dir, os.path.basename(
            src_vhd_path))
        LOG.debug('Copying VHD %(src_vhd_path)s to %(dest_vhd_path)s',
                  {'src_vhd_path': src_vhd_path,
                   'dest_vhd_path': dest_vhd_path})
        self._pathutils.copyfile(src_vhd_path, dest_vhd_path)

        image_vhd_path = None
        if not src_base_disk_path:
            # Flat disk: upload the copy as-is.
            image_vhd_path = dest_vhd_path
        else:
            # Differencing disk: copy the base image, re-link the copied
            # child to the copied parent, then merge the child into it so
            # a single flat disk can be uploaded.
            basename = os.path.basename(src_base_disk_path)
            dest_base_disk_path = os.path.join(export_dir, basename)
            LOG.debug('Copying base disk %(src_vhd_path)s to '
                      '%(dest_base_disk_path)s',
                      {'src_vhd_path': src_vhd_path,
                       'dest_base_disk_path': dest_base_disk_path})
            self._pathutils.copyfile(src_base_disk_path,
                                     dest_base_disk_path)

            LOG.debug("Reconnecting copied base VHD "
                      "%(dest_base_disk_path)s and diff "
                      "VHD %(dest_vhd_path)s",
                      {'dest_base_disk_path': dest_base_disk_path,
                       'dest_vhd_path': dest_vhd_path})
            self._vhdutils.reconnect_parent_vhd(dest_vhd_path,
                                                dest_base_disk_path)

            LOG.debug("Merging diff disk %s into its parent.",
                      dest_vhd_path)
            self._vhdutils.merge_vhd(dest_vhd_path)
            image_vhd_path = dest_base_disk_path

        LOG.debug("Updating Glance image %(image_id)s with content from "
                  "merged disk %(image_vhd_path)s",
                  {'image_id': image_id, 'image_vhd_path': image_vhd_path})
        update_task_state(task_state=task_states.IMAGE_UPLOADING,
                          expected_state=task_states.IMAGE_PENDING_UPLOAD)
        self._save_glance_image(context, image_id, image_vhd_path)

        LOG.debug("Snapshot image %(image_id)s updated for VM "
                  "%(instance_name)s",
                  {'image_id': image_id, 'instance_name': instance_name})
    finally:
        # Best-effort cleanup: drop the VM checkpoint and the export
        # directory regardless of whether the upload succeeded.
        try:
            LOG.debug("Removing snapshot %s", image_id)
            self._vmutils.remove_vm_snapshot(snapshot_path)
        except Exception:
            LOG.exception(_LE('Failed to remove snapshot for VM %s'),
                          instance_name, instance=instance)
        if export_dir:
            LOG.debug('Removing directory: %s', export_dir)
            self._pathutils.rmtree(export_dir)
def _looper():
    """Monitor VM failovers and trigger migrations; never raise.

    Exceptions are logged so the polling loop stays alive.
    """
    try:
        self._clustutils.monitor_vm_failover(self._failover_migrate)
    except Exception:
        # NOTE: fixed typo in log message ("occured" -> "occurred").
        LOG.exception(_LE('Exception occurred during failover '
                          'observation / migration.'))