def check_source(self, context, block_device_info, vol_drvs):
    """Check the source host

    Here we check the source host to see if it's capable of migrating
    the instance to the destination host. There may be conditions
    that can only be checked on the source side.

    Also, get the instance ready for the migration by removing any
    virtual optical devices attached to the LPAR.

    :param context: security context
    :param block_device_info: result of _get_instance_block_device_info
    :param vol_drvs: volume drivers for the attached volumes
    :returns: a dict containing migration info
    """
    lpar_w = vm.get_instance_wrapper(self.drvr.adapter, self.instance,
                                     self.drvr.host_uuid)
    self.lpar_w = lpar_w

    LOG.debug('Dest Migration data: %s' % self.dest_data)

    # Only 'migrate_data' is sent to the destination on the prelive call.
    mig_data = {'public_key': mgmt_task.get_public_key(self.drvr.adapter)}
    self.src_data['migrate_data'] = mig_data
    LOG.debug('Src Migration data: %s' % self.src_data)

    # Check proc compatibility modes
    if (lpar_w.proc_compat_mode and lpar_w.proc_compat_mode not in
            self.dest_data['dest_proc_compat'].split(',')):
        raise LiveMigrationProcCompat(
            name=self.instance.name, mode=lpar_w.proc_compat_mode,
            modes=', '.join(self.dest_data['dest_proc_compat'].split(',')))

    # Check if VM is ready for migration
    self._check_migration_ready(lpar_w, self.drvr.host_wrapper)

    if lpar_w.migration_state != 'Not_Migrating':
        raise LiveMigrationInvalidState(name=self.instance.name,
                                        state=lpar_w.migration_state)

    # Check the number of migrations for capacity
    _verify_migration_capacity(self.drvr.host_wrapper, self.instance)

    # Get the 'source' pre-migration data for the volume drivers. Should
    # automatically update the mig_data dictionary as needed.
    for vol_drv in vol_drvs:
        vol_drv.pre_live_migration_on_source(mig_data)

    # Remove the VOpt devices
    LOG.debug('Removing VOpt.', instance=self.instance)
    media.ConfigDrivePowerVM(self.drvr.adapter,
                             self.drvr.host_uuid).dlt_vopt(lpar_w.uuid)
    LOG.debug('Removing VOpt finished.', instance=self.instance)

    # Ensure the vterm is non-active
    vterm.close_vterm(self.drvr.adapter, lpar_w.uuid)

    return self.src_data
def delete_lpar(adapter, instance):
    """Delete an LPAR.

    :param adapter: The adapter for the pypowervm API.
    :param instance: The nova instance corresponding to the lpar to delete.
    """
    lpar_uuid = get_pvm_uuid(instance)
    # Attempt to delete the VM. To avoid failures due to open vterm, we will
    # attempt to close the vterm before issuing the delete.
    try:
        LOG.info('Deleting virtual machine.', instance=instance)

        # Ensure any vterms are closed. Will no-op otherwise.
        vterm.close_vterm(adapter, lpar_uuid)

        # Run the LPAR delete
        resp = adapter.delete(pvm_lpar.LPAR.schema_type, root_id=lpar_uuid)
        LOG.info('Virtual machine delete status: %d', resp.status,
                 instance=instance)
        return resp
    except pvm_exc.HttpError as e:
        with excutils.save_and_reraise_exception(logger=LOG) as sare:
            if e.response and e.response.status == 404:
                # LPAR is already gone - don't fail
                sare.reraise = False
                LOG.info('Virtual Machine not found', instance=instance)
            else:
                LOG.error('HttpError deleting virtual machine.',
                          instance=instance)
    except pvm_exc.Error:
        with excutils.save_and_reraise_exception(logger=LOG):
            # Attempting to close vterm did not help so raise exception
            LOG.error('Virtual machine delete failed: LPARID=%s', lpar_uuid)
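# Hedged illustration, not part of the source module: the 404 branch above
# relies on oslo_utils' save_and_reraise_exception context manager, which
# re-raises by default unless its .reraise flag is cleared. A minimal,
# self-contained sketch of that behavior:
from oslo_utils import excutils

try:
    raise ValueError('boom')
except ValueError:
    with excutils.save_and_reraise_exception() as sare:
        # Clearing reraise swallows the exception once the block exits,
        # which is how the "LPAR is already gone" case avoids failing.
        sare.reraise = False
print('still running')  # reached because reraise was set to False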
def dlt_lpar(adapter, lpar_uuid):
    """Delete an LPAR

    :param adapter: The adapter for the pypowervm API
    :param lpar_uuid: The lpar to delete
    """
    # Attempt to delete the VM.
    try:
        LOG.info(_LI('Deleting virtual machine. LPARID: %s'), lpar_uuid)

        # Ensure any vterms are closed. Will no-op otherwise.
        vterm.close_vterm(adapter, lpar_uuid)

        # Run the LPAR delete
        resp = adapter.delete(pvm_lpar.LPAR.schema_type, root_id=lpar_uuid)
        LOG.info(_LI('Virtual machine delete status: %d'), resp.status)
        return resp
    except pvm_exc.HttpError as e:
        if e.response and e.response.status == 404:
            LOG.warning(_LW('Virtual Machine not found LPAR_ID: %s'),
                        lpar_uuid)
        else:
            LOG.error(_LE('HttpError deleting virtual machine. LPARID: %s'),
                      lpar_uuid)
            raise
    except pvm_exc.Error:
        # Attempting to close vterm did not help so raise exception
        LOG.error(_LE('Virtual machine delete failed: LPARID=%s'), lpar_uuid)
        raise
def delete_lpar(adapter, instance):
    """Delete an LPAR

    :param adapter: The adapter for the pypowervm API.
    :param instance: The nova instance whose LPAR is to be deleted.
    """
    lpar_uuid = get_pvm_uuid(instance)
    # Attempt to delete the VM.
    try:
        LOG.info('Deleting LPAR', instance=instance)

        # Ensure any vterms are closed. Will no-op otherwise.
        vterm.close_vterm(adapter, lpar_uuid)

        # Run the LPAR delete
        resp = adapter.delete(pvm_lpar.LPAR.schema_type, root_id=lpar_uuid)
        LOG.info('LPAR delete status: %d', resp.status, instance=instance)
        return resp
    except pvm_exc.HttpNotFound:
        LOG.info('LPAR not found (already deleted).', instance=instance)
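# Hedged usage sketch, not part of the source module: it assumes a pypowervm
# Session/Adapter can be built with default (local) authentication on the
# management VM, and that `instance` is a nova Instance object whose LPAR
# UUID resolves via get_pvm_uuid(). Because HttpNotFound is swallowed above,
# repeating the delete is effectively a no-op.
import pypowervm.adapter as pvm_adpt

adapter = pvm_adpt.Adapter(pvm_adpt.Session())

delete_lpar(adapter, instance)  # deletes the LPAR and returns the response
delete_lpar(adapter, instance)  # logs 'LPAR not found' and returns None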
def dlt_lpar(adapter, lpar_uuid):
    """Delete an LPAR

    :param adapter: The adapter for the pypowervm API
    :param lpar_uuid: The lpar to delete
    """
    # Attempt to delete the VM. Close any open vterm first so the delete
    # does not fail because a console session is still active.
    try:
        LOG.info(_LI("Deleting virtual machine. LPARID: %s"), lpar_uuid)

        # Ensure any vterms are closed. Will no-op otherwise.
        vterm.close_vterm(adapter, lpar_uuid)

        # Run the LPAR delete
        resp = adapter.delete(pvm_lpar.LPAR.schema_type, root_id=lpar_uuid)
        LOG.info(_LI("Virtual machine delete status: %d"), resp.status)
        return resp
    except pvm_exc.Error:
        # Attempting to close vterm did not help so raise exception
        LOG.error(_LE("Virtual machine delete failed: LPARID=%s"), lpar_uuid)
        raise
def check_source(self, context, block_device_info, vol_drvs):
    """Check the source host

    Here we check the source host to see if it's capable of migrating
    the instance to the destination host. There may be conditions
    that can only be checked on the source side.

    Also, get the instance ready for the migration by removing any
    virtual optical devices attached to the LPAR.

    :param context: security context
    :param block_device_info: result of _get_instance_block_device_info
    :param vol_drvs: volume drivers for the attached volumes
    :returns: a PowerVMLiveMigrateData object
    """
    lpar_w = vm.get_instance_wrapper(self.drvr.adapter, self.instance)
    self.lpar_w = lpar_w

    LOG.debug('Dest Migration data: %s' % self.mig_data)

    # Check proc compatibility modes
    if (lpar_w.proc_compat_mode and lpar_w.proc_compat_mode not in
            self.mig_data.dest_proc_compat.split(',')):
        msg = (_("Cannot migrate %(name)s because its "
                 "processor compatibility mode %(mode)s "
                 "is not in the list of modes \"%(modes)s\" "
                 "supported by the target host.") %
               dict(name=self.instance.name,
                    mode=lpar_w.proc_compat_mode,
                    modes=', '.join(
                        self.mig_data.dest_proc_compat.split(','))))
        raise exception.MigrationPreCheckError(reason=msg)

    # Check if VM is ready for migration
    self._check_migration_ready(lpar_w, self.drvr.host_wrapper)

    if lpar_w.migration_state != 'Not_Migrating':
        msg = (_("Live migration of instance '%(name)s' failed because "
                 "the migration state is: %(state)s") %
               dict(name=self.instance.name,
                    state=lpar_w.migration_state))
        raise exception.MigrationPreCheckError(reason=msg)

    # Check the number of migrations for capacity
    _verify_migration_capacity(self.drvr.host_wrapper, self.instance)

    self.mig_data.public_key = mgmt_task.get_public_key(self.drvr.adapter)

    # Get the 'source' pre-migration data for the volume drivers.
    vol_data = {}
    for vol_drv in vol_drvs:
        vol_drv.pre_live_migration_on_source(vol_data)
    self.mig_data.vol_data = vol_data

    LOG.debug('Src Migration data: %s' % self.mig_data)

    # Create a FeedTask to scrub any orphaned mappings/storage associated
    # with this LPAR. (Don't run it yet - we want to do the VOpt removal
    # within the same FeedTask.)
    stg_ftsk = stor_task.ScrubOrphanStorageForLpar(self.drvr.adapter,
                                                   lpar_w.id)
    # Add subtasks to remove the VOpt devices under the same FeedTask.
    media.ConfigDrivePowerVM(self.drvr.adapter, self.drvr.host_uuid
                             ).dlt_vopt(lpar_w.uuid, stg_ftsk=stg_ftsk,
                                        remove_mappings=False)
    # Now execute the FeedTask, performing both scrub and VOpt removal.
    stg_ftsk.execute()

    # Ensure the vterm is non-active
    vterm.close_vterm(self.drvr.adapter, lpar_w.uuid)

    return self.mig_data
def check_source(self, context, block_device_info, vol_drvs):
    """Check the source host

    Here we check the source host to see if it's capable of migrating
    the instance to the destination host. There may be conditions
    that can only be checked on the source side.

    Also, get the instance ready for the migration by removing any
    virtual optical devices attached to the LPAR.

    :param context: security context
    :param block_device_info: result of _get_instance_block_device_info
    :param vol_drvs: volume drivers for the attached volumes
    :returns: a PowerVMLiveMigrateData object
    """
    lpar_w = vm.get_instance_wrapper(self.drvr.adapter, self.instance)
    self.lpar_w = lpar_w

    LOG.debug('Dest Migration data: %s', self.mig_data,
              instance=self.instance)

    # Check proc compatibility modes
    if (lpar_w.proc_compat_mode and lpar_w.proc_compat_mode not in
            self.mig_data.dest_proc_compat.split(',')):
        msg = (_("Cannot migrate %(name)s because its "
                 "processor compatibility mode %(mode)s "
                 "is not in the list of modes \"%(modes)s\" "
                 "supported by the target host.") %
               dict(name=self.instance.name,
                    mode=lpar_w.proc_compat_mode,
                    modes=', '.join(
                        self.mig_data.dest_proc_compat.split(','))))
        raise exception.MigrationPreCheckError(reason=msg)

    # Check if VM is ready for migration
    self._check_migration_ready(lpar_w, self.drvr.host_wrapper)

    if lpar_w.migration_state != 'Not_Migrating':
        msg = (_("Live migration of instance '%(name)s' failed because "
                 "the migration state is: %(state)s") %
               dict(name=self.instance.name,
                    state=lpar_w.migration_state))
        raise exception.MigrationPreCheckError(reason=msg)

    # Check the number of migrations for capacity
    _verify_migration_capacity(self.drvr.host_wrapper, self.instance)

    self.mig_data.public_key = mgmt_task.get_public_key(self.drvr.adapter)

    # Get the 'source' pre-migration data for the volume drivers.
    vol_data = {}
    for vol_drv in vol_drvs:
        vol_drv.pre_live_migration_on_source(vol_data)
    self.mig_data.vol_data = vol_data

    LOG.debug('Source migration data: %s', self.mig_data,
              instance=self.instance)

    # Create a FeedTask to scrub any orphaned mappings/storage associated
    # with this LPAR. (Don't run it yet - we want to do the VOpt removal
    # within the same FeedTask.)
    stg_ftsk = stor_task.ScrubOrphanStorageForLpar(self.drvr.adapter,
                                                   lpar_w.id)
    # Add subtasks to remove the VOpt devices under the same FeedTask.
    media.ConfigDrivePowerVM(self.drvr.adapter).dlt_vopt(
        lpar_w.uuid, stg_ftsk=stg_ftsk, remove_mappings=False)
    # Now execute the FeedTask, performing both scrub and VOpt removal.
    stg_ftsk.execute()

    # Ensure the vterm is non-active
    vterm.close_vterm(self.drvr.adapter, lpar_w.uuid)

    return self.mig_data
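# Hedged illustration, not part of the source module: the processor
# compatibility pre-check above reduces to a membership test against the
# comma-separated mode list reported by the destination. The values below
# are hypothetical.
dest_proc_compat = 'default,POWER7,POWER8'  # as carried in mig_data
lpar_mode = 'POWER6'                        # the source LPAR's current mode

compatible = (not lpar_mode or
              lpar_mode in dest_proc_compat.split(','))
# compatible is False here, so check_source would raise
# MigrationPreCheckError before any migration work begins.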