Example #1
    def instance_disk_iter(self, instance, disk_type=DiskType.BOOT, lpar_wrap=None):
        """Return the instance's storage element wrapper of the specified type.

        :param instance: nova.objects.instance.Instance object owning the
                         requested disk.
        :param disk_type: The type of disk to find, one of the DiskType enum
                          values.
        :param lpar_wrap: pypowervm.wrappers.logical_partition.LPAR
                          corresponding to the instance.  If not specified, it
                          will be retrieved; i.e. specify this parameter to
                          save on REST calls.
        :return: Iterator of tuples of (storage_elem, VIOS), where storage_elem
                 is a storage element wrapper (pypowervm.wrappers.storage.VOpt,
                 VDisk, PV, or LU) associated with the instance; and VIOS is
                 the wrapper of the Virtual I/O Server owning that storage
                 element.
        """
        if lpar_wrap is None:
            lpar_wrap = vm.get_instance_wrapper(self.adapter, instance, self.host_uuid)
        match_func = self.disk_match_func(disk_type, instance)
        for vios_uuid in self.vios_uuids:
            vios_wrap = pvm_vios.VIOS.wrap(
                self.adapter.read(pvm_vios.VIOS.schema_type, root_id=vios_uuid, xag=[pvm_vios.VIOS.xags.SCSI_MAPPING])
            )
            for scsi_map in tsk_map.find_maps(
                vios_wrap.scsi_mappings, client_lpar_id=lpar_wrap.id, match_func=match_func
            ):
                yield scsi_map.backing_storage, vios_wrap
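A caller-side sketch for context (not part of the example): given a disk driver object that exposes instance_disk_iter() as above, the first yielded pair already identifies the instance's boot disk. The names disk_dvr and instance are placeholders for objects built elsewhere.

    def find_boot_disk(disk_dvr, instance):
        # Only mappings whose backing storage matched the DiskType.BOOT match
        # function are yielded, so the first hit is the boot disk.
        for stg_elem, vios_w in disk_dvr.instance_disk_iter(instance):
            return stg_elem, vios_w
        return None, None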
Example #2
    def _update_nvram(self, instance=None):
        """Perform an update of NVRAM for instance.

        :param instance: The instance to update or if not specified pull the
                         next one off the list to update.
        """
        if instance is None:
            uuid, instance = self._pop_from_list()
            if uuid is None:
                return
        else:
            # Remove any pending updates
            self._pop_from_list(uuid=instance.uuid)

        try:
            LOG.debug('Updating NVRAM.', instance=instance)
            data = vm.get_instance_wrapper(self._adapter,
                                           instance,
                                           xag=[pvm_const.XAG.NVRAM]).nvram
            LOG.debug('NVRAM for instance: %s', data, instance=instance)
            if data is not None:
                self._api.store(instance, data)
        except pvm_exc.Error:
            # Update exceptions should not end the operation.
            LOG.exception('Could not update NVRAM.', instance=instance)
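Usage sketch (nvram_mgr stands in for an instance of the class that owns this method; it is not defined in the example): the method serves both a targeted update and a queue-drain role.

    # Targeted update for one VM; also drops any queued update for it.
    nvram_mgr._update_nvram(instance=instance)
    # Queue drain: pops one pending (uuid, instance) pair, or returns quietly
    # when nothing is pending.
    nvram_mgr._update_nvram()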
Example #3
    def check_source(self, context, block_device_info, vol_drvs):
        """Check the source host

        Here we check the source host to see if it's capable of migrating
        the instance to the destination host.  There may be conditions
        that can only be checked on the source side.

        Also, get the instance ready for the migration by removing any
        virtual optical devices attached to the LPAR.

        :param context: security context
        :param block_device_info: result of _get_instance_block_device_info
        :param vol_drvs: volume drivers for the attached volumes
        :returns: a dict containing migration info
        """

        lpar_w = vm.get_instance_wrapper(self.drvr.adapter, self.instance,
                                         self.drvr.host_uuid)
        self.lpar_w = lpar_w

        LOG.debug('Dest Migration data: %s' % self.dest_data)

        # Only 'migrate_data' is sent to the destination on prelive call.
        mig_data = {'public_key': mgmt_task.get_public_key(self.drvr.adapter)}
        self.src_data['migrate_data'] = mig_data
        LOG.debug('Src Migration data: %s' % self.src_data)

        # Check proc compatibility modes
        if (lpar_w.proc_compat_mode and lpar_w.proc_compat_mode
                not in self.dest_data['dest_proc_compat'].split(',')):
            raise LiveMigrationProcCompat(
                name=self.instance.name,
                mode=lpar_w.proc_compat_mode,
                modes=', '.join(self.dest_data['dest_proc_compat'].split(',')))

        # Check if VM is ready for migration
        self._check_migration_ready(lpar_w, self.drvr.host_wrapper)

        if lpar_w.migration_state != 'Not_Migrating':
            raise LiveMigrationInvalidState(name=self.instance.name,
                                            state=lpar_w.migration_state)

        # Check the number of migrations for capacity
        _verify_migration_capacity(self.drvr.host_wrapper, self.instance)

        # Get the 'source' pre-migration data for the volume drivers.  Should
        # automatically update the mig_data dictionary as needed.
        for vol_drv in vol_drvs:
            vol_drv.pre_live_migration_on_source(mig_data)

        # Remove the VOpt devices
        LOG.debug('Removing VOpt.', instance=self.instance)
        media.ConfigDrivePowerVM(self.drvr.adapter,
                                 self.drvr.host_uuid).dlt_vopt(lpar_w.uuid)
        LOG.debug('Removing VOpt finished.', instance=self.instance)

        # Ensure the vterm is non-active
        vterm.close_vterm(self.drvr.adapter, lpar_w.uuid)

        return self.src_data
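The processor-compatibility condition above is fairly dense; the same check as a small standalone helper (illustrative only, with dest_proc_compat being the comma-separated list advertised by the destination, e.g. 'default,POWER8'):

    def proc_compat_ok(lpar_w, dest_proc_compat):
        # No explicit compatibility mode on the LPAR means nothing to validate.
        if not lpar_w.proc_compat_mode:
            return True
        # Otherwise the LPAR's mode must be one the destination supports.
        return lpar_w.proc_compat_mode in dest_proc_compat.split(',')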
Example #4
    def _update_nvram(self, instance_uuid=None):
        """Perform an update of NVRAM for instance.

        :param instance_uuid: The instance uuid of the instance to update or if
                              not specified pull the next one off the set to
                              update.
        """
        if instance_uuid is None:
            instance_uuid = self._pop_from_set()
            if instance_uuid is None:
                return
        else:
            # Remove any pending updates
            self._pop_from_set(uuid=instance_uuid)

        try:
            LOG.debug('Updating NVRAM for instance with uuid: %s',
                      instance_uuid)
            data = vm.get_instance_wrapper(self._adapter,
                                           instance_uuid,
                                           xag=[pvm_const.XAG.NVRAM]).nvram
            LOG.debug('NVRAM for instance with uuid %(uuid)s: %(data)s', {
                'uuid': instance_uuid,
                'data': data
            })
            if data is not None:
                self._api.store(instance_uuid, data)
        except pvm_exc.Error:
            # Update exceptions should not end the operation.
            LOG.exception('Could not update NVRAM for instance with uuid %s.',
                          instance_uuid)
Example #5
    def instance_disk_iter(self,
                           instance,
                           disk_type=DiskType.BOOT,
                           lpar_wrap=None):
        """Return the instance's storage element wrapper of the specified type.

        :param instance: nova.objects.instance.Instance object owning the
                         requested disk.
        :param disk_type: The type of disk to find, one of the DiskType enum
                          values.
        :param lpar_wrap: pypowervm.wrappers.logical_partition.LPAR
                          corresponding to the instance.  If not specified, it
                          will be retrieved; i.e. specify this parameter to
                          save on REST calls.
        :return: Iterator of tuples of (storage_elem, VIOS), where storage_elem
                 is a storage element wrapper (pypowervm.wrappers.storage.VOpt,
                 VDisk, PV, or LU) associated with the instance; and VIOS is
                 the wrapper of the Virtual I/O Server owning that storage
                 element.
        """
        if lpar_wrap is None:
            lpar_wrap = vm.get_instance_wrapper(self.adapter, instance)
        match_func = self.disk_match_func(disk_type, instance)
        for vios_uuid in self.vios_uuids:
            vios_wrap = pvm_vios.VIOS.get(self.adapter,
                                          uuid=vios_uuid,
                                          xag=[pvm_const.XAG.VIO_SMAP])
            for scsi_map in tsk_map.find_maps(vios_wrap.scsi_mappings,
                                              client_lpar_id=lpar_wrap.id,
                                              match_func=match_func):
                yield scsi_map.backing_storage, vios_wrap
Example #6
    def check_source(self, context, block_device_info, vol_drvs):
        """Check the source host

        Here we check the source host to see if it's capable of migrating
        the instance to the destination host.  There may be conditions
        that can only be checked on the source side.

        Also, get the instance ready for the migration by removing any
        virtual optical devices attached to the LPAR.

        :param context: security context
        :param block_device_info: result of _get_instance_block_device_info
        :param vol_drvs: volume drivers for the attached volumes
        :returns: a dict containing migration info
        """

        lpar_w = vm.get_instance_wrapper(
            self.drvr.adapter, self.instance, self.drvr.host_uuid)
        self.lpar_w = lpar_w

        LOG.debug('Dest Migration data: %s' % self.dest_data)

        # Only 'migrate_data' is sent to the destination on prelive call.
        mig_data = {'public_key': mgmt_task.get_public_key(self.drvr.adapter)}
        self.src_data['migrate_data'] = mig_data
        LOG.debug('Src Migration data: %s' % self.src_data)

        # Check proc compatibility modes
        if (lpar_w.proc_compat_mode and lpar_w.proc_compat_mode not in
                self.dest_data['dest_proc_compat'].split(',')):
            raise LiveMigrationProcCompat(
                name=self.instance.name, mode=lpar_w.proc_compat_mode,
                modes=', '.join(self.dest_data['dest_proc_compat'].split(',')))

        # Check if VM is ready for migration
        self._check_migration_ready(lpar_w, self.drvr.host_wrapper)

        if lpar_w.migration_state != 'Not_Migrating':
            raise LiveMigrationInvalidState(name=self.instance.name,
                                            state=lpar_w.migration_state)

        # Check the number of migrations for capacity
        _verify_migration_capacity(self.drvr.host_wrapper, self.instance)

        # Get the 'source' pre-migration data for the volume drivers.  Should
        # automatically update the mig_data dictionary as needed.
        for vol_drv in vol_drvs:
            vol_drv.pre_live_migration_on_source(mig_data)

        # Remove the VOpt devices
        LOG.debug('Removing VOpt.', instance=self.instance)
        media.ConfigDrivePowerVM(self.drvr.adapter, self.drvr.host_uuid
                                 ).dlt_vopt(lpar_w.uuid)
        LOG.debug('Removing VOpt finished.', instance=self.instance)

        # Ensure the vterm is non-active
        vterm.close_vterm(self.drvr.adapter, lpar_w.uuid)

        return self.src_data
Example #7
    def disconnect_volume(self):
        """Disconnect the volume."""
        # Check if the VM is in a state where the detach is acceptable.
        lpar_w = vm.get_instance_wrapper(self.adapter, self.instance,
                                         self.host_uuid)
        capable, reason = lpar_w.can_modify_io()
        if not capable:
            raise exc.VolumeDetachFailed(volume_id=self.volume_id,
                                         instance_name=self.instance.name,
                                         reason=reason)

        # Run the disconnect
        self._disconnect_volume()

        if self.stg_ftsk.name == LOCAL_FEED_TASK:
            self.stg_ftsk.execute()
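The example is the template-method half of a volume adapter: the public disconnect_volume() keeps the common can_modify_io() guard and FeedTask handling, while subclasses supply the backend-specific _disconnect_volume(). A skeletal subclass, purely illustrative (the base class name here is an assumption):

    class NoopVolumeAdapter(PowerVMVolumeAdapter):  # base class name assumed
        def _disconnect_volume(self):
            # Backend-specific unmapping would go here, typically by queueing
            # subtasks on self.stg_ftsk rather than hitting the REST API
            # directly.
            pass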
Example #8
    def disconnect_volume(self):
        """Disconnect the volume."""
        # Check if the VM is in a state where the detach is acceptable.
        lpar_w = vm.get_instance_wrapper(self.adapter, self.instance,
                                         self.host_uuid)
        capable, reason = lpar_w.can_modify_io()
        if not capable:
            raise exc.VolumeDetachFailed(
                volume_id=self.volume_id, instance_name=self.instance.name,
                reason=reason)

        # Run the disconnect
        self._disconnect_volume()

        if self.stg_ftsk.name == LOCAL_FEED_TASK:
            self.stg_ftsk.execute()
Example #9
    def connect_instance_disk_to_mgmt(self, instance):
        """Connect an instance's boot disk to the management partition.

        :param instance: The instance whose boot disk is to be mapped.
        :return stg_elem: The storage element (LU, VDisk, etc.) that was mapped
        :return vios: The EntryWrapper of the VIOS from which the mapping was
                      made.
        :raise InstanceDiskMappingFailed: If the mapping could not be done.
        """
        msg_args = {"instance_name": instance.name}
        lpar_wrap = vm.get_instance_wrapper(self.adapter, instance, self.host_uuid)
        for stg_elem, vios in self.instance_disk_iter(instance, lpar_wrap=lpar_wrap):
            msg_args["disk_name"] = stg_elem.name
            msg_args["vios_name"] = vios.name

            # Create a new mapping.  NOTE: If there's an existing mapping on
            # the other VIOS but not this one, we'll create a second mapping
            # here.  It would take an extreme sequence of events to get to that
            # point, and the second mapping would be harmless anyway. The
            # alternative would be always checking all VIOSes for existing
            # mappings, which increases the response time of the common case by
            # an entire GET of VIOS+SCSI_MAPPING.
            LOG.debug(
                "Mapping boot disk %(disk_name)s of instance "
                "%(instance_name)s to the management partition from "
                "Virtual I/O Server %(vios_name)s.",
                msg_args,
            )
            try:
                tsk_map.add_vscsi_mapping(self.host_uuid, vios, self.mp_uuid, stg_elem)
                # If that worked, we're done.  add_vscsi_mapping logged.
                return stg_elem, vios
            except Exception as e:
                msg_args["exc"] = e
                LOG.warning(
                    _LW(
                        "Failed to map boot disk %(disk_name)s of "
                        "instance %(instance_name)s to the management "
                        "partition from Virtual I/O Server "
                        "%(vios_name)s: %(exc)s"
                    ),
                    msg_args,
                )
                # Try the next hit, if available.
        # We either didn't find the boot dev, or failed all attempts to map it.
        raise npvmex.InstanceDiskMappingFailed(**msg_args)
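Caller-side sketch (assumed names): map the boot disk into the management partition, work against it, then undo the mapping. The disconnect_disk_from_mgmt() counterpart is assumed to exist on the same disk driver; it is not shown above.

    stg_elem, vios = disk_dvr.connect_instance_disk_to_mgmt(instance)
    try:
        # e.g. discover the mapped device on the management partition and
        # copy data off the boot disk.
        pass
    finally:
        # Assumed counterpart call; tears the temporary mapping back down.
        disk_dvr.disconnect_disk_from_mgmt(vios.uuid, stg_elem.name)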
Example #10
    def disconnect_volume(self, slot_mgr):
        """Disconnect the volume.

        :param slot_mgr: A NovaSlotManager.  Used to store/retrieve the client
                         slots used when a volume is detached from the VM.
        """
        # Check if the VM is in a state where the detach is acceptable.
        lpar_w = vm.get_instance_wrapper(self.adapter, self.instance)
        capable, reason = lpar_w.can_modify_io()
        if not capable:
            raise exc.VolumeDetachFailed(
                volume_id=self.volume_id, instance_name=self.instance.name,
                reason=reason)

        # Run the disconnect
        self._disconnect_volume(slot_mgr)

        if self.stg_ftsk.name == LOCAL_FEED_TASK:
            self.stg_ftsk.execute()
Example #11
    def connect_instance_disk_to_mgmt(self, instance):
        """Connect an instance's boot disk to the management partition.

        :param instance: The instance whose boot disk is to be mapped.
        :return stg_elem: The storage element (LU, VDisk, etc.) that was mapped
        :return vios: The EntryWrapper of the VIOS from which the mapping was
                      made.
        :raise InstanceDiskMappingFailed: If the mapping could not be done.
        """
        msg_args = {'instance_name': instance.name}
        lpar_wrap = vm.get_instance_wrapper(self.adapter, instance,
                                            self.host_uuid)
        for stg_elem, vios in self.instance_disk_iter(instance,
                                                      lpar_wrap=lpar_wrap):
            msg_args['disk_name'] = stg_elem.name
            msg_args['vios_name'] = vios.name

            # Create a new mapping.  NOTE: If there's an existing mapping on
            # the other VIOS but not this one, we'll create a second mapping
            # here.  It would take an extreme sequence of events to get to that
            # point, and the second mapping would be harmless anyway. The
            # alternative would be always checking all VIOSes for existing
            # mappings, which increases the response time of the common case by
            # an entire GET of VIOS+SCSI_MAPPING.
            LOG.debug(
                "Mapping boot disk %(disk_name)s of instance "
                "%(instance_name)s to the management partition from "
                "Virtual I/O Server %(vios_name)s.", msg_args)
            try:
                tsk_map.add_vscsi_mapping(self.host_uuid, vios, self.mp_uuid,
                                          stg_elem)
                # If that worked, we're done.  add_vscsi_mapping logged.
                return stg_elem, vios
            except Exception as e:
                msg_args['exc'] = e
                LOG.warning(
                    _LW("Failed to map boot disk %(disk_name)s of "
                        "instance %(instance_name)s to the management "
                        "partition from Virtual I/O Server "
                        "%(vios_name)s: %(exc)s"), msg_args)
                # Try the next hit, if available.
        # We either didn't find the boot dev, or failed all attempts to map it.
        raise npvmex.InstanceDiskMappingFailed(**msg_args)
Example #12
    def disconnect_volume(self, slot_mgr):
        """Disconnect the volume.

        :param slot_mgr: A NovaSlotManager.  Used to store/retrieve the client
                         slots used when a volume is detached from the VM.
        """
        # Check if the VM is in a state where the detach is acceptable.
        lpar_w = vm.get_instance_wrapper(self.adapter, self.instance)
        capable, reason = lpar_w.can_modify_io()
        if not capable:
            raise exc.VolumeDetachFailed(
                volume_id=self.volume_id, instance_name=self.instance.name,
                reason=reason)

        # Run the disconnect
        self._disconnect_volume(slot_mgr)

        if self.stg_ftsk.name == LOCAL_FEED_TASK:
            self.stg_ftsk.execute()
Example #13
    def _get_data(self, instance):
        """Get the NVRAM data for the instance.

        :param instance: The instance to get the data for.
        :returns: The NVRAM data for the instance.
        """
        data = None
        try:
            # Get the data from the adapter.
            entry = vm.get_instance_wrapper(self._adapter, instance,
                                            xag=[pvm_const.XAG.NVRAM])
            data = entry.nvram
            LOG.debug('NVRAM for instance: %s', data, instance=instance)
        except pvm_exc.HttpError as e:
            # The VM might have been deleted since the store request.
            if e.response.status not in [404]:
                LOG.exception(e)
                LOG.warning(_LW('Unable to store the NVRAM for instance: '
                                '%s'), instance.name)
        return data
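The HttpError branch is the interesting part: a 404 simply means the LPAR disappeared between the event and the fetch, so it is not worth a warning. The same tolerant fetch as a standalone sketch (import paths assumed; note the status is compared as an integer, matching Example #15):

    import pypowervm.const as pvm_const
    import pypowervm.exceptions as pvm_exc

    from nova_powervm.virt.powervm import vm  # import path assumed

    def fetch_nvram(adapter, instance):
        # Return the instance's NVRAM, or None if the LPAR no longer exists.
        try:
            return vm.get_instance_wrapper(
                adapter, instance, xag=[pvm_const.XAG.NVRAM]).nvram
        except pvm_exc.HttpError as e:
            if e.response.status == 404:
                return None
            # Unlike the example, other HTTP errors are re-raised here rather
            # than logged and swallowed.
            raise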
Example #14
    def _get_bootdisk_iter(self, instance):
        """Return an iterator of (storage_elem, VIOS) tuples for the instance.

        This method returns an iterator of (storage_elem, VIOS) tuples, where
        storage_elem is a pypowervm storage element wrapper associated with
        the instance boot disk and VIOS is the wrapper of the Virtual I/O
        server owning that storage element.

        :param instance: nova.objects.instance.Instance object owning the
                         requested disk.
        :return: Iterator of tuples of (storage_elem, VIOS).
        """
        lpar_wrap = vm.get_instance_wrapper(self.adapter, instance)
        match_func = self._disk_match_func(DiskType.BOOT, instance)
        for vios_uuid in self.vios_uuids:
            vios_wrap = pvm_vios.VIOS.get(self.adapter,
                                          uuid=vios_uuid,
                                          xag=[pvm_const.XAG.VIO_SMAP])
            for scsi_map in tsk_map.find_maps(vios_wrap.scsi_mappings,
                                              client_lpar_id=lpar_wrap.id,
                                              match_func=match_func):
                yield scsi_map.backing_storage, vios_wrap
Example #15
    def _get_data(self, instance):
        """Get the NVRAM data for the instance.

        :param instance: The instance to get the data for.
        :returns: The NVRAM data for the instance.
        """
        data = None
        try:
            # Get the data from the adapter.
            entry = vm.get_instance_wrapper(self._adapter,
                                            instance,
                                            xag=[pvm_const.XAG.NVRAM])
            data = entry.nvram
            LOG.debug('NVRAM for instance: %s', data, instance=instance)
        except pvm_exc.HttpError as e:
            # The VM might have been deleted since the store request.
            if e.response.status not in [404]:
                LOG.exception(e)
                LOG.warning(
                    _LW('Unable to store the NVRAM for instance: '
                        '%s'), instance.name)
        return data
Example #16
    def execute_impl(self):
        return vm.get_instance_wrapper(self.adapter, self.instance)
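For context, a minimal sketch of the kind of task class a method like this would live in: a taskflow Task whose result (the LPAR wrapper) is provided to later tasks in a flow. The real project may use a different base class and naming; only the shape is meant to carry over.

    from taskflow import task

    from nova_powervm.virt.powervm import vm  # import path assumed

    class Get(task.Task):
        # Fetch the instance's LPAR wrapper and provide it as 'lpar_wrap'.
        def __init__(self, adapter, instance):
            super(Get, self).__init__(name='get_lpar', provides='lpar_wrap')
            self.adapter = adapter
            self.instance = instance

        def execute(self):
            return vm.get_instance_wrapper(self.adapter, self.instance)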
Example #17
    def execute(self):
        return vm.get_instance_wrapper(self.adapter, self.instance)
Example #18
    def check_source(self, context, block_device_info, vol_drvs):
        """Check the source host

        Here we check the source host to see if it's capable of migrating
        the instance to the destination host.  There may be conditions
        that can only be checked on the source side.

        Also, get the instance ready for the migration by removing any
        virtual optical devices attached to the LPAR.

        :param context: security context
        :param block_device_info: result of _get_instance_block_device_info
        :param vol_drvs: volume drivers for the attached volumes
        :returns: a PowerVMLiveMigrateData object
        """

        lpar_w = vm.get_instance_wrapper(self.drvr.adapter, self.instance)
        self.lpar_w = lpar_w

        LOG.debug('Dest Migration data: %s',
                  self.mig_data,
                  instance=self.instance)

        # Check proc compatibility modes
        if (lpar_w.proc_compat_mode and lpar_w.proc_compat_mode
                not in self.mig_data.dest_proc_compat.split(',')):
            msg = (_("Cannot migrate %(name)s because its "
                     "processor compatibility mode %(mode)s "
                     "is not in the list of modes \"%(modes)s\" "
                     "supported by the target host.") %
                   dict(name=self.instance.name,
                        mode=lpar_w.proc_compat_mode,
                        modes=', '.join(
                            self.mig_data.dest_proc_compat.split(','))))

            raise exception.MigrationPreCheckError(reason=msg)

        # Check if VM is ready for migration
        self._check_migration_ready(lpar_w, self.drvr.host_wrapper)

        if lpar_w.migration_state != 'Not_Migrating':
            msg = (_("Live migration of instance '%(name)s' failed because "
                     "the migration state is: %(state)s") %
                   dict(name=self.instance.name, state=lpar_w.migration_state))
            raise exception.MigrationPreCheckError(reason=msg)

        # Check the number of migrations for capacity
        _verify_migration_capacity(self.drvr.host_wrapper, self.instance)

        self.mig_data.public_key = mgmt_task.get_public_key(self.drvr.adapter)

        # Get the 'source' pre-migration data for the volume drivers.
        vol_data = {}
        for vol_drv in vol_drvs:
            vol_drv.pre_live_migration_on_source(vol_data)
        self.mig_data.vol_data = vol_data

        LOG.debug('Source migration data: %s',
                  self.mig_data,
                  instance=self.instance)

        # Create a FeedTask to scrub any orphaned mappings/storage associated
        # with this LPAR.  (Don't run it yet - we want to do the VOpt removal
        # within the same FeedTask.)
        stg_ftsk = stor_task.ScrubOrphanStorageForLpar(self.drvr.adapter,
                                                       lpar_w.id)
        # Add subtasks to remove the VOpt devices under the same FeedTask.
        media.ConfigDrivePowerVM(self.drvr.adapter).dlt_vopt(
            lpar_w.uuid, stg_ftsk=stg_ftsk, remove_mappings=False)
        # Now execute the FeedTask, performing both scrub and VOpt removal.
        stg_ftsk.execute()

        # Ensure the vterm is non-active
        vterm.close_vterm(self.drvr.adapter, lpar_w.uuid)

        return self.mig_data
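The scrub-plus-VOpt-removal step at the end is worth isolating: a single FeedTask carries both the orphan-storage scrub subtasks and the virtual-optical removal subtasks, so each VIOS is read and updated only once. A standalone sketch using only the calls that appear above (import paths assumed):

    import pypowervm.tasks.storage as stor_task
    import pypowervm.tasks.vterm as vterm

    from nova_powervm.virt.powervm import media  # import path assumed

    def scrub_and_remove_vopt(adapter, lpar_w):
        # Queue the orphan-storage scrub subtasks for this LPAR...
        stg_ftsk = stor_task.ScrubOrphanStorageForLpar(adapter, lpar_w.id)
        # ...add the virtual-optical removal subtasks to the same FeedTask...
        media.ConfigDrivePowerVM(adapter).dlt_vopt(
            lpar_w.uuid, stg_ftsk=stg_ftsk, remove_mappings=False)
        # ...and run both against the VIOS feed in one pass.
        stg_ftsk.execute()
        # Finally, make sure no vterm is left open against the partition.
        vterm.close_vterm(adapter, lpar_w.uuid)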
Example #19
    def execute(self):
        return vm.get_instance_wrapper(self.adapter, self.instance,
                                       self.host_uuid)
Example #20
    def check_source(self, context, block_device_info, vol_drvs):
        """Check the source host

        Here we check the source host to see if it's capable of migrating
        the instance to the destination host.  There may be conditions
        that can only be checked on the source side.

        Also, get the instance ready for the migration by removing any
        virtual optical devices attached to the LPAR.

        :param context: security context
        :param block_device_info: result of _get_instance_block_device_info
        :param vol_drvs: volume drivers for the attached volumes
        :returns: a PowerVMLiveMigrateData object
        """

        lpar_w = vm.get_instance_wrapper(self.drvr.adapter, self.instance)
        self.lpar_w = lpar_w

        LOG.debug('Dest Migration data: %s' % self.mig_data)

        # Check proc compatibility modes
        if (lpar_w.proc_compat_mode and lpar_w.proc_compat_mode not in
                self.mig_data.dest_proc_compat.split(',')):
            msg = (_("Cannot migrate %(name)s because its "
                     "processor compatibility mode %(mode)s "
                     "is not in the list of modes \"%(modes)s\" "
                     "supported by the target host.") %
                   dict(name=self.instance.name,
                        mode=lpar_w.proc_compat_mode,
                        modes=', '.join(
                            self.mig_data.dest_proc_compat.split(','))))

            raise exception.MigrationPreCheckError(reason=msg)

        # Check if VM is ready for migration
        self._check_migration_ready(lpar_w, self.drvr.host_wrapper)

        if lpar_w.migration_state != 'Not_Migrating':
            msg = (_("Live migration of instance '%(name)s' failed because "
                     "the migration state is: %(state)s") %
                   dict(name=self.instance.name,
                        state=lpar_w.migration_state))
            raise exception.MigrationPreCheckError(reason=msg)

        # Check the number of migrations for capacity
        _verify_migration_capacity(self.drvr.host_wrapper, self.instance)

        self.mig_data.public_key = mgmt_task.get_public_key(self.drvr.adapter)

        # Get the 'source' pre-migration data for the volume drivers.
        vol_data = {}
        for vol_drv in vol_drvs:
            vol_drv.pre_live_migration_on_source(vol_data)
        self.mig_data.vol_data = vol_data

        LOG.debug('Src Migration data: %s' % self.mig_data)

        # Create a FeedTask to scrub any orphaned mappings/storage associated
        # with this LPAR.  (Don't run it yet - we want to do the VOpt removal
        # within the same FeedTask.)
        stg_ftsk = stor_task.ScrubOrphanStorageForLpar(self.drvr.adapter,
                                                       lpar_w.id)
        # Add subtasks to remove the VOpt devices under the same FeedTask.
        media.ConfigDrivePowerVM(self.drvr.adapter, self.drvr.host_uuid
                                 ).dlt_vopt(lpar_w.uuid, stg_ftsk=stg_ftsk,
                                            remove_mappings=False)
        # Now execute the FeedTask, performing both scrub and VOpt removal.
        stg_ftsk.execute()

        # Ensure the vterm is non-active
        vterm.close_vterm(self.drvr.adapter, lpar_w.uuid)

        return self.mig_data