Example #1
    def revert_impl(self, result, flow_failures):
        # The parameters have to match the execute method, plus the response +
        # failures even if only a subset are used.
        LOG.warning(
            _LW('Volume %(vol)s for instance %(inst)s to be '
                'disconnected'), {
                    'vol': self.vol_id,
                    'inst': self.vol_drv.instance.name
                })

        # Note that the rollback is *instant*.  Resetting the FeedTask ensures
        # immediate rollback.
        self.vol_drv.reset_stg_ftsk()
        try:
            # We attempt to disconnect in case we 'partially connected'.  In
            # the connect scenario, perhaps one of the Virtual I/O Servers
            # was connected.  This attempts to clear anything out to make sure
            # the terminate connection runs smoothly.
            self.vol_drv.disconnect_volume(self.slot_mgr)
        except npvmex.VolumeDetachFailed as e:
            # Only log that the volume detach failed.  Should not be blocking
            # due to being in the revert flow.
            LOG.warning(
                _LW("Unable to disconnect volume for %(inst)s during "
                    "rollback.  Error was: %(error)s"), {
                        'inst': self.vol_drv.instance.name,
                        'error': e.message
                    })
Example #2
    def revert_impl(self, result, flow_failures):
        # The parameters have to match the execute method, plus the response +
        # failures even if only a subset are used.
        LOG.warning(
            _LW('Volume %(vol)s for instance %(inst)s to be '
                're-connected'), {
                    'vol': self.vol_id,
                    'inst': self.vol_drv.instance.name
                })

        # Note that the rollback is *instant*.  Resetting the FeedTask ensures
        # immediate rollback.
        self.vol_drv.reset_stg_ftsk()
        try:
            # We try to reconnect the volume here so that it maintains its
            # linkage (in the hypervisor) to the VM.  This makes it easier for
            # operators to understand the linkage between the VMs and volumes
            # in error scenarios.  This is simply useful for debug purposes
            # if there is an operational error.
            self.vol_drv.connect_volume(self.slot_mgr)
        except npvmex.VolumeAttachFailed as e:
            # Only log that the volume attach failed.  Should not be blocking
            # due to being in the revert flow.  See comment above.
            LOG.warning(
                _LW("Unable to re-connect volume for %(inst)s during "
                    "rollback.  Error was: %(error)s"), {
                        'inst': self.vol_drv.instance.name,
                        'error': e.message
                    })
Example #3
    def revert_impl(self, result, flow_failures):
        """Unmap the disk and then remove it from the management partition.

        We use this order to avoid rediscovering the device in case some other
        thread scans the SCSI bus between when we remove and when we unmap.
        """
        if self.vios_wrap is None or self.stg_elem is None:
            # We never even got connected - nothing to do
            return
        LOG.warning(
            _LW("Unmapping boot disk %(disk_name)s of instance "
                "%(instance_name)s from management partition via "
                "Virtual I/O Server %(vios_name)s."), {
                    'disk_name': self.stg_elem.name,
                    'instance_name': self.instance.name,
                    'vios_name': self.vios_wrap.name
                })
        self.disk_dvr.disconnect_disk_from_mgmt(self.vios_wrap.uuid,
                                                self.stg_elem.name)

        if self.disk_path is None:
            # We did not discover the disk - nothing else to do.
            return
        LOG.warning(
            _LW("Removing disk %(disk_path)s from the management "
                "partition."), {'disk_path': self.disk_path})
        mgmt.remove_block_dev(self.disk_path)
Example #4
    def unplug(self, vif, cna_w_list=None):
        """Unplugs a virtual interface (network) from a VM.

        Extends the base implementation, but before invoking it will remove
        itself from the bridge it is connected to and delete the corresponding
        trunk device on the mgmt partition.

        :param vif: The virtual interface to unplug from the instance.
        :param cna_w_list: (Optional, Default: None) The list of Client Network
                           Adapters from pypowervm.  Providing this input
                           allows for an improvement in operation speed.
        :return cna_w: The deleted Client Network Adapter.
        """
        # Need to find the adapters if they were not provided
        if not cna_w_list:
            cna_w_list = vm.get_cnas(self.adapter, self.instance)

        # Find the CNA for this vif.
        cna_w = self._find_cna_for_vif(cna_w_list, vif)
        if not cna_w:
            LOG.warning(_LW('Unable to unplug VIF with mac %(mac)s for '
                            'instance %(inst)s.  The VIF was not found on '
                            'the instance.'), {
                                'mac': vif['address'],
                                'inst': self.instance.name
                            },
                        instance=self.instance)
            return None

        # Find and delete the trunk adapters
        trunks = pvm_cna.find_trunks(self.adapter, cna_w)

        dev_name = self.get_trunk_dev_name(vif)
        utils.execute('ip', 'link', 'set', dev_name, 'down', run_as_root=True)
        try:
            utils.execute('brctl',
                          'delif',
                          vif['network']['bridge'],
                          dev_name,
                          run_as_root=True)
        except Exception as e:
            LOG.warning(_LW('Unable to delete device %(dev_name)s from bridge '
                            '%(bridge)s. Error: %(error)s'), {
                                'dev_name': dev_name,
                                'bridge': vif['network']['bridge'],
                                'error': e.message
                            },
                        instance=self.instance)
        for trunk in trunks:
            trunk.delete()

        # Now delete the client CNA
        return super(PvmLBVifDriver, self).unplug(vif, cna_w_list=cna_w_list)
Example #5
    def rollback_live_migration_at_destination(self, vif, vea_vlan_mappings):
        """Rolls back the pre live migrate on the destination host.

        Will delete the TrunkAdapter that pre_live_migrate_at_destination
        created with its unique hypervisor VLAN.  This uses the
        vea_vlan_mappings to provide the information as to what TrunkAdapter
        it should remove.

        :param vif: The virtual interface that was being migrated.  This may be
                    called network_info in other portions of the code.
        :param vea_vlan_mappings: The VEA VLAN mappings.  Key is the vif
                                  mac address, value is the destination's
                                  target hypervisor VLAN.
        """
        LOG.warning(_LW("Rolling back the live migrate of VIF with mac "
                        "%(mac)s."), {'mac': vif['address']},
                    instance=self.instance)

        # We know that we just attached the VIF to the NovaLink VM.  Search
        # for a trunk adapter with the PVID and vSwitch that we specified
        # above.  This is guaranteed to be unique.
        vlan = int(vea_vlan_mappings[vif['address']])
        vswitch_id = pvm_net.VSwitch.search(
            self.adapter,
            parent_type=pvm_ms.System,
            one_result=True,
            name=CONF.powervm.pvm_vswitch_for_novalink_io).switch_id

        # Delete port from OVS
        linux_net.delete_ovs_vif_port(vif['network']['bridge'],
                                      self.get_trunk_dev_name(vif))

        # Find the trunk
        mgmt_wrap = pvm_par.get_this_partition(self.adapter)
        child_adpts = pvm_net.CNA.get(self.adapter, parent=mgmt_wrap)
        trunk = None
        for adpt in child_adpts:
            # We need a trunk adapter (so check trunk_pri).  Then the trunk
            # is unique by PVID and PowerVM vSwitch ID.
            if (adpt.pvid == vlan and adpt.vswitch_id == vswitch_id
                    and adpt.trunk_pri):
                trunk = adpt
                break

        if trunk:
            # Delete the peer'd trunk adapter.
            LOG.warning(_LW("Deleting target side trunk adapter %(dev)s for "
                            "rollback operation"), {'dev': trunk.dev_name},
                        instance=self.instance)
            trunk.delete()
Example #6
    def revert(self, *args, **kwargs):
        LOG.info(_LW('Reverting task %(task)s for instance %(inst)s'),
                 {'task': self.name, 'inst': self.instance.name},
                 instance=self.instance)
        start_time = time.time()

        ret = self.revert_impl(*args, **kwargs)

        run_time = time.time() - start_time
        LOG.info(_LW('Revert task %(task)s completed in %(seconds)d seconds '
                     'for instance %(inst)s'),
                 {'task': self.name, 'inst': self.instance.name,
                  'seconds': run_time}, instance=self.instance)
        return ret
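
Note: the revert() above is a thin timing/logging wrapper around a
subclass-provided revert_impl().  A minimal, self-contained sketch of that
shape follows; the class name, constructor arguments and log wording are
illustrative only, not taken from the source project.

import logging
import time

LOG = logging.getLogger(__name__)


class TimedRevertTask(object):
    """Hypothetical base task: times and logs whatever revert_impl does."""

    def __init__(self, name, instance):
        self.name = name
        self.instance = instance

    def revert(self, *args, **kwargs):
        start_time = time.time()
        ret = self.revert_impl(*args, **kwargs)
        LOG.info('Revert task %s took %d seconds', self.name,
                 time.time() - start_time)
        return ret

    def revert_impl(self, *args, **kwargs):
        # Concrete tasks override this with the actual rollback body.
        raise NotImplementedError()
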
Example #7
    def _fetch(self, object_key):
        # Check if the object exists.  If not, return a result accordingly.
        if not self._exists(object_key):
            return None, _('Object does not exist in Swift.')

        try:
            # Create a temp file for download into
            with tempfile.NamedTemporaryFile(delete=False) as f:
                options = {
                    'out_file': f.name
                }
            # The file is now created and closed for the swift client to use.
            results = self._run_operation(
                'download', container=self.container, objects=[object_key],
                options=options)
            for result in results:
                if result['success']:
                    with open(f.name, 'r') as f:
                        return f.read(), result
                else:
                    return None, result
        finally:
            try:
                os.remove(f.name)
            except Exception:
                LOG.warning(_LW('Could not remove temporary file: %s'), f.name)
Example #8
    def _fetch(self, object_key):
        # Check if the object exists.  If not, return a result accordingly.
        if not self._exists(object_key):
            return None, _('Object does not exist in Swift.')

        try:
            # Create a temp file for download into
            with tempfile.NamedTemporaryFile(delete=False) as f:
                options = {
                    'out_file': f.name
                }
            # The file is now created and closed for the swift client to use.
            results = self._run_operation(
                'download', container=self.container, objects=[object_key],
                options=options)
            for result in results:
                if result['success']:
                    with open(f.name, 'r') as f:
                        return f.read(), result
                else:
                    return None, result
        finally:
            try:
                os.remove(f.name)
            except Exception:
                LOG.warning(_LW('Could not remove temporary file: %s'), f.name)
Example #9
    def _remove_maps_for_fabric(self, fabric):
        """Removes the vFC storage mappings from the VM for a given fabric.

        :param fabric: The fabric to remove the mappings from.
        """
        npiv_port_maps = self._get_fabric_meta(fabric)
        if not npiv_port_maps:
            # If no mappings exist, exit out of the method.
            return

        vios_wraps = self.stg_ftsk.feed

        for npiv_port_map in npiv_port_maps:
            ls = [LOG.info, _LI("Removing a NPIV mapping for instance "
                                "%(inst)s for fabric %(fabric)s."),
                  {'inst': self.instance.name, 'fabric': fabric}]
            vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, npiv_port_map)

            if vios_w is not None:
                # Add the subtask to remove the specific map
                task_wrapper = self.stg_ftsk.wrapper_tasks[vios_w.uuid]
                task_wrapper.add_functor_subtask(
                    pvm_vfcm.remove_maps, self.vm_uuid,
                    port_map=npiv_port_map, logspec=ls)
            else:
                LOG.warning(_LW("No storage connections found between the "
                                "Virtual I/O Servers and FC Fabric "
                                "%(fabric)s."), {'fabric': fabric},
                            instance=self.instance)
Example #10
        def _run_upload_operation():
            """Run the upload operation

            Attempts the upload a maximum of two times.  The upload
            operation fails with ClientException if there is an
            authentication error.  The second attempt only happens if the
            first attempt failed with ClientException.  A return value of
            True means we should retry; False means the upload succeeded,
            so no retry is required.

            Raises RetryError if the upload fails on the second attempt,
            as the maximum number of retry attempts has been reached.

            """
            source = six.StringIO(data)
            obj = swft_srv.SwiftUploadObject(source, object_name=inst_key)

            results = self._run_operation('upload', self.container,
                                          [obj], options=options)
            for result in results:
                if not result['success']:
                    # TODO(arun-mani - Bug 1611011): Filed for updating swift
                    # client to return http status code in case of failure
                    if isinstance(result['error'], swft_exc.ClientException):
                        # If upload failed during nvram/slot_map update due to
                        # expired keystone token, retry swift-client operation
                        # to allow regeneration of token
                        LOG.warning(_LW('NVRAM upload failed due to invalid '
                                        'token. Retrying upload.'))
                        return True
                    # The upload failed.
                    raise api.NVRAMUploadException(instance=inst_name,
                                                   reason=result)
            return False
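
Note: the docstring above refers to RetryError, which suggests the
two-attempt behaviour is driven by a retry decorator.  A hedged sketch of
that wiring using the 'retrying' library follows; the helper name and the
attempt counter are illustrative assumptions, not code from the source
project.

from retrying import retry

_attempts = []


@retry(retry_on_result=lambda should_retry: should_retry is True,
       stop_max_attempt_number=2)
def _upload_with_retry():
    # Stand-in for the upload body above: return True to ask for a retry,
    # False when the upload succeeded.  If the second attempt still returns
    # True, the retrying library raises RetryError.
    _attempts.append(1)
    return len(_attempts) < 2  # first call asks for a retry, second succeeds
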
Example #11
    def _cleanup_volume(self, udid=None, devname=None):
        """Cleanup the hdisk associated with this udid."""

        if not udid and not devname:
            LOG.warning(_LW('Could not remove hdisk for volume: %s'),
                        self.volume_id)
            return

        LOG.info(_LI('Removing hdisk for udid: %s'), udid)

        def find_hdisk_to_remove(vios_w):
            if devname is None:
                device_name = vios_w.hdisk_from_uuid(udid)
            else:
                device_name = devname
            if device_name is None:
                return
            LOG.info(_LI('Removing %(hdisk)s from VIOS %(vios)s'), {
                'hdisk': device_name,
                'vios': vios_w.name
            })
            self._add_remove_hdisk(vios_w,
                                   device_name,
                                   stg_ftsk=rmv_hdisk_ftsk)

        # Create a feed task to get the vios, find the hdisk and remove it.
        rmv_hdisk_ftsk = tx.FeedTask(
            'find_hdisk_to_remove',
            pvm_vios.VIOS.getter(self.adapter, xag=[pvm_const.XAG.VIO_STOR]))
        # Find vios hdisks for this udid to remove.
        rmv_hdisk_ftsk.add_functor_subtask(find_hdisk_to_remove,
                                           flag_update=False)
        rmv_hdisk_ftsk.execute()
Example #12
 def _delete(self, key):
     try:
         self.store_api.delete_slot_map(key)
     except Exception:
         LOG.warning(_LW("Unable to delete the slot map from Swift backing "
                         "store with ID %(key)s.  Will require "
                         "manual cleanup."), {'key': key},
                     instance=self.instance)
Example #13
def dlt_lpar(adapter, lpar_uuid):
    """Delete an LPAR

    :param adapter: The adapter for the pypowervm API
    :param lpar_uuid: The lpar to delete
    """
    # Attempt to delete the VM.
    try:
        LOG.info(_LI('Deleting virtual machine. LPARID: %s'), lpar_uuid)

        # Ensure any vterms are closed.  Will no-op otherwise.
        vterm.close_vterm(adapter, lpar_uuid)

        # Run the LPAR delete
        resp = adapter.delete(pvm_lpar.LPAR.schema_type, root_id=lpar_uuid)
        LOG.info(_LI('Virtual machine delete status: %d'), resp.status)
        return resp
    except pvm_exc.HttpError as e:
        if e.response and e.response.status == 404:
            LOG.warning(_LW('Virtual Machine not found LPAR_ID: %s'),
                        lpar_uuid)
        else:
            LOG.error(_LE('HttpError deleting virtual machine. LPARID: %s'),
                      lpar_uuid)
            raise
    except pvm_exc.Error:
        # Attempting to close vterm did not help so raise exception
        LOG.error(_LE('Virtual machine delete failed: LPARID=%s'), lpar_uuid)
        raise
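
Note: a hedged usage sketch of dlt_lpar() follows; 'adapter' and 'lpar_uuid'
are placeholders.  Because the 404 branch above only logs a warning, callers
see an implicit None when the LPAR was already gone.

resp = dlt_lpar(adapter, lpar_uuid)
if resp is None:
    # The HttpError/404 case is swallowed above, so a missing LPAR shows up
    # here as None rather than as an exception.
    LOG.debug('LPAR %s was already deleted.', lpar_uuid)
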
Example #14
    def _cleanup_volume(self, udid):
        """Cleanup the hdisk associated with this udid."""

        if not udid:
            LOG.warning(_LW('Could not remove hdisk for volume: %s')
                        % self.volume_id)
            return

        LOG.info(_LI('Removing hdisk for udid: %s') % udid)

        def find_hdisk_to_remove(vios_w):
            device_name = vios_w.hdisk_from_uuid(udid)
            if device_name is None:
                return
            LOG.info(_LI('Removing %(hdisk)s from VIOS %(vios)s'),
                     {'hdisk': device_name, 'vios': vios_w.name})
            self._add_remove_hdisk(vios_w, device_name,
                                   stg_ftsk=rmv_hdisk_ftsk)

        # Create a feed task to get the vios, find the hdisk and remove it.
        rmv_hdisk_ftsk = tx.FeedTask(
            'find_hdisk_to_remove', pvm_vios.VIOS.getter(
                self.adapter, xag=[pvm_const.XAG.VIO_STOR]))
        # Find vios hdisks for this udid to remove.
        rmv_hdisk_ftsk.add_functor_subtask(
            find_hdisk_to_remove, flag_update=False)
        rmv_hdisk_ftsk.execute()
Example #15
    def _remove_maps_for_fabric(self, fabric):
        """Removes the vFC storage mappings from the VM for a given fabric.

        :param fabric: The fabric to remove the mappings from.
        """
        npiv_port_maps = self._get_fabric_meta(fabric)
        if not npiv_port_maps:
            # If no mappings exist, exit out of the method.
            return

        vios_wraps = self.stg_ftsk.feed

        for npiv_port_map in npiv_port_maps:
            ls = [LOG.info, _LI("Removing a NPIV mapping for instance "
                                "%(inst)s for fabric %(fabric)s."),
                  {'inst': self.instance.name, 'fabric': fabric}]
            vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, npiv_port_map)

            if vios_w is not None:
                # Add the subtask to remove the specific map
                task_wrapper = self.stg_ftsk.wrapper_tasks[vios_w.uuid]
                task_wrapper.add_functor_subtask(
                    pvm_vfcm.remove_maps, self.vm_uuid,
                    port_map=npiv_port_map, logspec=ls)
            else:
                LOG.warning(_LW("No storage connections found between the "
                                "Virtual I/O Servers and FC Fabric "
                                "%(fabric)s."), {'fabric': fabric})
Example #16
    def _discover_volume_on_vios(self, vios_w, volume_id):
        """Discovers an hdisk on a single vios for the volume.

        :param vios_w: VIOS wrapper to process
        :param volume_id: Volume to discover
        :returns: Status of the volume or None
        :returns: Device name or None
        :returns: LUN or None
        """
        # Get the initiator WWPNs, targets and LUN for the given VIOS.
        vio_wwpns, t_wwpns, lun = self._get_hdisk_itls(vios_w)

        # Build the ITL map and discover the hdisks on the Virtual I/O
        # Server (if any).
        itls = hdisk.build_itls(vio_wwpns, t_wwpns, lun)
        if len(itls) == 0:
            LOG.debug('No ITLs for VIOS %(vios)s for volume %(volume_id)s.'
                      % {'vios': vios_w.name, 'volume_id': volume_id})
            return None, None, None

        status, device_name, udid = hdisk.discover_hdisk(self.adapter,
                                                         vios_w.uuid, itls)

        if hdisk.good_discovery(status, device_name):
            LOG.info(_LI('Discovered %(hdisk)s on vios %(vios)s for '
                     'volume %(volume_id)s. Status code: %(status)s.'),
                     {'hdisk': device_name, 'vios': vios_w.name,
                      'volume_id': volume_id, 'status': str(status)})
        elif status == hdisk.LUAStatus.DEVICE_IN_USE:
            LOG.warning(_LW('Discovered device %(dev)s for volume %(volume)s '
                            'on %(vios)s is in use. Error code: %(status)s.'),
                        {'dev': device_name, 'volume': volume_id,
                         'vios': vios_w.name, 'status': str(status)})

        return status, device_name, udid
Example #17
    def unplug(self, vif, cna_w_list=None):
        """Unplugs a virtual interface (network) from a VM.

        Extends the base implementation, but before calling it will remove
        the adapter from the Open vSwitch and delete the trunk.

        :param vif: The virtual interface to unplug from the instance.
        :param cna_w_list: (Optional, Default: None) The list of Client Network
                           Adapters from pypowervm.  Providing this input
                           allows for an improvement in operation speed.
        :return cna_w: The deleted Client Network Adapter.
        """
        # Need to find the adapters if they were not provided
        if not cna_w_list:
            cna_w_list = vm.get_cnas(self.adapter, self.instance)

        # Find the CNA for this vif.
        cna_w = self._find_cna_for_vif(cna_w_list, vif)
        if not cna_w:
            LOG.warning(_LW('Unable to unplug VIF with mac %(mac)s for '
                            'instance %(inst)s.  The VIF was not found on '
                            'the instance.'),
                        {'mac': vif['address'], 'inst': self.instance.name})
            return None

        # Find and delete the trunk adapters
        trunks = pvm_cna.find_trunks(self.adapter, cna_w)
        dev = self.get_trunk_dev_name(vif)
        linux_net.delete_ovs_vif_port(vif['network']['bridge'], dev)
        for trunk in trunks:
            trunk.delete()

        # Now delete the client CNA
        return super(PvmOvsVifDriver, self).unplug(vif, cna_w_list=cna_w_list)
Example #18
    def unplug(self, vif, cna_w_list=None):
        """Unplugs a virtual interface (network) from a VM.

        :param vif: The virtual interface to unplug from the instance.
        :param cna_w_list: (Optional, Default: None) The list of Client Network
                           Adapters from pypowervm.  Providing this input
                           allows for an improvement in operation speed.
        :return cna_w: The deleted Client Network Adapter.
        """
        # This is a default implementation that most implementations will
        # require.

        # Need to find the adapters if they were not provided
        if not cna_w_list:
            cna_w_list = vm.get_cnas(self.adapter, self.instance)

        cna_w = self._find_cna_for_vif(cna_w_list, vif)
        if not cna_w:
            LOG.warning(_LW('Unable to unplug VIF with mac %(mac)s for '
                            'instance %(inst)s.  The VIF was not found on '
                            'the instance.'),
                        {'mac': vif['address'], 'inst': self.instance.name})
            return None

        LOG.info(_LI('Deleting VIF with mac %(mac)s for instance %(inst)s.'),
                 {'mac': vif['address'], 'inst': self.instance.name})
        try:
            cna_w.delete()
        except Exception as e:
            LOG.error(_LE('Unable to unplug VIF with mac %(mac)s for instance '
                          '%(inst)s.'), {'mac': vif['address'],
                                         'inst': self.instance.name})
            LOG.exception(e)
            raise VirtualInterfaceUnplugException()
        return cna_w
Example #19
 def _delete(self, key):
     try:
         self.store_api.delete_slot_map(key)
     except Exception:
         LOG.warning(_LW("Unable to delete the slot map from Swift backing "
                         "store with ID %(key)s.  Will require "
                         "manual cleanup."), {'key': key},
                     instance=self.instance)
Example #20
 def execute(self):
     LOG.info(_LI('Finding disk for instance: %s'), self.instance.name)
     disk = self.disk_dvr.get_disk_ref(self.instance, self.disk_type)
     if not disk:
         LOG.warning(_LW('Disk not found: %(disk_name)s'),
                     {'disk_name': self.disk_dvr._get_disk_name(
                         self.disk_type, self.instance)},
                     instance=self.instance)
     return disk
Example #21
    def revert_impl(self, lpar_wrap, result, flow_failures):
        LOG.warning(_LW('Powering off instance: %s'), self.instance.name)

        if isinstance(result, task_fail.Failure):
            # The power on itself failed...can't power off.
            LOG.debug('Power on failed.  Not performing power off.')
            return

        power.power_off(lpar_wrap, self.host_uuid, force_immediate=True)
Example #22
    def revert_impl(self, result, flow_failures):
        LOG.warning(_LW('Powering off instance: %s'), self.instance.name)

        if isinstance(result, task_fail.Failure):
            # The power on itself failed...can't power off.
            LOG.debug('Power on failed.  Not performing power off.')
            return

        vm.power_off(self.adapter, self.instance, force_immediate=True)
Example #23
 def execute_impl(self):
     disk = self.disk_dvr.get_disk_ref(self.instance, self.disk_type)
     if not disk:
         LOG.warning(_LW('Disk not found: %(disk_name)s'), {
             'disk_name':
             self.disk_dvr._get_disk_name(self.disk_type, self.instance),
         },
                     instance=self.instance)
     return disk
Example #24
    def execute(self, lpar_wrap):
        LOG.info(_LI('Unplugging the Network Interfaces to instance %s'),
                 self.instance.name)

        # If the state is not in an OK state for deleting, then throw an
        # error up front.
        modifiable, reason = lpar_wrap.can_modify_io()
        if not modifiable:
            LOG.error(_LE('Unable to remove VIFs from instance %(inst)s '
                          'because the system is not in a correct state.  '
                          'The reason reported by the system is: %(reason)s'),
                      {
                          'inst': self.instance.name,
                          'reason': reason
                      },
                      instance=self.instance)
            raise VirtualInterfaceUnplugException()

        # Get all the current Client Network Adapters (CNA) on the VM itself.
        cna_w_list = vm.get_cnas(self.adapter, self.instance, self.host_uuid)

        # Walk through the VIFs and delete the corresponding CNA on the VM.
        for vif in self.network_info:
            for cna_w in cna_w_list:
                # If the MAC address matched, attempt the delete.
                if vm.norm_mac(cna_w.mac) == vif['address']:
                    LOG.info(_LI('Deleting VIF with mac %(mac)s for instance '
                                 '%(inst)s.'), {
                                     'mac': vif['address'],
                                     'inst': self.instance.name
                                 },
                             instance=self.instance)
                    try:
                        cna_w.delete()
                    except Exception as e:
                        LOG.error(_LE('Unable to unplug VIF with mac %(mac)s '
                                      'for instance %(inst)s.'), {
                                          'mac': vif['address'],
                                          'inst': self.instance.name
                                      },
                                  instance=self.instance)
                        LOG.error(e)
                        raise VirtualInterfaceUnplugException()

                    # Break from the loop as we had a successful unplug.
                    # This prevents falling through to the 'else' clause.
                    break
            else:
                LOG.warning(_LW('Unable to unplug VIF with mac %(mac)s for '
                                'instance %(inst)s.  The VIF was not found on '
                                'the instance.'), {
                                    'mac': vif['address'],
                                    'inst': self.instance.name
                                },
                            instance=self.instance)
        return cna_w_list
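
Note: the 'VIF was not found' warning above hangs off the inner loop's else
clause.  Python only runs a for/else's else branch when the loop completes
without hitting break, so a successful unplug (which breaks) skips the
warning.  A tiny standalone illustration:

for mac in ['aa:bb', 'cc:dd']:
    if mac == 'cc:dd':
        break        # a match was found, so the else clause is skipped
else:
    print('no match found')  # runs only when the loop never breaks
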
Example #25
 def execute(self):
     LOG.info(_LI('Finding disk for instance: %s'), self.instance.name)
     disk = self.disk_dvr.get_disk_ref(self.instance, self.disk_type)
     if not disk:
         LOG.warning(_LW('Disk not found: %(disk_name)s'),
                     {'disk_name':
                         self.disk_dvr._get_disk_name(self.disk_type,
                                                      self.instance),
                      }, instance=self.instance)
     return disk
Example #26
    def revert_impl(self, lpar_wrap, result, flow_failures):
        if not self.network_infos:
            return

        # The parameters have to match the execute method, plus the response +
        # failures even if only a subset are used.
        LOG.warning(_LW('VIF creation being rolled back for instance '
                        '%(inst)s'), {'inst': self.instance.name},
                    instance=self.instance)

        # Get the current adapters on the system
        cna_w_list = vm.get_cnas(self.adapter, self.instance)
        for network_info in self.crt_network_infos:
            try:
                vif.unplug(self.adapter, self.host_uuid, self.instance,
                           network_info, self.slot_mgr, cna_w_list=cna_w_list)
            except Exception as e:
                LOG.exception(e)
                LOG.warning(_LW("An exception occurred during an unplug "
                                "in the vif rollback.  Ignoring."),
                            instance=self.instance)
Example #27
    def revert_impl(self, lpar_wrap, result, flow_failures):
        if not self.network_infos:
            return

        # The parameters have to match the execute method, plus the response +
        # failures even if only a subset are used.
        LOG.warning(_LW('VIF creation being rolled back for instance '
                        '%(inst)s'), {'inst': self.instance.name},
                    instance=self.instance)

        # Get the current adapters on the system
        cna_w_list = vm.get_cnas(self.adapter, self.instance)
        for network_info in self.network_infos:
            try:
                vif.unplug(self.adapter, self.host_uuid, self.instance,
                           network_info, self.slot_mgr, cna_w_list=cna_w_list)
            except Exception as e:
                LOG.exception(e)
                LOG.warning(_LW("An exception occurred during an unplug "
                                "in the vif rollback.  Ignoring."),
                            instance=self.instance)
Example #28
 def rm_hdisk():
     LOG.info(_LI("Running remove for hdisk: '%s'") % device_name)
     try:
         # Attempt to remove the hDisk
         hdisk.remove_hdisk(self.adapter, CONF.host, device_name,
                            vio_wrap.uuid)
     except Exception as e:
         # If there is a failure, log it, but don't stop the process
         LOG.warning(_LW("There was an error removing the hdisk "
                     "%(disk)s from the Virtual I/O Server."),
                     {'disk': device_name})
         LOG.warning(e)
Example #29
 def rm_hdisk():
     LOG.info(_LI("Running remove for hdisk: '%s'"), device_name)
     try:
         # Attempt to remove the hDisk
         hdisk.remove_hdisk(self.adapter, CONF.host, device_name,
                            vio_wrap.uuid)
     except Exception as e:
         # If there is a failure, log it, but don't stop the process
         LOG.warning(
             _LW("There was an error removing the hdisk "
                 "%(disk)s from the Virtual I/O Server."),
             {'disk': device_name})
         LOG.warning(e)
Example #30
    def remove(self, instance):
        """Remove the stored NVRAM for an instance.

        :param instance: The instance for which the NVRAM will be removed.
        """
        # Remove any pending updates
        self._pop_from_list(uuid=instance.uuid)
        # Remove it from the store
        try:
            self._api.delete(instance)
        except Exception as e:
            # Delete exceptions should not end the operation
            LOG.warning(_LW('Could not delete NVRAM: %s'), e,
                        instance=instance)
Example #31
    def revert(self, result, flow_failures):
        # The parameters have to match the execute method, plus the response +
        # failures even if only a subset are used.
        LOG.warning(_LW('Volume %(vol)s for instance %(inst)s to be '
                        'disconnected'),
                    {'vol': self.vol_id, 'inst': self.vol_drv.instance.name})

        # Note that the rollback is *instant*.  Resetting the FeedTask ensures
        # immediate rollback.
        self.vol_drv.reset_stg_ftsk()
        try:
            # We attempt to disconnect in case we 'partially connected'.  In
            # the connect scenario, perhaps one of the Virtual I/O Servers
            # was connected.  This attempts to clear anything out to make sure
            # the terminate connection runs smoothly.
            self.vol_drv.disconnect_volume()
        except npvmex.VolumeDetachFailed as e:
            # Only log that the volume detach failed.  Should not be blocking
            # due to being in the revert flow.
            LOG.warning(_LW("Unable to disconnect volume for %(inst)s during "
                            "rollback.  Error was: %(error)s"),
                        {'inst': self.vol_drv.instance.name,
                         'error': e.message})
Example #32
    def revert(self, result, flow_failures):
        # The parameters have to match the execute method, plus the response +
        # failures even if only a subset are used.
        LOG.warning(_LW('Image for instance %s to be deleted'),
                    self.instance.name)

        # If there is no result, or it's a direct failure, then there isn't
        # anything to delete.
        if result is None or isinstance(result, task_fail.Failure):
            return

        # Run the delete.  The result is a single disk.  Wrap into list
        # as the method works with plural disks.
        self.disk_dvr.delete_disks(self.context, self.instance, [result])
Example #33
    def revert(self, result, flow_failures):
        # The parameters have to match the execute method, plus the response +
        # failures even if only a subset are used.
        LOG.warning(_LW('Image for instance %s to be deleted'),
                    self.instance.name)

        # If there is no result, or it's a direct failure, then there isn't
        # anything to delete.
        if result is None or isinstance(result, task_fail.Failure):
            return

        # Run the delete.  The result is a single disk.  Wrap into list
        # as the method works with plural disks.
        self.disk_dvr.delete_disks(self.context, self.instance, [result])
Example #34
    def execute(self, lpar_wrap):
        LOG.info(_LI('Unplugging the Network Interfaces to instance %s'),
                 self.instance.name)

        # If the state is not in an OK state for deleting, then throw an
        # error up front.
        modifiable, reason = lpar_wrap.can_modify_io()
        if not modifiable:
            LOG.error(_LE('Unable to remove VIFs from instance %(inst)s '
                          'because the system is not in a correct state.  '
                          'The reason reported by the system is: %(reason)s'),
                      {'inst': self.instance.name, 'reason': reason},
                      instance=self.instance)
            raise VirtualInterfaceUnplugException()

        # Get all the current Client Network Adapters (CNA) on the VM itself.
        cna_w_list = vm.get_cnas(self.adapter, self.instance, self.host_uuid)

        # Walk through the VIFs and delete the corresponding CNA on the VM.
        for vif in self.network_info:
            for cna_w in cna_w_list:
                # If the MAC address matched, attempt the delete.
                if vm.norm_mac(cna_w.mac) == vif['address']:
                    LOG.info(_LI('Deleting VIF with mac %(mac)s for instance '
                                 '%(inst)s.'), {'mac': vif['address'],
                                                'inst': self.instance.name},
                             instance=self.instance)
                    try:
                        cna_w.delete()
                    except Exception as e:
                        LOG.error(_LE('Unable to unplug VIF with mac %(mac)s '
                                      'for instance %(inst)s.'),
                                  {'mac': vif['address'],
                                   'inst': self.instance.name},
                                  instance=self.instance)
                        LOG.error(e)
                        raise VirtualInterfaceUnplugException()

                    # Break from the loop as we had a successful unplug.
                    # This prevents falling through to the 'else' clause.
                    break
            else:
                LOG.warning(_LW('Unable to unplug VIF with mac %(mac)s for '
                                'instance %(inst)s.  The VIF was not found on '
                                'the instance.'),
                            {'mac': vif['address'],
                             'inst': self.instance.name},
                            instance=self.instance)
        return cna_w_list
Example #35
    def revert_impl(self, lpar_wrap, mgmt_cna, result, flow_failures):
        # The parameters have to match the execute method, plus the response +
        # failures even if only a subset are used.

        # No media builder, nothing to do
        if self.mb is None:
            return

        # Delete the virtual optical media. If it fails we don't care.
        try:
            self.mb.dlt_vopt(lpar_wrap.uuid)
        except Exception as e:
            LOG.warning(_LW('Vopt removal as part of spawn reversion failed '
                            'with: %(exc)s'), {'exc': six.text_type(e)},
                        instance=self.instance)
Example #36
    def revert(self, result, flow_failures):
        # The parameters have to match the execute method, plus the response +
        # failures even if only a subset are used.
        LOG.warning(_LW('Volume %(vol)s for instance %(inst)s to be '
                        're-connected'),
                    {'vol': self.vol_id, 'inst': self.vol_drv.instance.name})

        # Note that the rollback is *instant*.  Resetting the FeedTask ensures
        # immediate rollback.
        self.vol_drv.reset_stg_ftsk()
        try:
            # We try to reconnect the volume here so that it maintains its
            # linkage (in the hypervisor) to the VM.  This makes it easier for
            # operators to understand the linkage between the VMs and volumes
            # in error scenarios.  This is simply useful for debug purposes
            # if there is an operational error.
            self.vol_drv.connect_volume()
        except npvmex.VolumeAttachFailed as e:
            # Only log that the volume attach failed.  Should not be blocking
            # due to being in the revert flow.  See comment above.
            LOG.warning(_LW("Unable to re-connect volume for %(inst)s during "
                            "rollback.  Error was: %(error)s"),
                        {'inst': self.vol_drv.instance.name,
                         'error': e.message})
Example #37
    def revert(self, result, flow_failures):
        """Unmap the disk and then remove it from the management partition.

        We use this order to avoid rediscovering the device in case some other
        thread scans the SCSI bus between when we remove and when we unmap.
        """
        if self.vios_wrap is None or self.stg_elem is None:
            # We never even got connected - nothing to do
            return
        LOG.warning(_LW("Unmapping boot disk %(disk_name)s of instance "
                        "%(instance_name)s from management partition via "
                        "Virtual I/O Server %(vios_name)s."),
                    {'disk_name': self.stg_elem.name,
                     'instance_name': self.instance.name,
                     'vios_name': self.vios_wrap.name})
        self.disk_dvr.disconnect_disk_from_mgmt(self.vios_wrap.uuid,
                                                self.stg_elem.name)

        if self.disk_path is None:
            # We did not discover the disk - nothing else to do.
            return
        LOG.warning(_LW("Removing disk %(disk_path)s from the management "
                        "partition."), {'disk_path': self.disk_path})
        mgmt.remove_block_dev(self.disk_path)
Example #38
    def remove(self, instance):
        """Remove the stored NVRAM for an instance.

        :param instance: The instance for which the NVRAM will be removed.
        """
        # Remove any pending updates
        self._pop_from_list(uuid=instance.uuid)
        # Remove it from the store
        try:
            self._api.delete(instance)
        except Exception as e:
            # Delete exceptions should not end the operation
            LOG.warning(_LW('Could not delete NVRAM: %s'),
                        e,
                        instance=instance)
Example #39
    def _update_internal_metric(self):
        """Uses the latest stats from the cache, and parses to Nova format.

        This method is invoked by the parent class after the raw metrics are
        updated.
        """
        # If there is no 'new' data (perhaps sampling is not turned on) then
        # return no data.
        if self.cur_phyp is None:
            return

        # Compute the cycles spent in FW since last collection.
        fw_cycles_delta = self._get_fw_cycles_delta()

        # Compute the cycles the system spent since last run.
        tot_cycles_delta = self._get_total_cycles_delta()

        # Get the user cycles since last run
        user_cycles_delta = self._gather_user_cycles_delta()

        # Make sure that the total cycles is higher than the user/fw cycles.
        # Should not happen, but just in case there is any precision loss from
        # CPU data back to system.
        if user_cycles_delta + fw_cycles_delta > tot_cycles_delta:
            LOG.warning(
                _LW("Host CPU Metrics determined that the total cycles reported "
                    "was less than the used cycles.  This indicates an issue with "
                    "the PCM data.  Please investigate the results.\n"
                    "Total Delta Cycles: %(tot_cycles)d\n"
                    "User Delta Cycles: %(user_cycles)d\n"
                    "Firmware Delta Cycles: %(fw_cycles)d"), {
                        'tot_cycles': tot_cycles_delta,
                        'fw_cycles': fw_cycles_delta,
                        'user_cycles': user_cycles_delta
                    })
            tot_cycles_delta = user_cycles_delta + fw_cycles_delta

        # Idle is the subtraction of all.
        idle_delta_cycles = (tot_cycles_delta - user_cycles_delta -
                             fw_cycles_delta)

        # The only moving cycles are idle, kernel and user.
        self.tot_data['idle'] += idle_delta_cycles
        self.tot_data['kernel'] += fw_cycles_delta
        self.tot_data['user'] += user_cycles_delta

        # Frequency doesn't accumulate like the others.  So this stays static.
        self.tot_data['frequency'] = self._get_cpu_freq()
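
Note: with purely illustrative numbers, the clamp above keeps the derived
idle value from going negative when precision loss makes the user plus
firmware cycles exceed the reported total.

user_cycles_delta = 6000
fw_cycles_delta = 5000
tot_cycles_delta = 10000   # reported total is too low in this sample

if user_cycles_delta + fw_cycles_delta > tot_cycles_delta:
    tot_cycles_delta = user_cycles_delta + fw_cycles_delta   # now 11000

idle_delta_cycles = tot_cycles_delta - user_cycles_delta - fw_cycles_delta
assert idle_delta_cycles == 0   # clamped to zero instead of -1000
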
Example #40
    def _update_internal_metric(self):
        """Uses the latest stats from the cache, and parses to Nova format.

        This method is invoked by the parent class after the raw metrics are
        updated.
        """
        # If there is no 'new' data (perhaps sampling is not turned on) then
        # return no data.
        if self.cur_phyp is None:
            return

        # Compute the cycles spent in FW since last collection.
        fw_cycles_delta = self._get_fw_cycles_delta()

        # Compute the cycles the system spent since last run.
        tot_cycles_delta = self._get_total_cycles_delta()

        # Get the user cycles since last run
        user_cycles_delta = self._gather_user_cycles_delta()

        # Make sure that the total cycles is higher than the user/fw cycles.
        # Should not happen, but just in case there is any precision loss from
        # CPU data back to system.
        if user_cycles_delta + fw_cycles_delta > tot_cycles_delta:
            LOG.warning(_LW(
                "Host CPU Metrics determined that the total cycles reported "
                "was less than the used cycles.  This indicates an issue with "
                "the PCM data.  Please investigate the results.\n"
                "Total Delta Cycles: %(tot_cycles)d\n"
                "User Delta Cycles: %(user_cycles)d\n"
                "Firmware Delta Cycles: %(fw_cycles)d"),
                {'tot_cycles': tot_cycles_delta, 'fw_cycles': fw_cycles_delta,
                 'user_cycles': user_cycles_delta})
            tot_cycles_delta = user_cycles_delta + fw_cycles_delta

        # Idle is the subtraction of all.
        idle_delta_cycles = (tot_cycles_delta - user_cycles_delta -
                             fw_cycles_delta)

        # The only moving cycles are idle, kernel and user.
        self.tot_data['idle'] += idle_delta_cycles
        self.tot_data['kernel'] += fw_cycles_delta
        self.tot_data['user'] += user_cycles_delta

        # Frequency doesn't accumulate like the others.  So this stays static.
        self.tot_data['frequency'] = self._get_cpu_freq()
Example #41
    def connect_instance_disk_to_mgmt(self, instance):
        """Connect an instance's boot disk to the management partition.

        :param instance: The instance whose boot disk is to be mapped.
        :return stg_elem: The storage element (LU, VDisk, etc.) that was mapped
        :return vios: The EntryWrapper of the VIOS from which the mapping was
                      made.
        :raise InstanceDiskMappingFailed: If the mapping could not be done.
        """
        msg_args = {"instance_name": instance.name}
        lpar_wrap = vm.get_instance_wrapper(self.adapter, instance, self.host_uuid)
        for stg_elem, vios in self.instance_disk_iter(instance, lpar_wrap=lpar_wrap):
            msg_args["disk_name"] = stg_elem.name
            msg_args["vios_name"] = vios.name

            # Create a new mapping.  NOTE: If there's an existing mapping on
            # the other VIOS but not this one, we'll create a second mapping
            # here.  It would take an extreme sequence of events to get to that
            # point, and the second mapping would be harmless anyway. The
            # alternative would be always checking all VIOSes for existing
            # mappings, which increases the response time of the common case by
            # an entire GET of VIOS+SCSI_MAPPING.
            LOG.debug(
                "Mapping boot disk %(disk_name)s of instance "
                "%(instance_name)s to the management partition from "
                "Virtual I/O Server %(vios_name)s.",
                msg_args,
            )
            try:
                tsk_map.add_vscsi_mapping(self.host_uuid, vios, self.mp_uuid, stg_elem)
                # If that worked, we're done.  add_vscsi_mapping logged.
                return stg_elem, vios
            except Exception as e:
                msg_args["exc"] = e
                LOG.warning(
                    _LW(
                        "Failed to map boot disk %(disk_name)s of "
                        "instance %(instance_name)s to the management "
                        "partition from Virtual I/O Server "
                        "%(vios_name)s: %(exc)s"
                    ),
                    msg_args,
                )
                # Try the next hit, if available.
        # We either didn't find the boot dev, or failed all attempts to map it.
        raise npvmex.InstanceDiskMappingFailed(**msg_args)
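
Note: a hedged usage sketch of connect_instance_disk_to_mgmt() follows; the
disk_dvr, instance and copy_data_from_mapped_disk names are placeholders.
Callers typically pair the mapping with disconnect_disk_from_mgmt (shown in
an earlier example) so the management-partition mapping never outlives the
operation.

stg_elem, vios = disk_dvr.connect_instance_disk_to_mgmt(instance)
try:
    copy_data_from_mapped_disk(stg_elem)   # placeholder for the real work
finally:
    disk_dvr.disconnect_disk_from_mgmt(vios.uuid, stg_elem.name)
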
Example #42
    def _discover_volume_on_vios(self, vios_w, volume_id):
        """Discovers an hdisk on a single vios for the volume.

        :param vios_w: VIOS wrapper to process
        :param volume_id: Volume to discover
        :returns: Status of the volume or None
        :returns: Device name or None
        :returns: LUN or None
        """
        # Get the initiator WWPNs, targets and LUN for the given VIOS.
        vio_wwpns, t_wwpns, lun = self._get_hdisk_itls(vios_w)

        # Build the ITL map and discover the hdisks on the Virtual I/O
        # Server (if any).
        itls = hdisk.build_itls(vio_wwpns, t_wwpns, lun)
        if len(itls) == 0:
            LOG.debug('No ITLs for VIOS %(vios)s for volume %(volume_id)s.', {
                'vios': vios_w.name,
                'volume_id': volume_id
            })
            return None, None, None

        status, device_name, udid = hdisk.discover_hdisk(
            self.adapter, vios_w.uuid, itls)

        if hdisk.good_discovery(status, device_name):
            LOG.info(_LI('Discovered %(hdisk)s on vios %(vios)s for '
                         'volume %(volume_id)s. Status code: %(status)s.'), {
                             'hdisk': device_name,
                             'vios': vios_w.name,
                             'volume_id': volume_id,
                             'status': str(status)
                         },
                     instance=self.instance)
        elif status == hdisk.LUAStatus.DEVICE_IN_USE:
            LOG.warning(_LW('Discovered device %(dev)s for volume %(volume)s '
                            'on %(vios)s is in use. Error code: %(status)s.'),
                        {
                            'dev': device_name,
                            'volume': volume_id,
                            'vios': vios_w.name,
                            'status': str(status)
                        },
                        instance=self.instance)

        return status, device_name, udid
Example #43
    def _update_internal_metric(self):
        """Uses the latest stats from the cache, and parses to Nova format.

        This method is invoked by the parent class after the raw metrics are
        updated.
        """
        # If there is no 'new' data (perhaps sampling is not turned on) then
        # return no data.
        if self.cur_phyp is None:
            self.cur_data = None
            return

        # Move the current data to the previous.  The previous data is used
        # for some internal calculations.  Blank out the current data just
        # in case of error.  Don't want to persist two copies of same.
        self.prev_data, self.cur_data = self.cur_data, None

        # Now we need the firmware cycles.
        fw_cycles = self.cur_phyp.sample.system_firmware.utilized_proc_cycles

        # Compute the max cycles.
        tot_cycles = self._get_total_cycles()

        # Get the total user cycles.
        user_cycles = self._gather_user_cycles()

        # Make sure that the total cycles is higher than the user/fw cycles.
        # Should not happen, but just in case there is any precision loss from
        # CPU data back to system.
        if user_cycles + fw_cycles > tot_cycles:
            LOG.warning(_LW("Host CPU Metrics determined that the total "
                            "cycles reported was less than the used cycles.  "
                            "This indicates an issue with the PCM data.  "
                            "Please investigate the results."))
            tot_cycles = user_cycles + fw_cycles

        # Idle is the subtraction of all.
        idle_cycles = tot_cycles - user_cycles - fw_cycles

        # Get the processor frequency.
        freq = self._get_cpu_freq()

        # Now save these cycles to the internal data structure.
        self.cur_data = {'idle': idle_cycles, 'kernel': fw_cycles,
                         'user': user_cycles, 'iowait': 0, 'frequency': freq}
Example #44
    def unplug(self, vif, cna_w_list=None):
        """Unplugs a virtual interface (network) from a VM.

        :param vif: The virtual interface to unplug from the instance.
        :param cna_w_list: (Optional, Default: None) The list of Client Network
                           Adapters from pypowervm.  Providing this input
                           allows for an improvement in operation speed.
        :return cna_w: The deleted Client Network Adapter.
        """
        # This is a default implementation that most implementations will
        # require.

        # Need to find the adapters if they were not provided
        if not cna_w_list:
            cna_w_list = vm.get_cnas(self.adapter, self.instance)

        cna_w = self._find_cna_for_vif(cna_w_list, vif)
        if not cna_w:
            LOG.warning(_LW('Unable to unplug VIF with mac %(mac)s for '
                            'instance %(inst)s.  The VIF was not found on '
                            'the instance.'), {
                                'mac': vif['address'],
                                'inst': self.instance.name
                            },
                        instance=self.instance)
            return None

        LOG.info(_LI('Deleting VIF with mac %(mac)s for instance %(inst)s.'), {
            'mac': vif['address'],
            'inst': self.instance.name
        },
                 instance=self.instance)
        try:
            cna_w.delete()
        except Exception as e:
            LOG.error(_LE('Unable to unplug VIF with mac %(mac)s for instance '
                          '%(inst)s.'), {
                              'mac': vif['address'],
                              'inst': self.instance.name
                          },
                      instance=self.instance)
            LOG.exception(e, instance=self.instance)
            raise exception.VirtualInterfaceUnplugException(
                reason=six.text_type(e))
        return cna_w
Example #45
 def unplug(self, vif, cna_w_list=None):
     mac = pvm_util.sanitize_mac_for_api(vif['address'])
     vnic = vm.get_vnics(self.adapter,
                         self.instance,
                         mac=mac,
                         one_result=True)
     if not vnic:
         LOG.warning(_LW('Unable to unplug VIF with mac %(mac)s for '
                         'instance %(inst)s.  No matching vNIC was found '
                         'on the instance.  VIF: %(vif)s'), {
                             'mac': mac,
                             'inst': self.instance.name,
                             'vif': vif
                         },
                     instance=self.instance)
         return None
     vnic.delete()
     return vnic
Example #46
    def connect_instance_disk_to_mgmt(self, instance):
        """Connect an instance's boot disk to the management partition.

        :param instance: The instance whose boot disk is to be mapped.
        :return stg_elem: The storage element (LU, VDisk, etc.) that was mapped
        :return vios: The EntryWrapper of the VIOS from which the mapping was
                      made.
        :raise InstanceDiskMappingFailed: If the mapping could not be done.
        """
        msg_args = {'instance_name': instance.name}
        lpar_wrap = vm.get_instance_wrapper(self.adapter, instance,
                                            self.host_uuid)
        for stg_elem, vios in self.instance_disk_iter(instance,
                                                      lpar_wrap=lpar_wrap):
            msg_args['disk_name'] = stg_elem.name
            msg_args['vios_name'] = vios.name

            # Create a new mapping.  NOTE: If there's an existing mapping on
            # the other VIOS but not this one, we'll create a second mapping
            # here.  It would take an extreme sequence of events to get to that
            # point, and the second mapping would be harmless anyway. The
            # alternative would be always checking all VIOSes for existing
            # mappings, which increases the response time of the common case by
            # an entire GET of VIOS+SCSI_MAPPING.
            LOG.debug(
                "Mapping boot disk %(disk_name)s of instance "
                "%(instance_name)s to the management partition from "
                "Virtual I/O Server %(vios_name)s.", msg_args)
            try:
                tsk_map.add_vscsi_mapping(self.host_uuid, vios, self.mp_uuid,
                                          stg_elem)
                # If that worked, we're done.  add_vscsi_mapping logged.
                return stg_elem, vios
            except Exception as e:
                msg_args['exc'] = e
                LOG.warning(
                    _LW("Failed to map boot disk %(disk_name)s of "
                        "instance %(instance_name)s to the management "
                        "partition from Virtual I/O Server "
                        "%(vios_name)s: %(exc)s"), msg_args)
                # Try the next hit, if available.
        # We either didn't find the boot dev, or failed all attempts to map it.
        raise npvmex.InstanceDiskMappingFailed(**msg_args)
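
A caller of connect_instance_disk_to_mgmt would normally pair the mapping with a guaranteed cleanup step so the boot disk never stays attached to the management partition. A hedged usage sketch follows; the disconnect_disk_from_mgmt counterpart shown here is an assumed name:

    # Hypothetical usage pattern for the method above.
    stg_elem, vios = disk_dvr.connect_instance_disk_to_mgmt(instance)
    try:
        # ... discover the device on the management partition and read it ...
        pass
    finally:
        # Assumed counterpart that removes the vSCSI mapping again.
        disk_dvr.disconnect_disk_from_mgmt(vios.uuid, stg_elem.name)
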
Ejemplo n.º 47
0
    def _ensure_phys_ports_for_system(self, npiv_port_maps, vios_wraps,
                                      fabric):
        """Ensures that the npiv_port_map is correct for the system.

        Rare scenarios can occur where the physical port on the NPIV port
        map does not match the actual port.  This is generally caused when the
        last volume is removed from the VM, the VM is migrated to another host,
        and then a new volume is attached.

        Stale metadata would be there (as it can't be cleaned out) on the
        attach.  This method clears that up.

        :param npiv_port_maps: The existing port maps.
        :param vios_wraps: The Virtual I/O Server wraps.
        :param fabric: The name of the fabric
        :return: The npiv_port_maps.  May be unchanged.
        """
        # Check that all physical ports in the mappings belong to 'this'
        # set of VIOSs.
        if all(pvm_vfcm.find_vios_for_wwpn(vios_wraps, pm[0])[0]
               for pm in npiv_port_maps):
            LOG.debug("Physical ports check out - just return maps.")
            return npiv_port_maps

        # If ANY of the VIOS ports were not there, rebuild the port maps
        LOG.debug("Rebuild existing_npiv_port_maps=%s. Reset fabric state." %
                  npiv_port_maps)
        v_wwpns = []
        for port_map in npiv_port_maps:
            v_wwpns.extend(port_map[1].split())
        self._set_fabric_state(fabric, FS_UNMAPPED)

        # Derive new maps and don't preserve existing maps
        npiv_port_maps = pvm_vfcm.derive_npiv_map(
            vios_wraps, self._fabric_ports(fabric), v_wwpns, preserve=False)
        LOG.debug("Rebuilt port maps: %s" % npiv_port_maps)
        self._set_fabric_meta(fabric, npiv_port_maps)
        LOG.warning(_LW("Had to update the system metadata for the WWPNs "
                        "due to incorrect physical WWPNs on fabric "
                        "%(fabric)s"),
                    {'fabric': fabric}, instance=self.instance)

        return npiv_port_maps
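
Each entry in npiv_port_maps pairs a physical port WWPN with a space-separated pair of virtual WWPNs, which is why the second element is split when the virtual WWPNs are collected above. A small illustration with made-up WWPN values:

    npiv_port_maps = [
        ('10000090FA1B2C3D', 'C05076065A7C0001 C05076065A7C0002'),
        ('10000090FA1B2C3E', 'C05076065A7C0003 C05076065A7C0004'),
    ]
    v_wwpns = []
    for port_map in npiv_port_maps:
        v_wwpns.extend(port_map[1].split())
    # v_wwpns now holds the four virtual WWPNs as a flat list, ready to be
    # passed back into derive_npiv_map.
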
Ejemplo n.º 48
0
    def _ensure_phys_ports_for_system(self, npiv_port_maps, vios_wraps,
                                      fabric):
        """Ensures that the npiv_port_map is correct for the system.

        Rare scenarios can occur where the physical port on the NPIV port
        map does not match the actual port.  This is generally caused when the
        last volume is removed from the VM, the VM is migrated to another host,
        and then a new volume is attached.

        Stale metadata would be there (as it can't be cleaned out) on the
        attach.  This method clears that up.

        :param npiv_port_maps: The existing port maps.
        :param vios_wraps: The Virtual I/O Server wraps.
        :param fabric: The name of the fabric
        :return: The npiv_port_maps.  May be unchanged.
        """
        # Check that all physical ports in the mappings belong to 'this'
        # set of VIOSs.
        if all(pvm_vfcm.find_vios_for_wwpn(vios_wraps, pm[0])[0]
               for pm in npiv_port_maps):
            LOG.debug("Physical ports check out - just return maps.")
            return npiv_port_maps

        # If ANY of the VIOS ports were not there, rebuild the port maps
        LOG.debug("Rebuild existing_npiv_port_maps=%s. Reset fabric state.",
                  npiv_port_maps)
        v_wwpns = []
        for port_map in npiv_port_maps:
            v_wwpns.extend(port_map[1].split())
        self._set_fabric_state(fabric, FS_UNMAPPED)

        # Derive new maps and don't preserve existing maps
        npiv_port_maps = pvm_vfcm.derive_npiv_map(
            vios_wraps, self._fabric_ports(fabric), v_wwpns, preserve=False)
        LOG.debug("Rebuilt port maps: %s", npiv_port_maps)
        self._set_fabric_meta(fabric, npiv_port_maps)
        LOG.warning(_LW("Had to update the system metadata for the WWPNs "
                        "due to incorrect physical WWPNs on fabric "
                        "%(fabric)s"),
                    {'fabric': fabric}, instance=self.instance)

        return npiv_port_maps
Ejemplo n.º 49
0
    def _get_data(self, instance):
        """Get the NVRAM data for the instance.

        :param instance: The instance to get the data for.
        :returns: The NVRAM data for the instance.
        """
        data = None
        try:
            # Get the data from the adapter.
            entry = vm.get_instance_wrapper(self._adapter, instance,
                                            xag=[pvm_const.XAG.NVRAM])
            data = entry.nvram
            LOG.debug('NVRAM for instance: %s', data, instance=instance)
        except pvm_exc.HttpError as e:
            # The VM might have been deleted since the store request.
            if e.response.status not in [404]:
                LOG.exception(e)
                LOG.warning(_LW('Unable to store the NVRAM for instance: '
                                '%s'), instance.name)
        return data
Ejemplo n.º 50
0
    def create_disk_from_image(self,
                               context,
                               instance,
                               image_meta,
                               disk_size,
                               image_type=DiskType.BOOT):
        """Creates a disk and copies the specified image to it.

        :param context: nova context used to retrieve image from glance
        :param instance: instance to create the disk for.
        :param nova.objects.ImageMeta image_meta:
            The metadata of the image of the instance.
        :param disk_size: The size of the disk to create in GB.  If smaller
                          than the image, it will be ignored (as the disk
                          must be at least as big as the image).  Must be an
                          int.
        :param image_type: the image type. See disk constants above.
        :return: The backing pypowervm storage object that was created.
        """

        # Attempt up to 4 times: one initial try plus up to 3 retries on exception.
        for attempt in range(1, 5):
            try:
                return self._create_disk_from_image(context,
                                                    instance,
                                                    image_meta,
                                                    disk_size,
                                                    image_type=image_type)
            except Exception as error:
                if attempt < 4:
                    LOG.exception(error)
                    LOG.warning(_LW("Instance %(inst)s Disk Upload attempt "
                                    "#%(attempt)d failed. Retrying the "
                                    "upload."), {
                                        "attempt": attempt,
                                        "inst": instance.name
                                    },
                                instance=instance)
                    time.sleep(random.randint(1, 5))
                else:
                    raise
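
The attempt loop above (one initial try plus up to three retries, with a short random sleep between attempts) can be factored into a reusable helper when the same pattern is needed elsewhere. A generic sketch, not part of the driver:

import functools
import random
import time


def retry_on_exception(attempts=4, max_sleep=5):
    """Retry the wrapped callable, sleeping 1..max_sleep seconds between tries."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(1, attempts + 1):
                try:
                    return func(*args, **kwargs)
                except Exception:
                    if attempt == attempts:
                        raise
                    time.sleep(random.randint(1, max_sleep))
        return wrapper
    return decorator
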
Ejemplo n.º 51
0
    def _get_data(self, instance):
        """Get the NVRAM data for the instance.

        :param instance: The instance to get the data for.
        :returns: The NVRAM data for the instance.
        """
        data = None
        try:
            # Get the data from the adapter.
            entry = vm.get_instance_wrapper(self._adapter,
                                            instance,
                                            xag=[pvm_const.XAG.NVRAM])
            data = entry.nvram
            LOG.debug('NVRAM for instance: %s', data, instance=instance)
        except pvm_exc.HttpError as e:
            # The VM might have been deleted since the store request.
            if e.response.status not in [404]:
                LOG.exception(e)
                LOG.warning(
                    _LW('Unable to store the NVRAM for instance: '
                        '%s'), instance.name)
        return data
Ejemplo n.º 52
0
    def _get_fabric_meta(self, fabric):
        """Gets the port map from the instance's system metadata.

        See _set_fabric_meta.

        :param fabric: The name of the fabric.
        :return: The port map (as defined via the derive_npiv_map pypowervm
                 method).
        """
        meta_key = self._sys_meta_fabric_key(fabric)

        if self.instance.system_metadata.get(meta_key) is None:
            # If no mappings exist, log a warning.
            LOG.warning(
                _LW("No NPIV mappings exist for instance %(inst)s on "
                    "fabric %(fabric)s.  May not have connected to "
                    "the fabric yet or fabric configuration was "
                    "recently modified."), {
                        'inst': self.instance.name,
                        'fabric': fabric
                    })
            return []

        wwpns = self.instance.system_metadata[meta_key]
        key_len = len(meta_key)
        iterator = 2
        meta_key = meta_key + "_" + str(iterator)
        while self.instance.system_metadata.get(meta_key) is not None:
            meta_value = self.instance.system_metadata[meta_key]
            wwpns += "," + meta_value
            iterator += 1
            meta_key = meta_key.replace(meta_key[key_len:],
                                        "_" + str(iterator))

        wwpns = wwpns.split(",")

        # Rebuild the WWPNs into the natural structure.
        return [(p, ' '.join([v1, v2]))
                for p, v1, v2 in zip(wwpns[::3], wwpns[1::3], wwpns[2::3])]
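
Because a single system_metadata value is limited in length, the WWPN list is spread across meta_key, meta_key_2, meta_key_3 and so on, then concatenated and re-chunked above. A worked example of the final reconstruction step, with illustrative values:

    # Flattened metadata: physical WWPN followed by its two virtual WWPNs.
    wwpns = ['10000090FA1B2C3D', 'C05076065A7C0001', 'C05076065A7C0002',
             '10000090FA1B2C3E', 'C05076065A7C0003', 'C05076065A7C0004']
    port_maps = [(p, ' '.join([v1, v2]))
                 for p, v1, v2 in zip(wwpns[::3], wwpns[1::3], wwpns[2::3])]
    # port_maps == [('10000090FA1B2C3D', 'C05076065A7C0001 C05076065A7C0002'),
    #               ('10000090FA1B2C3E', 'C05076065A7C0003 C05076065A7C0004')]
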
Ejemplo n.º 53
0
    def unplug(self, vif, cna_w_list=None):
        """Unplugs a virtual interface (network) from a VM.

        Extends the base implementation, but before invoking it will remove
        itself from the bridge it is connected to and delete the corresponding
        trunk device on the mgmt partition.

        :param vif: The virtual interface to unplug from the instance.
        :param cna_w_list: (Optional, Default: None) The list of Client Network
                           Adapters from pypowervm.  Providing this input
                           allows for an improvement in operation speed.
        :return cna_w: The deleted Client Network Adapter.
        """
        # Need to find the adapters if they were not provided
        if not cna_w_list:
            cna_w_list = vm.get_cnas(self.adapter, self.instance)

        # Find the CNA for this vif.
        cna_w = self._find_cna_for_vif(cna_w_list, vif)
        if not cna_w:
            LOG.warning(_LW('Unable to unplug VIF with mac %(mac)s for '
                            'instance %(inst)s.  The VIF was not found on '
                            'the instance.'),
                        {'mac': vif['address'], 'inst': self.instance.name})
            return None

        # Find and delete the trunk adapters
        trunks = pvm_cna.find_trunks(self.adapter, cna_w)

        dev_name = self.get_trunk_dev_name(vif)
        utils.execute('ip', 'link', 'set', dev_name, 'down', run_as_root=True)
        utils.execute('brctl', 'delif', vif['network']['bridge'],
                      dev_name, run_as_root=True)
        for trunk in trunks:
            trunk.delete()

        # Now delete the client CNA
        return super(PvmLBVifDriver, self).unplug(vif, cna_w_list=cna_w_list)
Ejemplo n.º 54
0
    def process(self, events):
        """Process the event that comes back from PowerVM.

        :param events: The pypowervm Event wrapper.
        """
        inst_cache = {}
        for pvm_event in events:
            try:
                # Pull all the pieces of the event.
                details = (pvm_event.detail.split(',')
                           if pvm_event.detail else [])

                if pvm_event.etype not in pvm_evt.EventType.NEW_CLIENT:
                    LOG.debug('PowerVM Event-Action: %s URI: %s Details %s',
                              pvm_event.etype, pvm_event.data, details)
                inst_cache[pvm_event.data] = self._handle_event(
                    pvm_event,
                    details,
                    inst=inst_cache.get(pvm_event.data, None))
            except Exception as e:
                LOG.exception(e)
                LOG.warning(_LW('Unable to parse event URI: %s from PowerVM.'),
                            pvm_event.data)
Ejemplo n.º 55
0
    def _get_fabric_meta(self, fabric):
        """Gets the port map from the instance's system metadata.

        See _set_fabric_meta.

        :param fabric: The name of the fabric.
        :return: The port map (as defined via the derive_npiv_map pypowervm
                 method).
        """
        meta_key = self._sys_meta_fabric_key(fabric)

        if self.instance.system_metadata.get(meta_key) is None:
            # If no mappings exist, log a warning.
            LOG.warning(_LW("No NPIV mappings exist for instance %(inst)s on "
                            "fabric %(fabric)s.  May not have connected to "
                            "the fabric yet or fabric configuration was "
                            "recently modified."),
                        {'inst': self.instance.name, 'fabric': fabric})
            return []

        wwpns = self.instance.system_metadata[meta_key]
        key_len = len(meta_key)
        iterator = 2
        meta_key = meta_key + "_" + str(iterator)
        while self.instance.system_metadata.get(meta_key) is not None:
            meta_value = self.instance.system_metadata[meta_key]
            wwpns += "," + meta_value
            iterator += 1
            meta_key = meta_key.replace(meta_key[key_len:],
                                        "_" + str(iterator))

        wwpns = wwpns.split(",")

        # Rebuild the WWPNs into the natural structure.
        return [(p, ' '.join([v1, v2])) for p, v1, v2
                in zip(wwpns[::3], wwpns[1::3], wwpns[2::3])]
Ejemplo n.º 56
0
    def revert(self, disk_dev_info, result, flow_failures):
        LOG.warning(_LW('Disk image being disconnected from instance %s'),
                    self.instance.name)
        # Note that the FeedTask is None - to force instant disconnect.
        self.disk_dvr.disconnect_image_disk(self.context, self.instance)
Ejemplo n.º 57
0
    def execute_impl(self, lpar_wrap):
        # Get the current adapters on the system
        cna_w_list = vm.get_cnas(self.adapter, self.instance)

        # Trim the VIFs down to the ones that haven't yet been created.
        crt_network_infos = []
        for network_info in self.network_infos:
            for cna_w in cna_w_list:
                if vm.norm_mac(cna_w.mac) == network_info['address']:
                    break
            else:
                crt_network_infos.append(network_info)

        # If there are no vifs to create, then just exit immediately.
        if len(crt_network_infos) == 0:
            return []

        # Check to see if the LPAR is OK to add VIFs to.
        modifiable, reason = lpar_wrap.can_modify_io()
        if not modifiable:
            LOG.error(_LE('Unable to create VIF(s) for instance %(sys)s.  The '
                          'VM was in a state where VIF plugging is not '
                          'acceptable.  The reason from the system is: '
                          '%(reason)s'),
                      {'sys': self.instance.name, 'reason': reason},
                      instance=self.instance)
            raise exception.VirtualInterfaceCreateException()

        # TODO(KYLEH): We're setting up to wait for an instance event.  The
        # event needs to come back to our compute manager so we need to ensure
        # the instance.host is set to our host.  We shouldn't need to do this
        # but in the evacuate/recreate case it may reflect the old host.
        # See: https://bugs.launchpad.net/nova/+bug/1535918
        undo_host_change = False
        if self.instance.host != CONF.host:
            LOG.warning(_LW('Instance was not assigned to this host. '
                            'It was assigned to: %s'), self.instance.host,
                        instance=self.instance)
            # Update the instance...
            old_host = self.instance.host
            self.instance.host = CONF.host
            self.instance.save()
            undo_host_change = True

        # For the VIFs, run the creates (and wait for the events back)
        try:
            with self.virt_api.wait_for_instance_event(
                    self.instance, self._get_vif_events(),
                    deadline=CONF.vif_plugging_timeout,
                    error_callback=self._vif_callback_failed):
                for network_info in crt_network_infos:
                    LOG.info(_LI('Creating VIF with mac %(mac)s for instance '
                                 '%(sys)s'),
                             {'mac': network_info['address'],
                              'sys': self.instance.name},
                             instance=self.instance)
                    vif.plug(self.adapter, self.host_uuid, self.instance,
                             network_info, self.slot_mgr)
        except eventlet.timeout.Timeout:
            LOG.error(_LE('Error waiting for VIF to be created for instance '
                          '%(sys)s'), {'sys': self.instance.name},
                      instance=self.instance)
            raise exception.VirtualInterfaceCreateException()
        finally:
            if undo_host_change:
                LOG.info(_LI('Undoing temporary host assignment to instance.'),
                         instance=self.instance)
                self.instance.host = old_host
                self.instance.save()

        # Return the list of created VIFs.
        return cna_w_list
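
The external events waited on above come from a small helper. A hedged sketch of what _get_vif_events might return; the exact event tuples and the filter on already-active VIFs are assumptions, not confirmed by this snippet:

    def _get_vif_events(self):
        """Return the (event_name, tag) tuples to wait for after plugging."""
        # Neutron emits 'network-vif-plugged' events tagged with the port id.
        return [('network-vif-plugged', vif_info['id'])
                for vif_info in self.network_infos
                if not vif_info.get('active', True)]
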
Ejemplo n.º 58
0
    def _validate_vopt_vg(self):
        """Will ensure that the virtual optical media repository exists.

        This method will connect to one of the Virtual I/O Servers on the
        system and ensure that a volume group exists in which the (temporary)
        virtual optical media can be hosted.

        If the volume group on an I/O Server goes down (perhaps due to
        maintenance), the system will rescan to determine if there is another
        I/O Server that can host the request.

        The very first invocation may be expensive.  It may also be expensive
        to call if a Virtual I/O Server unexpectedly goes down.

        If there are no Virtual I/O Servers that can support the media, then
        an exception will be thrown.
        """

        # If our static variables were set, then we should validate that the
        # repo is still running.  Otherwise, we need to reset the variables
        # (as it could be down for maintenance).
        if ConfigDrivePowerVM._cur_vg_uuid is not None:
            vio_uuid = ConfigDrivePowerVM._cur_vios_uuid
            vg_uuid = ConfigDrivePowerVM._cur_vg_uuid
            try:
                vg_resp = self.adapter.read(pvm_vios.VIOS.schema_type,
                                            vio_uuid, pvm_stg.VG.schema_type,
                                            vg_uuid)
                if vg_resp is not None:
                    return
            except Exception:
                pass

            LOG.info(_LI("An error occurred querying the virtual optical "
                         "media repository.  Attempting to re-establish "
                         "connection with a virtual optical media repository"))

        # If we're hitting this, either it's our first time booting up, or the
        # previously used Volume Group went offline (ex. VIOS went down for
        # maintenance).
        #
        # Since it doesn't matter which VIOS we use for the media repo, we
        # should query all Virtual I/O Servers and see if an appropriate
        # media repository exists.
        vios_resp = self.adapter.read(pvm_ms.System.schema_type,
                                      root_id=self.host_uuid,
                                      child_type=pvm_vios.VIOS.schema_type)
        vio_wraps = pvm_vios.VIOS.wrap(vios_resp)

        # First loop through the VIOSes to see if any have the right VG
        found_vg = None
        found_vios = None

        for vio_wrap in vio_wraps:
            # If the RMC state is not active, skip over to ensure we don't
            # timeout
            if vio_wrap.rmc_state != pvm_bp.RMCState.ACTIVE:
                continue

            try:
                vg_resp = self.adapter.read(pvm_vios.VIOS.schema_type,
                                            root_id=vio_wrap.uuid,
                                            child_type=pvm_stg.VG.schema_type)
                vg_wraps = pvm_stg.VG.wrap(vg_resp)
                for vg_wrap in vg_wraps:
                    if vg_wrap.name == CONF.powervm.vopt_media_volume_group:
                        found_vg = vg_wrap
                        found_vios = vio_wrap
                        break
            except Exception:
                LOG.warning(_LW('Unable to read volume groups for Virtual '
                                'I/O Server %s'), vio_wrap.name)

        # If we didn't find a volume group...raise the exception.  It should
        # default to being the rootvg, which all VIOSes will have.  Otherwise,
        # this is user specified, and if it was not found is a proper
        # exception path.
        if found_vg is None:
            raise npvmex.NoMediaRepoVolumeGroupFound(
                vol_grp=CONF.powervm.vopt_media_volume_group)

        # Ensure that there is a virtual optical media repository within it.
        if len(found_vg.vmedia_repos) == 0:
            vopt_repo = pvm_stg.VMediaRepos.bld(
                self.adapter, 'vopt', str(CONF.powervm.vopt_media_rep_size))
            found_vg.vmedia_repos = [vopt_repo]
            found_vg = found_vg.update()

        # At this point, we know that we've successfully set up the volume
        # group.  Save to the static class variables.
        ConfigDrivePowerVM._cur_vg_uuid = found_vg.uuid
        ConfigDrivePowerVM._cur_vios_uuid = found_vios.uuid
        ConfigDrivePowerVM._cur_vios_name = found_vios.name
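
The vopt_media_volume_group and vopt_media_rep_size settings referenced above are ordinary oslo.config options in the powervm group. A sketch of how such options would be registered; the defaults shown are assumptions, not necessarily the driver's real values:

from oslo_config import cfg

vopt_opts = [
    cfg.StrOpt('vopt_media_volume_group', default='rootvg',
               help='Volume group hosting the virtual optical media repo.'),
    cfg.IntOpt('vopt_media_rep_size', default=1,
               help='Size in GB of the virtual optical media repository.'),
]
CONF = cfg.CONF
CONF.register_opts(vopt_opts, group='powervm')
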
Ejemplo n.º 59
0
    def _disconnect_volume(self, slot_mgr):
        """Disconnect the volume.

        :param slot_mgr: A NovaSlotManager.  Used to delete the client slots
                         used when a volume is detached from the VM
        """
        def discon_vol_for_vio(vios_w):
            """Removes the volume from a specific Virtual I/O Server.

            :param vios_w: The VIOS wrapper.
            :return: True if a remove action was done against this VIOS.  False
                     otherwise.
            """
            LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
                      dict(vol=self.volume_id, uuid=vios_w.uuid))
            udid, device_name = None, None
            try:
                udid = self._get_udid()

                if udid:
                    # This will only work if vios_w has the Storage XAG.
                    device_name = vios_w.hdisk_from_uuid(udid)

                if not udid or not device_name:
                    # We lost our bdm data. We'll need to discover it.
                    status, device_name, udid = self._discover_volume_on_vios(
                        vios_w, self.volume_id)

                    # If we have a device name, but not a udid, at this point
                    # we should not continue.  The hdisk is in a bad state
                    # in the I/O Server.  Subsequent scrub code on future
                    # deploys will clean this up.
                    if not hdisk.good_discovery(status, device_name):
                        LOG.warning(_LW(
                            "Disconnect Volume: The backing hdisk for volume "
                            "%(volume_id)s on Virtual I/O Server %(vios)s is "
                            "not in a valid state.  No disconnect "
                            "actions to be taken as volume is not healthy."),
                            {'volume_id': self.volume_id, 'vios': vios_w.name})
                        return False

            except Exception as e:
                LOG.warning(_LW(
                    "Disconnect Volume: Failed to find disk on Virtual I/O "
                    "Server %(vios_name)s for volume %(volume_id)s. Volume "
                    "UDID: %(volume_uid)s.  Error: %(error)s"),
                    {'error': e, 'volume_uid': udid, 'vios_name': vios_w.name,
                     'volume_id': self.volume_id})
                return False

            # We have found the device name
            LOG.info(_LI("Disconnect Volume: Discovered the device %(hdisk)s "
                         "on Virtual I/O Server %(vios_name)s for volume "
                         "%(volume_id)s.  Volume UDID: %(volume_uid)s."),
                     {'volume_uid': udid, 'volume_id': self.volume_id,
                      'vios_name': vios_w.name, 'hdisk': device_name})

            # Add the action to remove the mapping when the stg_ftsk is run.
            partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)

            with lockutils.lock(hash(self)):
                self._add_remove_mapping(partition_id, vios_w.uuid,
                                         device_name, slot_mgr)

                # Add a step to also remove the hdisk
                self._add_remove_hdisk(vios_w, device_name)

            # Found a valid element to remove
            return True

        try:
            # See logic in _connect_volume for why this new FeedTask is here.
            discon_ftsk = tx.FeedTask(
                'discon_volume_from_vio', pvm_vios.VIOS.getter(
                    self.adapter, xag=[pvm_const.XAG.VIO_STOR]))
            # Find hdisks to disconnect
            discon_ftsk.add_functor_subtask(
                discon_vol_for_vio, provides='vio_modified', flag_update=False)
            ret = discon_ftsk.execute()

            # Warn if no hdisks disconnected.
            if not any([result['vio_modified']
                        for result in ret['wrapper_task_rets'].values()]):
                LOG.warning(_LW("Disconnect Volume: Failed to disconnect the "
                                "volume %(volume_id)s on ANY of the Virtual "
                                "I/O Servers for instance %(inst)s."),
                            {'inst': self.instance.name,
                             'volume_id': self.volume_id})

        except Exception as e:
            LOG.error(_LE('Cannot detach volumes from virtual machine: %s'),
                      self.vm_uuid)
            LOG.exception(_LE('Error: %s'), e)
            ex_args = {'volume_id': self.volume_id, 'reason': six.text_type(e),
                       'instance_name': self.instance.name}
            raise p_exc.VolumeDetachFailed(**ex_args)
Ejemplo n.º 60
0
        def discon_vol_for_vio(vios_w):
            """Removes the volume from a specific Virtual I/O Server.

            :param vios_w: The VIOS wrapper.
            :return: True if a remove action was done against this VIOS.  False
                     otherwise.
            """
            LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
                      dict(vol=self.volume_id, uuid=vios_w.uuid))
            udid, device_name = None, None
            try:
                udid = self._get_udid()

                if udid:
                    # This will only work if vios_w has the Storage XAG.
                    device_name = vios_w.hdisk_from_uuid(udid)

                if not udid or not device_name:
                    # We lost our bdm data. We'll need to discover it.
                    status, device_name, udid = self._discover_volume_on_vios(
                        vios_w, self.volume_id)

                    # If we have a device name, but not a udid, at this point
                    # we should not continue.  The hdisk is in a bad state
                    # in the I/O Server.  Subsequent scrub code on future
                    # deploys will clean this up.
                    if not hdisk.good_discovery(status, device_name):
                        LOG.warning(_LW(
                            "Disconnect Volume: The backing hdisk for volume "
                            "%(volume_id)s on Virtual I/O Server %(vios)s is "
                            "not in a valid state.  No disconnect "
                            "actions to be taken as volume is not healthy."),
                            {'volume_id': self.volume_id, 'vios': vios_w.name})
                        return False

            except Exception as e:
                LOG.warning(_LW(
                    "Disconnect Volume: Failed to find disk on Virtual I/O "
                    "Server %(vios_name)s for volume %(volume_id)s. Volume "
                    "UDID: %(volume_uid)s.  Error: %(error)s"),
                    {'error': e, 'volume_uid': udid, 'vios_name': vios_w.name,
                     'volume_id': self.volume_id})
                return False

            # We have found the device name
            LOG.info(_LI("Disconnect Volume: Discovered the device %(hdisk)s "
                         "on Virtual I/O Server %(vios_name)s for volume "
                         "%(volume_id)s.  Volume UDID: %(volume_uid)s."),
                     {'volume_uid': udid, 'volume_id': self.volume_id,
                      'vios_name': vios_w.name, 'hdisk': device_name})

            # Add the action to remove the mapping when the stg_ftsk is run.
            partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)

            with lockutils.lock(hash(self)):
                self._add_remove_mapping(partition_id, vios_w.uuid,
                                         device_name, slot_mgr)

                # Add a step to also remove the hdisk
                self._add_remove_hdisk(vios_w, device_name)

            # Found a valid element to remove
            return True