Example #1
    def disconnect_volume(self):
        """Disconnect the volume."""
        # Check if the VM is in a state where the detach is acceptable.
        lpar_w = vm.get_instance_wrapper(self.adapter, self.instance,
                                         self.host_uuid)
        capable, reason = lpar_w.can_modify_io()
        if not capable:
            raise exc.VolumeDetachFailed(volume_id=self.volume_id,
                                         instance_name=self.instance.name,
                                         reason=reason)

        # Run the disconnect
        self._disconnect_volume()

        if self.stg_ftsk.name == LOCAL_FEED_TASK:
            self.stg_ftsk.execute()
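
Example #1's public method is a template: it gates on can_modify_io() and then delegates to _disconnect_volume(), which subclasses implement (see the docstring in Example #3). A minimal sketch of such a hook follows; the class name and its base are hypothetical, not from the source.

    # Hypothetical subclass sketch.  Only the _disconnect_volume hook name is
    # taken from the examples; the class and its base are assumptions.
    class NoopVolumeAdapter(PowerVMVolumeAdapter):

        def _disconnect_volume(self):
            # A real adapter would remove the VIOS storage mappings here.
            LOG.debug("No-op disconnect for volume %s", self.volume_id,
                      instance=self.instance)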
Example #2
    def disconnect_volume(self, slot_mgr):
        """Disconnect the volume.

        :param slot_mgr: A NovaSlotManager.  Used to store/retrieve the client
                         slots used when a volume is detached from the VM.
        """
        # Check if the VM is in a state where the detach is acceptable.
        lpar_w = vm.get_instance_wrapper(self.adapter, self.instance)
        capable, reason = lpar_w.can_modify_io()
        if not capable:
            raise exc.VolumeDetachFailed(
                volume_id=self.volume_id, instance_name=self.instance.name,
                reason=reason)

        # Run the disconnect
        self._disconnect_volume(slot_mgr)

        if self.stg_ftsk.name == LOCAL_FEED_TASK:
            self.stg_ftsk.execute()
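
Example #2 threads the NovaSlotManager through to _add_remove_mapping (called in Examples #3 and #4). One plausible shape for that helper is sketched below, modeled on pypowervm's scsi_mapper task module; treat the exact calls (tsk_map.remove_maps, tsk_map.gen_match_func, slot_mgr.drop_vscsi_mapping) as assumptions rather than confirmed API.

    # Sketch only.  Assumes pypowervm.tasks.scsi_mapper is imported as
    # tsk_map and pypowervm.wrappers.storage as pvm_stor.
    def _add_remove_mapping(self, vm_uuid, vios_uuid, device_name, slot_mgr):
        def rm_func(vios_w):
            # Remove the physical volume mapping for this client LPAR.
            removed = tsk_map.remove_maps(
                vios_w, vm_uuid,
                match_func=tsk_map.gen_match_func(pvm_stor.PV,
                                                  names=[device_name]))
            for rmap in removed:
                # Record the freed client slot so a rebuild can reuse it.
                slot_mgr.drop_vscsi_mapping(rmap)
            return removed

        self.stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(rm_func)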
Example #3
    def _disconnect_volume(self, slot_mgr):
        """Disconnect the volume.

        This is the actual method to implement within the subclass.  Some
        transaction maintenance is done by the parent class.

        :param slot_mgr: A NovaSlotManager.  Used to delete the client slots
                         used when a volume is detached from the VM
        """
        def discon_vol_for_vio(vios_w):
            """Removes the volume from a specific Virtual I/O Server.

            :param vios_w: The VIOS wrapper.
            :return: True if a remove action was done against this VIOS.  False
                     otherwise.
            """
            LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
                      dict(vol=self.volume_id, uuid=vios_w.uuid),
                      instance=self.instance)
            device_name = None
            try:
                device_name = self._get_devname()

                if not device_name:
                    # We lost our bdm data.  If we have no device name at
                    # this point, we should not continue.  Subsequent scrub
                    # code on future deploys will clean this up.
                    LOG.warning(
                        "Disconnect Volume: The backing hdisk for volume "
                        "%(volume_id)s on Virtual I/O Server %(vios)s is "
                        "not in a valid state.  No disconnect "
                        "actions to be taken as volume is not healthy.", {
                            'volume_id': self.volume_id,
                            'vios': vios_w.name
                        },
                        instance=self.instance)
                    return False

            except Exception:
                LOG.exception(
                    "Disconnect Volume: Failed to find device on Virtual I/O "
                    "Server %(vios_name)s for volume %(volume_id)s.", {
                        'vios_name': vios_w.name,
                        'volume_id': self.volume_id
                    },
                    instance=self.instance)
                return False

            # We have found the device name
            LOG.info(
                "Disconnect Volume: Discovered the device %(hdisk)s "
                "on Virtual I/O Server %(vios_name)s for volume "
                "%(volume_id)s.", {
                    'volume_id': self.volume_id,
                    'vios_name': vios_w.name,
                    'hdisk': device_name
                },
                instance=self.instance)

            # Add the action to remove the mapping when the stg_ftsk is run.
            partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)

            with lockutils.lock(hash(self)):
                self._add_remove_mapping(partition_id, vios_w.uuid,
                                         device_name, slot_mgr)
                target_iqn = self.connection_info["data"]["target_iqn"]

                def logout():
                    hdisk.remove_iscsi(self.adapter, target_iqn, vios_w.uuid)

                self.stg_ftsk.add_post_execute(
                    task.FunctorTask(logout,
                                     name='remove_iSCSI_%s' % target_iqn))
            # Found a valid element to remove
            return True

        try:
            # See logic in _connect_volume for why this new FeedTask is here.
            discon_ftsk = tx.FeedTask(
                'discon_volume_from_vio',
                pvm_vios.VIOS.getter(self.adapter,
                                     xag=[pvm_const.XAG.VIO_STOR]))
            # Find hdisks to disconnect
            discon_ftsk.add_functor_subtask(discon_vol_for_vio,
                                            provides='vio_modified',
                                            flag_update=False)
            ret = discon_ftsk.execute()

            # Warn if no hdisks disconnected.
            if not any([
                    result['vio_modified']
                    for result in ret['wrapper_task_rets'].values()
            ]):
                LOG.warning(
                    "Disconnect Volume: Failed to disconnect the volume "
                    "%(volume_id)s on ANY of the Virtual I/O Servers.",
                    {'volume_id': self.volume_id},
                    instance=self.instance)

        except Exception as e:
            LOG.exception(
                'PowerVM error detaching volume from virtual '
                'machine.',
                instance=self.instance)
            ex_args = {
                'volume_id': self.volume_id,
                'reason': six.text_type(e),
                'instance_name': self.instance.name
            }
            raise p_exc.VolumeDetachFailed(**ex_args)
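
The iSCSI logout above relies on the FeedTask post-execute hook: functor subtasks run once per VIOS wrapper in the feed, and post-execute tasks fire exactly once after the whole feed is processed. A toy illustration of that ordering follows; do_per_vios_work and cleanup_once are placeholder callables, while the imports mirror the example above.

    # Toy ordering sketch using the same tx/task/pvm_vios/pvm_const imports.
    ftsk = tx.FeedTask('example_feed', pvm_vios.VIOS.getter(
        adapter, xag=[pvm_const.XAG.VIO_STOR]))
    # Runs once per VIOS wrapper in the feed.
    ftsk.add_functor_subtask(do_per_vios_work)
    # Runs exactly once, after every per-VIOS subtask has completed.
    ftsk.add_post_execute(task.FunctorTask(cleanup_once, name='cleanup_once'))
    ftsk.execute()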
Example #4
    def _disconnect_volume(self, slot_mgr):
        """Disconnect the volume.

        :param slot_mgr: A NovaSlotManager.  Used to delete the client slots
                         used when a volume is detached from the VM
        """
        def discon_vol_for_vio(vios_w):
            """Removes the volume from a specific Virtual I/O Server.

            :param vios_w: The VIOS wrapper.
            :return: True if a remove action was done against this VIOS.  False
                     otherwise.
            """
            LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
                      dict(vol=self.volume_id, uuid=vios_w.uuid),
                      instance=self.instance)
            device_name = None
            udid = self._get_udid()
            try:
                if udid:
                    # This will only work if vios_w has the Storage XAG.
                    device_name = vios_w.hdisk_from_uuid(udid)

                if not udid or not device_name:
                    # We lost our bdm data. We'll need to discover it.
                    status, device_name, udid = self._discover_volume_on_vios(
                        vios_w, self.volume_id)

                    # Check if the hdisk is in a bad state in the I/O Server.
                    # Subsequent scrub code on future deploys will clean it up.
                    if not hdisk.good_discovery(status, device_name):
                        LOG.warning(
                            "Disconnect Volume: The backing hdisk for volume "
                            "%(volume_id)s on Virtual I/O Server %(vios)s is "
                            "not in a valid state.  This may be the result of "
                            "an evacuate.", {
                                'volume_id': self.volume_id,
                                'vios': vios_w.name
                            },
                            instance=self.instance)
                        return False

            except Exception:
                LOG.exception(
                    "Disconnect Volume: Failed to find disk on Virtual I/O "
                    "Server %(vios_name)s for volume %(volume_id)s. Volume "
                    "UDID: %(volume_uid)s.", {
                        'vios_name': vios_w.name,
                        'volume_id': self.volume_id,
                        'volume_uid': udid
                    },
                    instance=self.instance)
                return False

            # We have found the device name
            LOG.info(
                "Disconnect Volume: Discovered the device %(hdisk)s "
                "on Virtual I/O Server %(vios_name)s for volume "
                "%(volume_id)s.  Volume UDID: %(volume_uid)s.", {
                    'volume_uid': udid,
                    'volume_id': self.volume_id,
                    'vios_name': vios_w.name,
                    'hdisk': device_name
                },
                instance=self.instance)

            # Add the action to remove the mapping when the stg_ftsk is run.
            partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)

            with lockutils.lock(hash(self)):
                self._add_remove_mapping(partition_id, vios_w.uuid,
                                         device_name, slot_mgr)

                # Add a step to also remove the hdisk
                self._add_remove_hdisk(vios_w, device_name)

            # Found a valid element to remove
            return True

        try:
            # See logic in _connect_volume for why this new FeedTask is here.
            discon_ftsk = tx.FeedTask(
                'discon_volume_from_vio',
                pvm_vios.VIOS.getter(self.adapter,
                                     xag=[pvm_const.XAG.VIO_STOR]))
            # Find hdisks to disconnect
            discon_ftsk.add_functor_subtask(discon_vol_for_vio,
                                            provides='vio_modified',
                                            flag_update=False)
            ret = discon_ftsk.execute()

            # Warn if no hdisks disconnected.
            if not any([
                    result['vio_modified']
                    for result in ret['wrapper_task_rets'].values()
            ]):
                LOG.warning(
                    "Disconnect Volume: Failed to disconnect the "
                    "volume %(volume_id)s on ANY of the Virtual "
                    "I/O Servers.", {'volume_id': self.volume_id},
                    instance=self.instance)

        except Exception as e:
            LOG.exception(
                'PowerVM error detaching volume from virtual '
                'machine.',
                instance=self.instance)
            ex_args = {
                'volume_id': self.volume_id,
                'reason': six.text_type(e),
                'instance_name': self.instance.name
            }
            raise p_exc.VolumeDetachFailed(**ex_args)
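
Example #4 also queues _add_remove_hdisk so the backing hdisk is deleted from the VIOS after the mapping removal runs. A plausible implementation is sketched below; the hdisk.remove_hdisk argument order and CONF.host are assumptions for illustration.

    # Sketch only: hdisk.remove_hdisk's signature and CONF.host are
    # assumptions, not confirmed against pypowervm.
    def _add_remove_hdisk(self, vios_w, device_name):
        def rm_hdisk():
            hdisk.remove_hdisk(self.adapter, CONF.host, device_name,
                               vios_w.uuid)

        name = 'rm_hdisk_%s_%s' % (vios_w.uuid, device_name)
        # Post-execute so the hdisk is deleted only after the mapping
        # removal subtasks have run.
        self.stg_ftsk.add_post_execute(task.FunctorTask(rm_hdisk, name=name))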
Example #5
    def _disconnect_volume(self):
        """Disconnect the volume."""
        def discon_vol_for_vio(vios_w):
            """Removes the volume from a specific Virtual I/O Server.

            :param vios_w: The VIOS wrapper.
            :return: True if a remove action was done against this VIOS.  False
                     otherwise.
            """
            LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
                      dict(vol=self.volume_id, uuid=vios_w.uuid))
            udid, device_name = None, None
            try:
                udid = self._get_udid()
                if not udid:
                    # We lost our bdm data. We'll need to discover it.
                    status, device_name, udid = self._discover_volume_on_vios(
                        vios_w, self.volume_id)

                if udid and not device_name:
                    device_name = vios_w.hdisk_from_uuid(udid)

                if not device_name:
                    LOG.warn(
                        _LW("Disconnect Volume: No mapped device found on Virtual "
                            "I/O Server %(vios)s for volume %(volume_id)s.  "
                            "Volume UDID: %(volume_uid)s"), {
                                'volume_uid': udid,
                                'volume_id': self.volume_id,
                                'vios': vios_w.name
                            })
                    return False

            except Exception as e:
                LOG.warn(
                    _LW("Disconnect Volume: Failed to find disk on Virtual I/O "
                        "Server %(vios_name)s for volume %(volume_id)s. Volume "
                        "UDID: %(volume_uid)s.  Error: %(error)s"), {
                            'error': e,
                            'volume_uid': udid,
                            'vios_name': vios_w.name,
                            'volume_id': self.volume_id
                        })
                return False

            # We have found the device name
            LOG.info(
                _LI("Disconnect Volume: Discovered the device %(hdisk)s "
                    "on Virtual I/O Server %(vios_name)s for volume "
                    "%(volume_id)s.  Volume UDID: %(volume_uid)s."), {
                        'volume_uid': udid,
                        'volume_id': self.volume_id,
                        'vios_name': vios_w.name,
                        'hdisk': device_name
                    })

            # Add the action to remove the mapping when the stg_ftsk is run.
            partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)

            with lockutils.lock(hash(self)):
                self._add_remove_mapping(partition_id, vios_w.uuid,
                                         device_name)

                # Add a step after the mapping removal to also remove the
                # hdisk.
                self._add_remove_hdisk(vios_w, device_name)

            # Found a valid element to remove
            return True

        try:
            # See logic in _connect_volume for why this new FeedTask is here.
            discon_ftsk = tx.FeedTask(
                'discon_volume_from_vio',
                pvm_vios.VIOS.getter(self.adapter,
                                     xag=[pvm_vios.VIOS.xags.STORAGE]))
            # Find hdisks to disconnect
            discon_ftsk.add_functor_subtask(discon_vol_for_vio,
                                            provides='vio_modified',
                                            flag_update=False)
            ret = discon_ftsk.execute()

            # Warn if no hdisks disconnected.
            if not any([
                    result['vio_modified']
                    for result in ret['wrapper_task_rets'].values()
            ]):
                LOG.warn(
                    _LW("Disconnect Volume: Failed to disconnect the "
                        "volume %(volume_id)s on ANY of the Virtual I/O "
                        "Servers for instance %(inst)s."), {
                            'inst': self.instance.name,
                            'volume_id': self.volume_id
                        })

        except Exception as e:
            LOG.error(_LE('Cannot detach volumes from virtual machine: %s'),
                      self.vm_uuid)
            LOG.exception(_LE('Error: %s'), e)
            ex_args = {
                'volume_id': self.volume_id,
                'reason': six.text_type(e),
                'instance_name': self.instance.name
            }
            raise p_exc.VolumeDetachFailed(**ex_args)
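
All five variants aggregate per-VIOS results the same way: FeedTask.execute() returns each subtask's 'provides' value keyed by VIOS UUID under 'wrapper_task_rets'. The sketch below shows the shape the any(...) check consumes; the UUID keys are illustrative only.

    # Illustrative return shape; the UUID keys are made up.
    ret = {'wrapper_task_rets': {
        'vios-uuid-1': {'vio_modified': True},
        'vios-uuid-2': {'vio_modified': False},
    }}
    # True if at least one VIOS had a mapping removed.
    modified = any(r['vio_modified']
                   for r in ret['wrapper_task_rets'].values())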