Example #1
0
File: fcvscsi.py  Project: y00187570/nova
    def attach_volume(self):
        """Connect the volume to the virtual machine.

        Verifies the LPAR can accept I/O changes, discovers valid hdisks
        on the Virtual I/O Servers, and maps them to the VM.

        :raise VolumeAttachFailed: if the LPAR cannot be modified or no
            VIOS produced a valid hdisk for the volume.
        """
        # Bail out early if the partition is not in a modifiable state.
        lpar_w = vm.get_instance_wrapper(self.adapter, self.instance)
        capable, reason = lpar_w.can_modify_io()
        if not capable:
            raise exc.VolumeAttachFailed(volume_id=self.volume_id,
                                         reason=reason)

        # The transaction manager's VIOSes carry only SCSI mappings (by
        # design) and lack storage, which is expensive to fetch.  The
        # storage xag is needed to decide which mappings to add, but tying
        # it to stg_ftsk would make every retry and etag gather far
        # slower.  So a separate FeedTask pulls the VIOSes with the
        # storage xag here, sparing stg_ftsk from running it repeatedly.
        attach_ftsk = pvm_tx.FeedTask(
            'attach_volume_to_vio',
            pvm_vios.VIOS.getter(
                self.adapter,
                xag=[pvm_const.XAG.VIO_STOR, pvm_const.XAG.VIO_SMAP]))

        # Discover valid hdisks and map them to the VM.
        attach_ftsk.add_functor_subtask(self._attach_volume_to_vio,
                                        provides='vio_modified',
                                        flag_update=False)

        ret = attach_ftsk.execute()

        # Count how many VIOSes were actually modified.
        vioses_modified = sum(
            1 for result in ret['wrapper_task_rets'].values()
            if result['vio_modified'])

        # At least one VIOS must have produced a valid hdisk.
        if not vioses_modified:
            msg = (_('Failed to discover valid hdisk on any Virtual I/O '
                     'Server for volume %(volume_id)s.') % {
                         'volume_id': self.volume_id
                     })
            raise exc.VolumeAttachFailed(volume_id=self.volume_id,
                                         reason=msg)

        self.stg_ftsk.execute()
Example #2
0
File: fcvscsi.py  Project: arbrandes/nova
    def attach_volume(self):
        """Attach the volume to the VM via the Virtual I/O Servers.

        Confirms the LPAR is in a state that permits I/O changes, then
        discovers valid hdisks on the VIOSes and maps them to the VM.

        :raise VolumeAttachFailed: if the LPAR is not modifiable or no
            VIOS found a valid hdisk for the volume.
        """
        # Refuse up front when the partition cannot take I/O changes.
        lpar_w = vm.get_instance_wrapper(self.adapter, self.instance)
        io_ok, why = lpar_w.can_modify_io()
        if not io_ok:
            raise exc.VolumeAttachFailed(
                volume_id=self.volume_id, reason=why)

        # The transaction manager's VIOS feed intentionally carries only
        # SCSI mappings; storage is expensive to gather.  The storage xag
        # is needed to choose the mappings to add, but attaching it to
        # stg_ftsk would slow every retry and etag gather.  A dedicated
        # FeedTask therefore fetches the VIOSes with the storage xag once,
        # keeping stg_ftsk lean.
        ftsk = pvm_tx.FeedTask(
            'attach_volume_to_vio',
            pvm_vios.VIOS.getter(self.adapter,
                                 xag=[pvm_const.XAG.VIO_STOR,
                                      pvm_const.XAG.VIO_SMAP]))

        # Discover valid hdisks and map them to the VM.
        ftsk.add_functor_subtask(
            self._attach_volume_to_vio, provides='vio_modified',
            flag_update=False)

        results = ftsk.execute()

        # Collect the VIOSes that were actually modified; at least one
        # must have produced a valid hdisk.
        modified = [r for r in results['wrapper_task_rets'].values()
                    if r['vio_modified']]
        if not modified:
            msg = (_('Failed to discover valid hdisk on any Virtual I/O '
                     'Server for volume %(volume_id)s.') %
                   {'volume_id': self.volume_id})
            raise exc.VolumeAttachFailed(
                volume_id=self.volume_id, reason=msg)

        self.stg_ftsk.execute()
Example #3
0
File: network.py  Project: arbrandes/nova
    def execute(self):
        """Unplug each VIF's Client Network Adapter from the instance.

        :raise VirtualInterfaceUnplugException: if the LPAR is not in a
            state that permits I/O changes.
        """
        # Fail fast when the partition cannot accept I/O changes.
        lpar_wrap = vm.get_instance_wrapper(self.adapter, self.instance)
        modifiable, reason = lpar_wrap.can_modify_io()
        if not modifiable:
            LOG.error("Unable to remove VIFs from instance in the system's "
                      "current state. The reason reported by the system is: "
                      "%s", reason, instance=self.instance)
            raise exception.VirtualInterfaceUnplugException(reason=reason)

        # Fetch the VM's current Client Network Adapters once, then
        # unplug the CNA backing each VIF.
        cna_w_list = vm.get_cnas(self.adapter, self.instance)
        for net_info in self.network_infos:
            vif.unplug(self.adapter, self.instance, net_info,
                       cna_w_list=cna_w_list)
Example #4
0
File: fcvscsi.py  Project: y00187570/nova
    def detach_volume(self):
        """Detach the volume from the virtual machine.

        Verifies the LPAR can accept I/O changes, then removes the hdisk
        mappings from the Virtual I/O Servers.

        :raise VolumeDetachFailed: if the LPAR cannot be modified or the
            detach hits an unexpected error.
        """
        # Check if the VM is in a state where the detach is acceptable.
        lpar_w = vm.get_instance_wrapper(self.adapter, self.instance)
        capable, reason = lpar_w.can_modify_io()
        if not capable:
            raise exc.VolumeDetachFailed(volume_id=self.volume_id,
                                         reason=reason)

        # Run the detach
        try:
            # See logic in attach_volume for why this new FeedTask is here.
            detach_ftsk = pvm_tx.FeedTask(
                'detach_volume_from_vio',
                pvm_vios.VIOS.getter(
                    self.adapter,
                    xag=[pvm_const.XAG.VIO_STOR, pvm_const.XAG.VIO_SMAP]))
            # Find hdisks to detach
            detach_ftsk.add_functor_subtask(self._detach_vol_for_vio,
                                            provides='vio_modified',
                                            flag_update=False)

            ret = detach_ftsk.execute()

            # Warn if no hdisks detached.  A generator lets any() stop at
            # the first modified VIOS instead of building a throwaway list.
            if not any(result['vio_modified']
                       for result in ret['wrapper_task_rets'].values()):
                LOG.warning(
                    "Detach Volume: Failed to detach the "
                    "volume %(volume_id)s on ANY of the Virtual "
                    "I/O Servers.", {'volume_id': self.volume_id},
                    instance=self.instance)

        except Exception as e:
            LOG.exception(
                'PowerVM error detaching volume from virtual '
                'machine.',
                instance=self.instance)
            ex_args = {'volume_id': self.volume_id, 'reason': str(e)}
            raise exc.VolumeDetachFailed(**ex_args)
        self.stg_ftsk.execute()
Example #5
0
    def _get_bootdisk_iter(self, instance):
        """Yield (storage_elem, VIOS) tuples for the instance's boot disk.

        storage_elem is the pypowervm storage element wrapper backing the
        instance boot disk; VIOS is the wrapper of the Virtual I/O Server
        that owns that storage element.

        :param instance: nova.objects.instance.Instance object owning the
                         requested disk.
        :return: Iterator of tuples of (storage_elem, VIOS).
        """
        lpar_wrap = vm.get_instance_wrapper(self._adapter, instance)
        is_boot_disk = self._disk_match_func(DiskType.BOOT, instance)
        for uuid in self._vios_uuids:
            # Fetch each VIOS with only the SCSI-mapping xag; that is all
            # the mapping search below needs.
            vios_wrap = pvm_vios.VIOS.get(
                self._adapter, uuid=uuid, xag=[pvm_const.XAG.VIO_SMAP])
            mappings = tsk_map.find_maps(vios_wrap.scsi_mappings,
                                         client_lpar_id=lpar_wrap.id,
                                         match_func=is_boot_disk)
            for mapping in mappings:
                yield mapping.backing_storage, vios_wrap
Example #6
0
File: fcvscsi.py  Project: arbrandes/nova
    def detach_volume(self):
        """Detach the volume from the virtual machine.

        Verifies the LPAR can accept I/O changes, then removes the hdisk
        mappings from the Virtual I/O Servers.

        :raise VolumeDetachFailed: if the LPAR cannot be modified or the
            detach hits an unexpected error.
        """
        # Check if the VM is in a state where the detach is acceptable.
        lpar_w = vm.get_instance_wrapper(self.adapter, self.instance)
        capable, reason = lpar_w.can_modify_io()
        if not capable:
            raise exc.VolumeDetachFailed(
                volume_id=self.volume_id, reason=reason)

        # Run the detach
        try:
            # See logic in attach_volume for why this new FeedTask is here.
            detach_ftsk = pvm_tx.FeedTask(
                'detach_volume_from_vio', pvm_vios.VIOS.getter(
                    self.adapter, xag=[pvm_const.XAG.VIO_STOR,
                                       pvm_const.XAG.VIO_SMAP]))
            # Find hdisks to detach
            detach_ftsk.add_functor_subtask(
                self._detach_vol_for_vio, provides='vio_modified',
                flag_update=False)

            ret = detach_ftsk.execute()

            # Warn if no hdisks detached.  any() over a generator stops at
            # the first modified VIOS and avoids building a throwaway list.
            if not any(result['vio_modified']
                       for result in ret['wrapper_task_rets'].values()):
                LOG.warning("Detach Volume: Failed to detach the "
                            "volume %(volume_id)s on ANY of the Virtual "
                            "I/O Servers.", {'volume_id': self.volume_id},
                            instance=self.instance)

        except Exception as e:
            LOG.exception('PowerVM error detaching volume from virtual '
                          'machine.', instance=self.instance)
            # str() replaces the dated six.text_type (Python 2 compat),
            # matching the attach path's error handling.
            ex_args = {'volume_id': self.volume_id, 'reason': str(e)}
            raise exc.VolumeDetachFailed(**ex_args)
        self.stg_ftsk.execute()
Example #7
0
    def execute(self):
        """Remove each VIF's Client Network Adapter from the VM.

        :raise VirtualInterfaceUnplugException: if the LPAR cannot accept
            I/O configuration changes in its current state.
        """
        # Guard clause: refuse immediately when the partition is not in a
        # modifiable state.
        lpar_wrap = vm.get_instance_wrapper(self.adapter, self.instance)
        modifiable, reason = lpar_wrap.can_modify_io()
        if not modifiable:
            LOG.error(
                "Unable to remove VIFs from instance in the system's "
                "current state. The reason reported by the system is: "
                "%s",
                reason,
                instance=self.instance)
            raise exception.VirtualInterfaceUnplugException(reason=reason)

        # One fetch of the VM's Client Network Adapters serves every
        # unplug below.
        cnas = vm.get_cnas(self.adapter, self.instance)
        for info in self.network_infos:
            vif.unplug(self.adapter, self.instance, info, cna_w_list=cnas)
Example #8
0
File: vm.py  Project: arbrandes/nova
 def execute(self):
     """Fetch and return the pypowervm LPAR wrapper for the instance."""
     lpar_wrap = vm.get_instance_wrapper(self.adapter, self.instance)
     return lpar_wrap
Example #9
0
 def execute(self):
     """Look up the instance's LPAR wrapper and hand it back."""
     wrapper = vm.get_instance_wrapper(self.adapter, self.instance)
     return wrapper