Example #1
    def _validate_vios_on_connection(self, num_vioses_found):
        """Validates that the correct number of VIOSes were discovered.

        Certain environments may have redundancy requirements.  For PowerVM
        this is achieved by having multiple Virtual I/O Servers.  This method
        will check to ensure that the operator's requirements for redundancy
        have been met.  If not, a specific error message will be raised.

        :param num_vioses_found: The number of VIOSes the hdisk was found on.
        """
        # Valid as long as the VIOS count meets or exceeds the config value.
        if num_vioses_found >= CONF.powervm.vscsi_vios_connections_required:
            return

        # Build a specific message for zero versus 'some, but not enough'
        # I/O Servers.
        if num_vioses_found == 0:
            msg = (_('Failed to discover valid hdisk on any Virtual I/O '
                     'Server for volume %(volume_id)s.') %
                   {'volume_id': self.volume_id})
        else:
            msg = (_('Failed to discover the hdisk on the required number of '
                     'Virtual I/O Servers.  Volume %(volume_id)s required '
                     '%(vios_req)d Virtual I/O Servers, but the disk was only '
                     'found on %(vios_act)d Virtual I/O Servers.') %
                   {'volume_id': self.volume_id, 'vios_act': num_vioses_found,
                    'vios_req': CONF.powervm.vscsi_vios_connections_required})
        ex_args = {'volume_id': self.volume_id, 'reason': msg,
                   'instance_name': self.instance.name}
        raise p_exc.VolumeAttachFailed(**ex_args)
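A minimal call-site sketch for the validator above. The names vios_wraps, hdisk_found_on, and vol_drv are hypothetical; only the meets-or-exceeds behavior shown in the method itself is assumed.

    # Hypothetical call site: tally the Virtual I/O Servers that reported
    # the hdisk, then let the validator enforce the configured redundancy.
    # It returns silently when enough were found and raises
    # VolumeAttachFailed otherwise.
    num_found = sum(1 for vios_w in vios_wraps if hdisk_found_on(vios_w))
    vol_drv._validate_vios_on_connection(num_found)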
Example #2
    def connect_volume(self):
        """Connects the volume."""
        # Check if the VM is in a state where the attach is acceptable.
        lpar_w = vm.get_instance_wrapper(self.adapter, self.instance,
                                         self.host_uuid)
        capable, reason = lpar_w.can_modify_io()
        if not capable:
            raise exc.VolumeAttachFailed(volume_id=self.volume_id,
                                         instance_name=self.instance.name,
                                         reason=reason)

        # Run the connect
        self._connect_volume()

        if self.stg_ftsk.name == LOCAL_FEED_TASK:
            self.stg_ftsk.execute()
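The final guard means the driver only executes a feed task it built for itself; when an orchestrator hands in a shared one, execution is deferred to that caller. Below is a sketch of the contrast, reusing the tx.FeedTask and VIOS.getter calls shown in Example #8; SomeVolumeDriver and adapter are hypothetical stand-ins.

    # Illustrative: with a shared (non-local) feed task, each driver only
    # queues its mapping subtasks; the orchestrator flushes them in one pass.
    shared_ftsk = tx.FeedTask('shared_feed', pvm_vios.VIOS.getter(adapter))
    drv_a = SomeVolumeDriver(..., stg_ftsk=shared_ftsk)  # hypothetical ctor
    drv_b = SomeVolumeDriver(..., stg_ftsk=shared_ftsk)
    drv_a.connect_volume()  # queues subtasks only; name != LOCAL_FEED_TASK
    drv_b.connect_volume()  # queues subtasks only
    shared_ftsk.execute()   # one execution covers both attaches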
Example #3
    def _add_maps_for_fabric(self, fabric):
        """Adds the vFC storage mappings to the VM for a given fabric.

        :param fabric: The fabric to add the mappings to.
        """
        npiv_port_maps = self._get_fabric_meta(fabric)
        vios_wraps = self.stg_ftsk.feed
        volume_id = self.connection_info['data']['volume_id']

        # This loop adds the maps from the appropriate VIOS to the client VM
        for npiv_port_map in npiv_port_maps:
            vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, npiv_port_map)

            if vios_w is None:
                LOG.error(
                    _LE("Mappings were not able to find a proper VIOS. "
                        "The port mappings were %s."), npiv_port_maps)
                raise exc.VolumeAttachFailed(
                    volume_id=volume_id,
                    instance_name=self.instance.name,
                    reason=_("Unable to find a Virtual I/O Server that "
                             "hosts the NPIV port map for the server."))

            ls = [
                LOG.info,
                _LI("Adding NPIV mapping for instance %(inst)s "
                    "for Virtual I/O Server %(vios)s."), {
                        'inst': self.instance.name,
                        'vios': vios_w.name
                    }
            ]

            # Add the subtask to add the specific map.
            self.stg_ftsk.wrapper_tasks[vios_w.uuid].add_functor_subtask(
                pvm_vfcm.add_map,
                self.host_uuid,
                self.vm_uuid,
                npiv_port_map,
                logspec=ls)

        # After all the mappings, make sure the fabric state is updated.
        def set_state():
            self._set_fabric_state(fabric, FS_INST_MAPPED)

        self.stg_ftsk.add_post_execute(
            task.FunctorTask(set_state,
                             name='fab_%s_%s' % (fabric, volume_id)))
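For reference, a sketch of the data this loop consumes, using the port-map shape noted in the comments of Example #7 ([ 'phys_wwpn', 'client_wwpn1 client_wwpn2' ]); the WWPN values below are made up.

    # Illustrative fabric metadata: each port map pairs a physical FC port
    # WWPN with the space-separated pair of client WWPNs mapped through it.
    npiv_port_maps = [
        ('10000090FA1B2C3D', 'C05076065A7C0002 C05076065A7C0003'),
        ('10000090FA1B2C3E', 'C05076065A7C0004 C05076065A7C0005'),
    ]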
Example #4
    def connect_volume(self, slot_mgr):
        """Connects the volume.

        :param slot_mgr: A NovaSlotManager.  Used to store/retrieve the client
                         slots used when a volume is attached to the VM.
        """
        # Check if the VM is in a state where the attach is acceptable.
        lpar_w = vm.get_instance_wrapper(self.adapter, self.instance)
        capable, reason = lpar_w.can_modify_io()
        if not capable:
            raise exc.VolumeAttachFailed(
                volume_id=self.volume_id, instance_name=self.instance.name,
                reason=reason)

        # Run the connect
        self._connect_volume(slot_mgr)

        if self.stg_ftsk.name == LOCAL_FEED_TASK:
            self.stg_ftsk.execute()
Example #5
    def test_connect_volume_to_initiators(self, mock_get_vm_id,
                                          mock_validate_vioses,
                                          mock_add_vscsi_mapping,
                                          mock_build_itls):
        """Tests that the connect w/out initiators throws errors."""
        mock_get_vm_id.return_value = 'partition_id'

        mock_instance = mock.Mock()
        mock_instance.system_metadata = {}

        mock_validate_vioses.side_effect = p_exc.VolumeAttachFailed(
            volume_id='1', reason='message', instance_name='inst')

        mock_build_itls.return_value = []
        self.assertRaises(p_exc.VolumeAttachFailed,
                          self.vol_drv.connect_volume, self.slot_mgr)

        # Validate that the VIOS validation was called with zero VIOSes.
        mock_validate_vioses.assert_called_with(0)
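The mock arguments are injected by @mock.patch decorators that sit above this excerpt. A plausible reconstruction follows; the patch targets are assumptions, and only the ordering rule is certain: stacked decorators apply bottom-up, so the lowest decorator maps to the first mock parameter.

    # Hypothetical patch stack -- the real targets live in the driver module.
    @mock.patch('path.to.driver._build_itls')         # -> mock_build_itls
    @mock.patch('path.to.driver._add_vscsi_mapping')  # -> mock_add_vscsi_mapping
    @mock.patch('path.to.driver._validate_vioses')    # -> mock_validate_vioses
    @mock.patch('path.to.vm.get_vm_id')               # -> mock_get_vm_id
    def test_connect_volume_to_initiators(self, mock_get_vm_id,
                                          mock_validate_vioses,
                                          mock_add_vscsi_mapping,
                                          mock_build_itls):
        ...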
Example #6
    def test_connect_volume_to_initiators(self, mock_validate_vioses,
                                          mock_add_vscsi_mapping,
                                          mock_lua_recovery, mock_build_itls):
        """Tests that the connect w/out initiators throws errors."""
        mock_lua_recovery.return_value = (hdisk.LUAStatus.DEVICE_AVAILABLE,
                                          'devname', 'udid')

        mock_instance = mock.Mock()
        mock_instance.system_metadata = {}

        mock_validate_vioses.side_effect = p_exc.VolumeAttachFailed(
            volume_id='1', reason='message', instance_name='inst')

        mock_build_itls.return_value = []
        self.assertRaises(p_exc.VolumeAttachFailed,
                          self.vol_drv.connect_volume)

        # Validate that the VIOS validation was called with zero VIOSes.
        mock_validate_vioses.assert_called_with(0)
Example #7
    def _add_maps_for_fabric(self, fabric, slot_mgr):
        """Adds the vFC storage mappings to the VM for a given fabric.

        :param fabric: The fabric to add the mappings to.
        :param slot_mgr: A NovaSlotManager.  Used to store/retrieve the client
                         slots used when a volume is attached to the VM.
        """
        vios_wraps = self.stg_ftsk.feed
        # Ensure the physical ports in the metadata are not for a different
        # host (stale). If so, rebuild the maps with current info.
        npiv_port_maps = self._ensure_phys_ports_for_system(
            self._get_fabric_meta(fabric), vios_wraps, fabric)
        volume_id = self.connection_info['serial']

        # This loop adds the maps from the appropriate VIOS to the client VM
        slot_ids = copy.deepcopy(
            slot_mgr.build_map.get_vfc_slots(fabric, len(npiv_port_maps)))
        for npiv_port_map in npiv_port_maps:
            vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, npiv_port_map)
            if vios_w is None:
                LOG.error(
                    "Mappings were not able to find a proper VIOS. "
                    "The port mappings were %s.",
                    npiv_port_maps,
                    instance=self.instance)
                raise exc.VolumeAttachFailed(
                    volume_id=volume_id,
                    instance_name=self.instance.name,
                    reason=_("Unable to find a Virtual I/O Server that "
                             "hosts the NPIV port map for the server."))
            ls = [
                LOG.info, "Adding NPIV mapping for instance %(inst)s "
                "for Virtual I/O Server %(vios)s.", {
                    'inst': self.instance.name,
                    'vios': vios_w.name
                }
            ]

            # Add the subtask to add the specific map.
            slot_num = slot_ids.pop()
            self.stg_ftsk.wrapper_tasks[vios_w.uuid].add_functor_subtask(
                pvm_vfcm.add_map,
                self.host_uuid,
                self.vm_uuid,
                npiv_port_map,
                lpar_slot_num=slot_num,
                logspec=ls)

        # Store the client slot number for the NPIV mapping (for rebuild
        # scenarios)
        def set_vol_meta():
            vios_wraps = self.stg_ftsk.feed
            port_maps = self._get_fabric_meta(fabric)
            for port_map in port_maps:
                # The port map is [ 'phys_wwpn', 'client_wwpn1 client_wwpn2' ]
                # We only need one of the two client wwpns.
                vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, port_map)
                c_wwpns = port_map[1].split()
                vfc_mapping = pvm_c_stor.c_wwpn_to_vfc_mapping(
                    vios_w, c_wwpns[0])

                # If there is no mapping, skip it.  It means that the client
                # WWPN is hosted on a different VIOS.
                if vfc_mapping is None:
                    continue

                # However, by this point we know that it is hosted on this
                # VIOS.  So the vfc_mapping will have the client adapter;
                # register its slot for rebuild scenarios.
                slot_mgr.register_vfc_mapping(vfc_mapping, fabric)

        self.stg_ftsk.add_post_execute(
            task.FunctorTask(set_vol_meta,
                             name='fab_slot_%s_%s' % (fabric, volume_id)))

        # After all the mappings, make sure the fabric state is updated.
        def set_state():
            self._set_fabric_state(fabric, FS_INST_MAPPED)

        self.stg_ftsk.add_post_execute(
            task.FunctorTask(set_state,
                             name='fab_%s_%s' % (fabric, volume_id)))
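A note on the copy.deepcopy near the top of this method, sketched with the example's own names. get_vfc_slots returns slot state that the rebuild map still owns, and the loop pops entries off the list, so working on a private copy keeps the stored slot data intact (the ownership detail is an inference, not stated in the source).

    # Illustrative: pop from a private copy so the slot map's own record of
    # the assigned client slots is never mutated.
    slot_ids = copy.deepcopy(
        slot_mgr.build_map.get_vfc_slots(fabric, len(npiv_port_maps)))
    while slot_ids:
        slot_num = slot_ids.pop()  # only the copy shrinks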
Example #8
    def _connect_volume(self):
        """Connects the volume."""
        def connect_volume_to_vio(vios_w):
            """Attempts to connect a volume to a given VIO.

            :param vios_w: The Virtual I/O Server wrapper to connect to.
            :return: True if the volume was connected.  False if the volume was
                     not (could be the Virtual I/O Server does not have
                     connectivity to the hdisk).
            """
            status, device_name, udid = self._discover_volume_on_vios(
                vios_w, self.volume_id)

            if hdisk.good_discovery(status, device_name):
                # Found a hdisk on this Virtual I/O Server.  Add the action to
                # map it to the VM when the stg_ftsk is executed.
                with lockutils.lock(hash(self)):
                    self._add_append_mapping(vios_w.uuid, device_name)

                # Save the UDID for the disk in the connection info.  It is
                # used for the detach.
                self._set_udid(udid)
                LOG.debug('Device attached: %s', device_name)

                # Valid attachment
                return True

            return False

        # It's about to get weird.  The transaction manager has a list of
        # VIOSes.  We could use those, but they only have SCSI mappings (by
        # design).  They do not have the storage extended attribute group,
        # which is very expensive to fetch.
        #
        # We need the storage xag when determining which mappings to add to
        # the system, but we don't want to tie it to the stg_ftsk.  If we
        # do, every retry, every etag gather, etc. takes MUCH longer.
        #
        # So we get the VIOSes with the storage xag here, separately, to save
        # the stg_ftsk from potentially having to run it multiple times.
        connect_ftsk = tx.FeedTask(
            'connect_volume_to_vio',
            pvm_vios.VIOS.getter(self.adapter,
                                 xag=[pvm_vios.VIOS.xags.STORAGE]))
        # Find valid hdisks and map to VM.
        connect_ftsk.add_functor_subtask(connect_volume_to_vio,
                                         provides='vio_modified',
                                         flag_update=False)
        ret = connect_ftsk.execute()

        # If no valid hdisk was found, log and exit
        if not any(result['vio_modified']
                   for result in ret['wrapper_task_rets'].values()):
            msg = (_('Failed to discover valid hdisk on any Virtual I/O '
                     'Server for volume %(volume_id)s.') % {
                         'volume_id': self.volume_id
                     })
            LOG.error(msg)
            ex_args = {
                'volume_id': self.volume_id,
                'reason': msg,
                'instance_name': self.instance.name
            }
            raise p_exc.VolumeAttachFailed(**ex_args)
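For orientation, the shape of the FeedTask result consumed at the end of the method, as implied by the lookups in the code; the UUID keys are illustrative.

    # Illustrative result of connect_ftsk.execute(): one entry per VIOS
    # uuid, each holding the value its subtask 'provides' -- here, whether
    # that VIOS could see and map the hdisk.
    ret = {
        'wrapper_task_rets': {
            'vios-uuid-1': {'vio_modified': True},
            'vios-uuid-2': {'vio_modified': False},
        }
    }
    attached = any(r['vio_modified']
                   for r in ret['wrapper_task_rets'].values())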