Example #1
        def discon_vol_for_vio(vios_w):
            """Removes the volume from a specific Virtual I/O Server.

            :param vios_w: The VIOS wrapper.
            :return: True if a remove action was done against this VIOS.  False
                     otherwise.
            """
            LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
                      dict(vol=self.volume_id, uuid=vios_w.uuid))
            udid, device_name = None, None
            try:
                udid = self._get_udid()
                if not udid:
                    # We lost our bdm data. We'll need to discover it.
                    status, device_name, udid = self._discover_volume_on_vios(
                        vios_w, self.volume_id)

                if udid and not device_name:
                    device_name = vios_w.hdisk_from_uuid(udid)

                if not device_name:
                    LOG.warning(_LW(
                        "Disconnect Volume: No mapped device found on Virtual "
                        "I/O Server %(vios)s for volume %(volume_id)s.  "
                        "Volume UDID: %(volume_uid)s"),
                        {'volume_uid': udid, 'volume_id': self.volume_id,
                         'vios': vios_w.name})
                    return False

            except Exception as e:
                LOG.warning(_LW(
                    "Disconnect Volume: Failed to find disk on Virtual I/O "
                    "Server %(vios_name)s for volume %(volume_id)s. Volume "
                    "UDID: %(volume_uid)s.  Error: %(error)s"),
                    {'error': e, 'volume_uid': udid, 'vios_name': vios_w.name,
                     'volume_id': self.volume_id})
                return False

            # We have found the device name
            LOG.info(_LI("Disconnect Volume: Discovered the device %(hdisk)s "
                         "on Virtual I/O Server %(vios_name)s for volume "
                         "%(volume_id)s.  Volume UDID: %(volume_uid)s."),
                     {'volume_uid': udid, 'volume_id': self.volume_id,
                      'vios_name': vios_w.name, 'hdisk': device_name})

            # Add the action to remove the mapping when the stg_ftsk is run.
            partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)

            with lockutils.lock(hash(self)):
                self._add_remove_mapping(partition_id, vios_w.uuid,
                                         device_name)

                # Add a step after the mapping removal to also remove the
                # hdisk.
                self._add_remove_hdisk(vios_w, device_name)

            # Found a valid element to remove
            return True
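
The nested helper above is written to be run once per Virtual I/O Server by the enclosing method's FeedTask. A minimal sketch of how it might be registered and its per-VIOS results tallied, mirroring the connect path in Example #14 (the 'vio_modified' name and the tallying are assumptions, not the verbatim driver code):

    # Queue discon_vol_for_vio to run against every VIOS in the feed; count
    # how many VIOSes actually had a mapping removed.
    self.stg_ftsk.add_functor_subtask(
        discon_vol_for_vio, provides='vio_modified', flag_update=False)
    ret = self.stg_ftsk.execute()
    vioses_modified = sum(
        1 for result in ret['wrapper_task_rets'].values()
        if result['vio_modified'])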
Example #2
    def test_get_vm_qp(self):
        def adapter_read(root_type, root_id=None, suffix_type=None,
                         suffix_parm=None):
            json_str = (u'{"IsVirtualServiceAttentionLEDOn":"false","Migration'
                        u'State":"Not_Migrating","CurrentProcessingUnits":0.1,'
                        u'"ProgressState":null,"PartitionType":"AIX/Linux","Pa'
                        u'rtitionID":1,"AllocatedVirtualProcessors":1,"Partiti'
                        u'onState":"not activated","RemoteRestartState":"Inval'
                        u'id","OperatingSystemVersion":"Unknown","AssociatedMa'
                        u'nagedSystem":"https://9.1.2.3:12443/rest/api/uom/Man'
                        u'agedSystem/98498bed-c78a-3a4f-b90a-4b715418fcb6","RM'
                        u'CState":"inactive","PowerManagementMode":null,"Parti'
                        u'tionName":"lpar-1-06674231-lpar","HasDedicatedProces'
                        u'sors":"false","ResourceMonitoringIPAddress":null,"Re'
                        u'ferenceCode":"00000000","CurrentProcessors":null,"Cu'
                        u'rrentMemory":512,"SharingMode":"uncapped"}')
            self.assertEqual('LogicalPartition', root_type)
            self.assertEqual('lpar_uuid', root_id)
            self.assertEqual('quick', suffix_type)
            resp = mock.MagicMock()
            if suffix_parm is None:
                resp.body = json_str
            elif suffix_parm == 'PartitionID':
                resp.body = '1'
            elif suffix_parm == 'CurrentProcessingUnits':
                resp.body = '0.1'
            elif suffix_parm == 'AssociatedManagedSystem':
                # The double quotes are important
                resp.body = ('"https://9.1.2.3:12443/rest/api/uom/ManagedSyste'
                             'm/98498bed-c78a-3a4f-b90a-4b715418fcb6"')
            else:
                self.fail('Unhandled quick property key %s' % suffix_parm)
            return resp

        ms_href = ('https://9.1.2.3:12443/rest/api/uom/ManagedSystem/98498bed-'
                   'c78a-3a4f-b90a-4b715418fcb6')
        self.apt.read.side_effect = adapter_read
        self.assertEqual(1, vm.get_vm_id(self.apt, 'lpar_uuid'))
        self.assertEqual(ms_href, vm.get_vm_qp(self.apt, 'lpar_uuid',
                                               'AssociatedManagedSystem'))
        self.assertEqual(0.1, vm.get_vm_qp(self.apt, 'lpar_uuid',
                                           'CurrentProcessingUnits'))
        qp_dict = vm.get_vm_qp(self.apt, 'lpar_uuid')
        self.assertEqual(ms_href, qp_dict['AssociatedManagedSystem'])
        self.assertEqual(1, qp_dict['PartitionID'])
        self.assertEqual(0.1, qp_dict['CurrentProcessingUnits'])

        resp = mock.MagicMock()
        resp.status = 404
        self.apt.read.side_effect = pvm_exc.Error('message', response=resp)
        self.assertRaises(exception.InstanceNotFound, vm.get_vm_qp, self.apt,
                          'lpar_uuid')

        resp.status = 500

        self.apt.read.side_effect = pvm_exc.Error('message', response=resp)
        self.assertRaises(pvm_exc.Error, vm.get_vm_qp, self.apt,
                          'lpar_uuid')
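
The test pins down get_vm_qp's contract: quick properties come back as JSON, a missing LPAR (404) surfaces as InstanceNotFound, and any other REST error is re-raised. A minimal sketch of an implementation consistent with this test (not the verbatim nova-powervm helper; the jsonutils usage is an assumption):

    from oslo_serialization import jsonutils

    def get_vm_qp(adapter, lpar_uuid, qprop=None):
        """Read one LogicalPartition 'quick' property, or all of them."""
        try:
            resp = adapter.read('LogicalPartition', root_id=lpar_uuid,
                                suffix_type='quick', suffix_parm=qprop)
        except pvm_exc.Error as e:
            # A 404 means the LPAR is gone; anything else propagates as-is.
            if e.response is not None and e.response.status == 404:
                raise exception.InstanceNotFound(instance_id=lpar_uuid)
            raise
        # Quick properties are JSON: '1' -> 1, '0.1' -> 0.1, and the
        # double-quoted href -> str; with no suffix_parm, the whole dict.
        return jsonutils.loads(resp.body)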
Example #3
    def vm_id(self):
        """Return the short ID (not UUID) of the LPAR for our instance.

        This method is unavailable during a pre live migration call since
        there is no instance of the VM on the destination host at the time.
        """
        if self._vm_id is None:
            self._vm_id = vm.get_vm_id(self.adapter, self.vm_uuid)
        return self._vm_id
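
In the driver this accessor is typically a read-only property with lazy caching; a sketch of the surrounding pattern (the class and constructor are illustrative, not the actual driver class):

    class VolumeAdapter(object):
        def __init__(self, adapter, vm_uuid):
            self.adapter = adapter
            self.vm_uuid = vm_uuid
            self._vm_id = None      # cache for the short LPAR ID

        @property
        def vm_id(self):
            # One REST round trip on first access; cached afterwards.
            if self._vm_id is None:
                self._vm_id = vm.get_vm_id(self.adapter, self.vm_uuid)
            return self._vm_id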
Example #4
        def set_slot_info():
            # 'path' and 'slot_mgr' come from the enclosing method's scope.
            vios_wraps = self.stg_ftsk.feed
            partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)
            for vios_w in vios_wraps:
                scsi_map = pvm_c_stor.udid_to_scsi_mapping(
                    vios_w, path, partition_id)
                if not scsi_map:
                    continue
                slot_mgr.register_vscsi_mapping(scsi_map)
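
This helper only records slot data, so it is deferred until the mappings actually exist. A sketch of the registration, mirroring Examples #7 and #14 (the task name is illustrative):

    # Hypothetical registration: run set_slot_info only after the FeedTask
    # has written the SCSI mappings.
    self.stg_ftsk.add_post_execute(
        task.FunctorTask(set_slot_info, name='slot_info'))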
Example #6
    def add_dlt_vopt_tasks(self, lpar_uuid, stg_ftsk, remove_mappings=True):
        """Deletes the virtual optical and (optionally) scsi mappings for a VM.

        :param lpar_uuid: The pypowervm UUID of the LPAR whose vopt is to be
                          removed.
        :param stg_ftsk: A FeedTask handling storage I/O.  The task to remove
                         the mappings and media from the VM will be deferred on
                         to the FeedTask passed in. The execute can be done all
                         in one method (batched together).  No updates are
                         actually made here; they are simply added to the
                         FeedTask.
        :param remove_mappings: (Optional, Default: True) If set to true, will
                                remove the SCSI mappings as part of the
                                operation.  If false, will leave the mapping
                                but detach the storage from it.  If the VM is
                                running, it may be necessary to do the latter
                                as some operating systems will not allow the
                                removal.
        """
        # The function to find the VOpt
        match_func = tsk_map.gen_match_func(pvm_stg.VOptMedia)

        def rm_vopt_mapping(vios_w):
            return tsk_map.remove_maps(vios_w,
                                       lpar_uuid,
                                       match_func=match_func)

        def detach_vopt_from_map(vios_w):
            return tsk_map.detach_storage(vios_w,
                                          lpar_uuid,
                                          match_func=match_func)

        # Add a function to remove the map or detach the vopt
        stg_ftsk.wrapper_tasks[self.vios_uuid].add_functor_subtask(
            rm_vopt_mapping if remove_mappings else detach_vopt_from_map)

        # Find the vOpt device (before the remove is done) so that it can be
        # removed.
        partition_id = vm.get_vm_id(self.adapter, lpar_uuid)
        media_mappings = tsk_map.find_maps(
            stg_ftsk.get_wrapper(self.vios_uuid).scsi_mappings,
            client_lpar_id=partition_id, match_func=match_func)
        media_elems = [x.backing_storage for x in media_mappings]

        def rm_vopt():
            LOG.info(_LI("Removing virtual optical for VM with UUID %s."),
                     lpar_uuid)
            vg_wrap = pvm_stg.VG.get(self.adapter,
                                     uuid=self.vg_uuid,
                                     parent_type=pvm_vios.VIOS,
                                     parent_uuid=self.vios_uuid)
            tsk_stg.rm_vg_storage(vg_wrap, vopts=media_elems)

        # Don't add this task if there's no media to delete (e.g. config drive)
        if media_elems:
            stg_ftsk.add_post_execute(task.FunctorTask(rm_vopt))
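
As the docstring says, nothing is updated here; the subtasks only fire when the caller executes the FeedTask. A hedged sketch of a caller (media_bld and the task name are hypothetical; the getter/xag usage mirrors Example #7):

    # Build one FeedTask over the VIOS feed, queue the vopt deletion on it,
    # then run the mapping removal and media delete in a single batch.
    stg_ftsk = tx.FeedTask(
        'media_cleanup',
        pvm_vios.VIOS.getter(adapter, xag=[pvm_const.XAG.VIO_SMAP]))
    media_bld.add_dlt_vopt_tasks(lpar_uuid, stg_ftsk, remove_mappings=True)
    stg_ftsk.execute()      # no REST updates happen before this call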
Example #7
    def _connect_volume(self, slot_mgr):
        """Connects the volume.

        :param connect_volume_to_vio: Function to connect a volume to the vio.
                                      :param vios_w: Vios wrapper.
                                      :return: True if mapping was created.
        :param slot_mgr: A NovaSlotManager.  Used to delete the client slots
                         used when a volume is detached from the VM
        """

        # It's about to get weird.  The transaction manager has a list of
        # VIOSes.  We could use those, but they only have SCSI mappings (by
        # design).  They do not have storage (super expensive).
        #
        # We need the storage xag when we are determining which mappings to
        # add to the system.  But we don't want to tie it to the stg_ftsk.  If
        # we do, every retry, every etag gather, etc... takes MUCH longer.
        #
        # So we get the VIOSes with the storage xag here, separately, to save
        # the stg_ftsk from potentially having to run it multiple times.
        connect_ftsk = tx.FeedTask(
            'connect_volume_to_vio',
            pvm_vios.VIOS.getter(
                self.adapter,
                xag=[pvm_const.XAG.VIO_STOR, pvm_const.XAG.VIO_SMAP]))

        # Find valid hdisks and map to VM.
        connect_ftsk.add_functor_subtask(self._connect_volume_to_vio,
                                         slot_mgr,
                                         provides='vio_modified',
                                         flag_update=False)

        ret = connect_ftsk.execute()

        # Check the number of VIOSes
        vioses_modified = 0
        for result in ret['wrapper_task_rets'].values():
            if result['vio_modified']:
                vioses_modified += 1

        partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)

        # Update the slot information
        def set_slot_info():
            vios_wraps = self.stg_ftsk.feed
            for vios_w in vios_wraps:
                scsi_map = pvm_c_stor.udid_to_scsi_mapping(
                    vios_w, self._get_udid(), partition_id)
                if not scsi_map:
                    continue
                slot_mgr.register_vscsi_mapping(scsi_map)

        self._validate_vios_on_connection(vioses_modified)
        self.stg_ftsk.add_post_execute(
            task.FunctorTask(set_slot_info,
                             name='hdisk_slot_%s' % self._get_udid()))
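
_validate_vios_on_connection is not shown in this snippet; a minimal sketch of the behavior it implies (the exception type is illustrative, the real driver raises its own attach-failure exception):

    def _validate_vios_on_connection(self, num_vioses_found):
        # The attach only counts as successful if at least one VIOS
        # gained a mapping for the volume.
        if num_vioses_found == 0:
            raise RuntimeError('Volume %s was not connected to any '
                               'Virtual I/O Server.' % self.volume_id)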
Example #8
    def add_dlt_vopt_tasks(self, lpar_uuid, stg_ftsk, remove_mappings=True):
        """Deletes the virtual optical and (optionally) scsi mappings for a VM.

        :param lpar_uuid: The pypowervm UUID of the LPAR whose vopt is to be
                          removed.
        :param stg_ftsk: A FeedTask handling storage I/O.  The task to remove
                         the mappings and media from the VM will be deferred on
                         to the FeedTask passed in. The execute can be done all
                         in one method (batched together).  No updates are
                         actually made here; they are simply added to the
                         FeedTask.
        :param remove_mappings: (Optional, Default: True) If set to true, will
                                remove the SCSI mappings as part of the
                                operation.  If false, will leave the mapping
                                but detach the storage from it.  If the VM is
                                running, it may be necessary to do the latter
                                as some operating systems will not allow the
                                removal.
        """
        # The function to find the VOpt
        match_func = tsk_map.gen_match_func(pvm_stg.VOptMedia)

        def rm_vopt_mapping(vios_w):
            return tsk_map.remove_maps(vios_w, lpar_uuid,
                                       match_func=match_func)

        def detach_vopt_from_map(vios_w):
            return tsk_map.detach_storage(vios_w, lpar_uuid,
                                          match_func=match_func)

        # Add a function to remove the map or detach the vopt
        stg_ftsk.wrapper_tasks[self.vios_uuid].add_functor_subtask(
            rm_vopt_mapping if remove_mappings else detach_vopt_from_map)

        # Find the vOpt device (before the remove is done) so that it can be
        # removed.
        partition_id = vm.get_vm_id(self.adapter, lpar_uuid)
        media_mappings = tsk_map.find_maps(
            stg_ftsk.get_wrapper(self.vios_uuid).scsi_mappings,
            client_lpar_id=partition_id, match_func=match_func)
        media_elems = [x.backing_storage for x in media_mappings]

        def rm_vopt():
            LOG.info(_LI("Removing virtual optical for VM with UUID %s."),
                     lpar_uuid)
            vg_rsp = self.adapter.read(pvm_vios.VIOS.schema_type,
                                       root_id=self.vios_uuid,
                                       child_type=pvm_stg.VG.schema_type,
                                       child_id=self.vg_uuid)
            tsk_stg.rm_vg_storage(pvm_stg.VG.wrap(vg_rsp), vopts=media_elems)

        # Don't add this task if there's no media to delete (e.g. config drive)
        if media_elems:
            stg_ftsk.add_post_execute(task.FunctorTask(rm_vopt))
Example #9
    def disconnect_image_disk(self, context, instance, lpar_uuid,
                              disk_type=None):
        """Disconnects the storage adapters from the image disk.

        :param context: nova context for operation
        :param instance: instance to disconnect the image for.
        :param lpar_uuid: The UUID for the pypowervm LPAR element.
        :param disk_type: The list of disk types to remove or None which means
            to remove all disks from the VM.
        :return: A list of all the backing storage elements that were
                 disconnected from the I/O Server and VM.
        """
        partition_id = vm.get_vm_id(self.adapter, lpar_uuid)
        return tsk_map.remove_vdisk_mapping(self.adapter, self.vios_uuid,
                                            partition_id,
                                            disk_prefixes=disk_type)
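
The return value is what makes a follow-up delete possible; a hedged usage sketch (delete_disks is the companion call in the nova-powervm disk drivers, but the exact signature here is an assumption):

    # Unmap first, then delete the now-unmapped backing storage.
    stor_elems = disk_dvr.disconnect_image_disk(context, instance, lpar_uuid)
    disk_dvr.delete_disks(context, instance, stor_elems)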
Example #10
    def add_dlt_vopt_tasks(self, lpar_uuid, stg_ftsk):
        """Deletes the virtual optical and scsi mappings for a VM.

        :param lpar_uuid: The pypowervm UUID of the LPAR to remove.
        :param stg_ftsk: A FeedTask handling storage I/O.  The task to remove
                         the mappings and media from the VM will be deferred on
                         to the FeedTask passed in. The execute can be done all
                         in one method (batched together).  No updates are
                         actually made here; they are simply added to the
                         FeedTask.
        """
        # The function to find the VOpt
        match_func = tsk_map.gen_match_func(pvm_stg.VOptMedia)

        def rm_vopt_mapping(vios_w):
            return tsk_map.remove_maps(vios_w,
                                       lpar_uuid,
                                       match_func=match_func)

        # Add a function to remove the map
        stg_ftsk.wrapper_tasks[self.vios_uuid].add_functor_subtask(
            rm_vopt_mapping)

        # Find the vOpt device (before the remove is done) so that it can be
        # removed.
        partition_id = vm.get_vm_id(self.adapter, lpar_uuid)
        media_mappings = tsk_map.find_maps(
            stg_ftsk.get_wrapper(self.vios_uuid).scsi_mappings,
            client_lpar_id=partition_id, match_func=match_func)
        media_elems = [x.backing_storage for x in media_mappings]

        def rm_vopt():
            LOG.info(_LI("Removing virtual optical for VM with UUID %s."),
                     lpar_uuid)
            vg_rsp = self.adapter.read(pvm_vios.VIOS.schema_type,
                                       root_id=self.vios_uuid,
                                       child_type=pvm_stg.VG.schema_type,
                                       child_id=self.vg_uuid)
            tsk_stg.rm_vg_storage(pvm_stg.VG.wrap(vg_rsp), vopts=media_elems)

        stg_ftsk.add_post_execute(task.FunctorTask(rm_vopt))
Example #11
    def add_dlt_vopt_tasks(self, lpar_uuid, stg_ftsk):
        """Deletes the virtual optical and scsi mappings for a VM.

        :param lpar_uuid: The pypowervm UUID of the LPAR to remove.
        :param stg_ftsk: A FeedTask handling storage I/O.  The task to remove
                         the mappings and media from the VM will be deferred on
                         to the FeedTask passed in. The execute can be done all
                         in one method (batched together).  No updates are
                         actually made here; they are simply added to the
                         FeedTask.
        """
        # The function to find the VOpt
        match_func = tsk_map.gen_match_func(pvm_stg.VOptMedia)

        def rm_vopt_mapping(vios_w):
            return tsk_map.remove_maps(vios_w, lpar_uuid,
                                       match_func=match_func)

        # Add a function to remove the map
        stg_ftsk.wrapper_tasks[self.vios_uuid].add_functor_subtask(
            rm_vopt_mapping)

        # Find the vOpt device (before the remove is done) so that it can be
        # removed.
        partition_id = vm.get_vm_id(self.adapter, lpar_uuid)
        media_mappings = tsk_map.find_maps(
            stg_ftsk.get_wrapper(self.vios_uuid).scsi_mappings,
            client_lpar_id=partition_id, match_func=match_func)
        media_elems = [x.backing_storage for x in media_mappings]

        def rm_vopt():
            LOG.info(_LI("Removing virtual optical for VM with UUID %s."),
                     lpar_uuid)
            vg_rsp = self.adapter.read(pvm_vios.VIOS.schema_type,
                                       root_id=self.vios_uuid,
                                       child_type=pvm_stg.VG.schema_type,
                                       child_id=self.vg_uuid)
            tsk_stg.rm_vg_storage(pvm_stg.VG.wrap(vg_rsp), vopts=media_elems)

        stg_ftsk.add_post_execute(task.FunctorTask(rm_vopt))
Example #12
    def init_recreate_map(self, adapter, vol_drv_iter):
        """To be used on a target system.  Builds the 'slot recreate' map.

        This is to initialize on the target system how the client slots should
        be rebuilt on the client VM.

        This should not be called unless it is a VM recreate.

        :param adapter: The pypowervm adapter.
        :param vol_drv_iter: An iterator of the volume drivers.
        """
        # This should only be called on a rebuild. Focus on being correct
        # first. Performance is secondary.

        # We need to scrub existing stale mappings, including those for the VM
        # we're creating.  It is critical that this happen *before* we create
        # any of the mappings we actually want this VM to have.
        scrub_ftsk = pvm_tstor.ComprehensiveScrub(adapter)
        lpar_id = vm.get_vm_id(adapter, vm.get_pvm_uuid(self.instance))
        pvm_tstor.add_lpar_storage_scrub_tasks([lpar_id], scrub_ftsk,
                                               lpars_exist=True)
        scrub_ftsk.execute()
        self._vios_wraps = scrub_ftsk.feed

        pv_vscsi_vol_to_vio = {}
        fabric_names = []
        for bdm, vol_drv in vol_drv_iter:
            if vol_drv.vol_type() == 'vscsi':
                self._pv_vscsi_vol_to_vio(vol_drv, pv_vscsi_vol_to_vio)
            elif len(fabric_names) == 0 and vol_drv.vol_type() == 'npiv':
                fabric_names = vol_drv._fabric_names()

        # Run the full initialization now that we have the prerequisite data
        try:
            self._build_map = slot_map.RebuildSlotMap(
                self, self._vios_wraps, pv_vscsi_vol_to_vio, fabric_names)
        except pvm_exc.InvalidHostForRebuild as e:
            raise p_exc.InvalidRebuild(error=six.text_type(e))
Example #13
        def discon_vol_for_vio(vios_w):
            """Removes the volume from a specific Virtual I/O Server.

            :param vios_w: The VIOS wrapper.
            :return: True if a remove action was done against this VIOS.  False
                     otherwise.
            """
            LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
                      dict(vol=self.volume_id, uuid=vios_w.uuid))
            udid, device_name = None, None
            try:
                udid = self._get_udid()

                if udid:
                    # This will only work if vios_w has the Storage XAG.
                    device_name = vios_w.hdisk_from_uuid(udid)

                if not udid or not device_name:
                    # We lost our bdm data. We'll need to discover it.
                    status, device_name, udid = self._discover_volume_on_vios(
                        vios_w, self.volume_id)

                    # If we have a device name, but not a udid, at this point
                    # we should not continue.  The hdisk is in a bad state
                    # in the I/O Server.  Subsequent scrub code on future
                    # deploys will clean this up.
                    if not hdisk.good_discovery(status, device_name):
                        LOG.warning(_LW(
                            "Disconnect Volume: The backing hdisk for volume "
                            "%(volume_id)s on Virtual I/O Server %(vios)s is "
                            "not in a valid state.  No disconnect "
                            "actions to be taken as volume is not healthy."),
                            {'volume_id': self.volume_id, 'vios': vios_w.name})
                        return False

            except Exception as e:
                LOG.warning(_LW(
                    "Disconnect Volume: Failed to find disk on Virtual I/O "
                    "Server %(vios_name)s for volume %(volume_id)s. Volume "
                    "UDID: %(volume_uid)s.  Error: %(error)s"),
                    {'error': e, 'volume_uid': udid, 'vios_name': vios_w.name,
                     'volume_id': self.volume_id})
                return False

            # We have found the device name
            LOG.info(_LI("Disconnect Volume: Discovered the device %(hdisk)s "
                         "on Virtual I/O Server %(vios_name)s for volume "
                         "%(volume_id)s.  Volume UDID: %(volume_uid)s."),
                     {'volume_uid': udid, 'volume_id': self.volume_id,
                      'vios_name': vios_w.name, 'hdisk': device_name})

            # Add the action to remove the mapping when the stg_ftsk is run.
            partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)

            with lockutils.lock(hash(self)):
                self._add_remove_mapping(partition_id, vios_w.uuid,
                                         device_name, slot_mgr)

                # Add a step to also remove the hdisk
                self._add_remove_hdisk(vios_w, device_name)

            # Found a valid element to remove
            return True
Example #14
    def _connect_volume(self, slot_mgr):
        """Connects the volume.

        :param slot_mgr: A NovaSlotManager.  Used to store/retrieve the client
                         slots used when a volume is attached to the VM
        """

        def connect_volume_to_vio(vios_w):
            """Attempts to connect a volume to a given VIO.

            :param vios_w: The Virtual I/O Server wrapper to connect to.
            :return: True if the volume was connected.  False if the volume was
                     not (could be the Virtual I/O Server does not have
                     connectivity to the hdisk).
            """
            status, device_name, udid = self._discover_volume_on_vios(
                vios_w, self.volume_id)

            # Get the slot and LUA to assign.
            slot, lua = slot_mgr.build_map.get_vscsi_slot(vios_w, udid)

            if slot_mgr.is_rebuild and not slot:
                LOG.debug('Detected a device with UDID %s on VIOS %s on the '
                          'rebuild that did not exist on the source.  '
                          'Ignoring.', udid, vios_w.uuid)
                return False

            if hdisk.good_discovery(status, device_name):
                # Found a hdisk on this Virtual I/O Server.  Add the action to
                # map it to the VM when the stg_ftsk is executed.
                with lockutils.lock(hash(self)):
                    self._add_append_mapping(vios_w.uuid, device_name,
                                             lpar_slot_num=slot, lua=lua)

                # Save the UDID for the disk in the connection info.  It is
                # used for the detach.
                self._set_udid(udid)
                LOG.debug('Device attached: %s', device_name)

                # Valid attachment
                return True

            return False

        # It's about to get weird.  The transaction manager has a list of
        # VIOSes.  We could use those, but they only have SCSI mappings (by
        # design).  They do not have storage (super expensive).
        #
        # We need the storage xag when we are determining which mappings to
        # add to the system.  But we don't want to tie it to the stg_ftsk.  If
        # we do, every retry, every etag gather, etc... takes MUCH longer.
        #
        # So we get the VIOSes with the storage xag here, separately, to save
        # the stg_ftsk from potentially having to run it multiple times.
        connect_ftsk = tx.FeedTask(
            'connect_volume_to_vio', pvm_vios.VIOS.getter(
                self.adapter, xag=[pvm_const.XAG.VIO_STOR,
                                   pvm_const.XAG.VIO_SMAP]))

        # Find valid hdisks and map to VM.
        connect_ftsk.add_functor_subtask(
            connect_volume_to_vio, provides='vio_modified', flag_update=False)

        ret = connect_ftsk.execute()

        # Check the number of VIOSes
        vioses_modified = 0
        for result in ret['wrapper_task_rets'].values():
            if result['vio_modified']:
                vioses_modified += 1

        partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)

        # Update the slot information
        def set_slot_info():
            vios_wraps = self.stg_ftsk.feed
            for vios_w in vios_wraps:
                scsi_map = pvm_c_stor.udid_to_scsi_mapping(
                    vios_w, self._get_udid(), partition_id)
                if not scsi_map:
                    continue
                slot_mgr.register_vscsi_mapping(scsi_map)

        self._validate_vios_on_connection(vioses_modified)
        self.stg_ftsk.add_post_execute(task.FunctorTask(
            set_slot_info, name='hdisk_slot_%s' % self._get_udid()))
Example #15
        def discon_vol_for_vio(vios_w):
            """Removes the volume from a specific Virtual I/O Server.

            :param vios_w: The VIOS wrapper.
            :return: True if a remove action was done against this VIOS.  False
                     otherwise.
            """
            LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
                      dict(vol=self.volume_id, uuid=vios_w.uuid),
                      instance=self.instance)
            device_name = None
            try:
                device_name = self._get_devname()

                if not device_name:
                    # We lost our bdm data.

                    # If we have no device name, at this point
                    # we should not continue.  Subsequent scrub code on future
                    # deploys will clean this up.
                    LOG.warning(
                        "Disconnect Volume: The backing hdisk for volume "
                        "%(volume_id)s on Virtual I/O Server %(vios)s is "
                        "not in a valid state.  No disconnect "
                        "actions to be taken as volume is not healthy.", {
                            'volume_id': self.volume_id,
                            'vios': vios_w.name
                        },
                        instance=self.instance)
                    return False

            except Exception:
                LOG.exception(
                    "Disconnect Volume: Failed to find device on Virtual I/O "
                    "Server %(vios_name)s for volume %(volume_id)s.", {
                        'vios_name': vios_w.name,
                        'volume_id': self.volume_id
                    },
                    instance=self.instance)
                return False

            # We have found the device name
            LOG.info(
                "Disconnect Volume: Discovered the device %(hdisk)s "
                "on Virtual I/O Server %(vios_name)s for volume "
                "%(volume_id)s.", {
                    'volume_id': self.volume_id,
                    'vios_name': vios_w.name,
                    'hdisk': device_name
                },
                instance=self.instance)

            # Add the action to remove the mapping when the stg_ftsk is run.
            partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)

            with lockutils.lock(hash(self)):
                self._add_remove_mapping(partition_id, vios_w.uuid,
                                         device_name, slot_mgr)
                target_iqn = self.connection_info["data"]["target_iqn"]

                def logout():
                    hdisk.remove_iscsi(self.adapter, target_iqn, vios_w.uuid)

                self.stg_ftsk.add_post_execute(
                    task.FunctorTask(logout,
                                     name='remove_iSCSI_%s' % target_iqn))
            # Found a valid element to remove
            return True
Example #16
    def test_get_vm_qp(self):
        def adapter_read(root_type,
                         root_id=None,
                         suffix_type=None,
                         suffix_parm=None,
                         helpers=None):
            json_str = (u'{"IsVirtualServiceAttentionLEDOn":"false","Migration'
                        u'State":"Not_Migrating","CurrentProcessingUnits":0.1,'
                        u'"ProgressState":null,"PartitionType":"AIX/Linux","Pa'
                        u'rtitionID":1,"AllocatedVirtualProcessors":1,"Partiti'
                        u'onState":"not activated","RemoteRestartState":"Inval'
                        u'id","OperatingSystemVersion":"Unknown","AssociatedMa'
                        u'nagedSystem":"https://9.1.2.3:12443/rest/api/uom/Man'
                        u'agedSystem/98498bed-c78a-3a4f-b90a-4b715418fcb6","RM'
                        u'CState":"inactive","PowerManagementMode":null,"Parti'
                        u'tionName":"lpar-1-06674231-lpar","HasDedicatedProces'
                        u'sors":"false","ResourceMonitoringIPAddress":null,"Re'
                        u'ferenceCode":"00000000","CurrentProcessors":null,"Cu'
                        u'rrentMemory":512,"SharingMode":"uncapped"}')
            self.assertEqual('LogicalPartition', root_type)
            self.assertEqual('lpar_uuid', root_id)
            self.assertEqual('quick', suffix_type)
            resp = mock.MagicMock()
            if suffix_parm is None:
                resp.body = json_str
            elif suffix_parm == 'PartitionID':
                resp.body = '1'
            elif suffix_parm == 'CurrentProcessingUnits':
                resp.body = '0.1'
            elif suffix_parm == 'AssociatedManagedSystem':
                # The double quotes are important
                resp.body = ('"https://9.1.2.3:12443/rest/api/uom/ManagedSyste'
                             'm/98498bed-c78a-3a4f-b90a-4b715418fcb6"')
            else:
                self.fail('Unhandled quick property key %s' % suffix_parm)
            return resp

        def adpt_read_no_log(*args, **kwds):
            helpers = kwds['helpers']
            try:
                helpers.index(pvm_log.log_helper)
            except ValueError:
                # Successful path since the logger shouldn't be there
                return adapter_read(*args, **kwds)

            self.fail('Log helper was found when it should not be')

        ms_href = ('https://9.1.2.3:12443/rest/api/uom/ManagedSystem/98498bed-'
                   'c78a-3a4f-b90a-4b715418fcb6')
        self.apt.read.side_effect = adapter_read
        self.assertEqual(1, vm.get_vm_id(self.apt, 'lpar_uuid'))
        self.assertEqual(
            ms_href,
            vm.get_vm_qp(self.apt, 'lpar_uuid', 'AssociatedManagedSystem'))
        self.apt.read.side_effect = adpt_read_no_log
        self.assertEqual(
            0.1,
            vm.get_vm_qp(self.apt,
                         'lpar_uuid',
                         'CurrentProcessingUnits',
                         log_errors=False))
        qp_dict = vm.get_vm_qp(self.apt, 'lpar_uuid', log_errors=False)
        self.assertEqual(ms_href, qp_dict['AssociatedManagedSystem'])
        self.assertEqual(1, qp_dict['PartitionID'])
        self.assertEqual(0.1, qp_dict['CurrentProcessingUnits'])

        resp = mock.MagicMock()
        resp.status = 404
        self.apt.read.side_effect = pvm_exc.HttpNotFound(resp)
        self.assertRaises(exception.InstanceNotFound,
                          vm.get_vm_qp,
                          self.apt,
                          'lpar_uuid',
                          log_errors=False)

        self.apt.read.side_effect = pvm_exc.Error("message", response=None)
        self.assertRaises(pvm_exc.Error,
                          vm.get_vm_qp,
                          self.apt,
                          'lpar_uuid',
                          log_errors=False)

        resp.status = 500
        self.apt.read.side_effect = pvm_exc.Error("message", response=resp)
        self.assertRaises(pvm_exc.Error,
                          vm.get_vm_qp,
                          self.apt,
                          'lpar_uuid',
                          log_errors=False)
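
Compared with Example #2, this version of the test pins down a log_errors flag: when False, the pypowervm log helper must be absent from the read. A sketch of just that branch of the implementation (the kwds handling and the copy semantics of adapter.helpers are assumptions):

    if not log_errors:
        # Strip the log helper so expected failures (e.g. querying a
        # deleted LPAR) don't land in the logs.
        helpers = adapter.helpers
        try:
            helpers.remove(pvm_log.log_helper)
        except ValueError:
            pass        # helper wasn't installed; nothing to strip
        kwds['helpers'] = helpers
    resp = adapter.read('LogicalPartition', **kwds)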
Example #17
    def disconnect_volume(self, adapter, host_uuid, vm_uuid, instance,
                          connection_info):
        """Disconnect the volume.

        :param adapter: The pypowervm adapter.
        :param host_uuid: The pypowervm UUID of the host.
        :param vm_uuid: The pypowervm UUID of the VM.
        :param instance: The nova instance that the volume should disconnect
                         from.
        :param connection_info: Comes from the BDM.  Example connection_info:
                {
                'driver_volume_type':'fibre_channel',
                'serial':u'10d9934e-b031-48ff-9f02-2ac533e331c8',
                'data':{
                   'initiator_target_map':{
                      '21000024FF649105':['500507680210E522'],
                      '21000024FF649104':['500507680210E522'],
                      '21000024FF649107':['500507680210E522'],
                      '21000024FF649106':['500507680210E522']
                   },
                   'target_discovered':False,
                   'qos_specs':None,
                   'volume_id':'10d9934e-b031-48ff-9f02-2ac533e331c8',
                   'target_lun':0,
                   'access_mode':'rw',
                   'target_wwn':'500507680210E522'
                 }
                }
        """

        volume_id = connection_info['data']['volume_id']

        try:
            # Get VIOS feed
            vios_feed = vios.get_active_vioses(adapter, host_uuid,
                                               xag=_XAGS)

            # Iterate through host vios list to find hdisks to disconnect.
            for vio_wrap in vios_feed:
                LOG.debug("vios uuid %s" % vio_wrap.uuid)
                try:
                    volume_udid = self._get_udid(instance, vio_wrap.uuid,
                                                 volume_id)
                    device_name = vio_wrap.hdisk_from_uuid(volume_udid)

                    if not device_name:
                        LOG.info(_LI(u"Disconnect Volume: No mapped device "
                                     "found on vios %(vios)s for volume "
                                     "%(volume_id)s. volume_uid: "
                                     "%(volume_uid)s ")
                                 % {'volume_uid': volume_udid,
                                    'volume_id': volume_id,
                                    'vios': vio_wrap.name})
                        continue

                except Exception as e:
                    LOG.error(_LE(u"Disconnect Volume: Failed to find disk "
                                  "on vios %(vios_name)s for volume "
                                  "%(volume_id)s. volume_uid: %(volume_uid)s."
                                  "Error: %(error)s")
                              % {'error': e, 'volume_uid': volume_udid,
                                 'volume_id': volume_id,
                                 'vios_name': vio_wrap.name})
                    continue

                # We have found the device name
                LOG.info(_LI(u"Disconnect Volume: Discovered the device "
                             "%(hdisk)s on vios %(vios_name)s for volume "
                             "%(volume_id)s. volume_uid: %(volume_uid)s.")
                         % {'volume_uid': volume_udid, 'volume_id': volume_id,
                            'vios_name': vio_wrap.name, 'hdisk': device_name})
                partition_id = vm.get_vm_id(adapter, vm_uuid)
                tsk_map.remove_pv_mapping(adapter, vio_wrap.uuid,
                                          partition_id, device_name)

                try:
                    # Attempt to remove the hDisk
                    hdisk.remove_hdisk(adapter, CONF.host, device_name,
                                       vio_wrap.uuid)
                except Exception as e:
                    # If there is a failure, log it, but don't stop the process
                    msg = (_LW("There was an error removing the hdisk "
                               "%(disk)s from the Virtual I/O Server.") %
                           {'disk': device_name})
                    LOG.warning(msg)
                    LOG.warning(e)

                # Disconnect volume complete, now remove key
                self._delete_udid_key(instance, vio_wrap.uuid, volume_id)

        except Exception as e:
            LOG.error(_LE('Cannot detach volumes from virtual machine: %s') %
                      vm_uuid)
            LOG.exception(_LE(u'Error: %s') % e)
            ex_args = {'backing_dev': device_name,
                       'instance_name': instance.name,
                       'reason': six.text_type(e)}
            raise pexc.VolumeDetachFailed(**ex_args)
Example #18
        def discon_vol_for_vio(vios_w):
            """Removes the volume from a specific Virtual I/O Server.

            :param vios_w: The VIOS wrapper.
            :return: True if a remove action was done against this VIOS.  False
                     otherwise.
            """
            LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
                      dict(vol=self.volume_id, uuid=vios_w.uuid),
                      instance=self.instance)
            device_name = None
            udid = self._get_udid()
            try:
                if udid:
                    # This will only work if vios_w has the Storage XAG.
                    device_name = vios_w.hdisk_from_uuid(udid)

                if not udid or not device_name:
                    # We lost our bdm data. We'll need to discover it.
                    status, device_name, udid = self._discover_volume_on_vios(
                        vios_w, self.volume_id)

                    # Check if the hdisk is in a bad state in the I/O Server.
                    # Subsequent scrub code on future deploys will clean it up.
                    if not hdisk.good_discovery(status, device_name):
                        LOG.warning(
                            "Disconnect Volume: The backing hdisk for volume "
                            "%(volume_id)s on Virtual I/O Server %(vios)s is "
                            "not in a valid state.  This may be the result of "
                            "an evacuate.", {
                                'volume_id': self.volume_id,
                                'vios': vios_w.name
                            },
                            instance=self.instance)
                        return False

            except Exception:
                LOG.exception(
                    "Disconnect Volume: Failed to find disk on Virtual I/O "
                    "Server %(vios_name)s for volume %(volume_id)s. Volume "
                    "UDID: %(volume_uid)s.", {
                        'vios_name': vios_w.name,
                        'volume_id': self.volume_id,
                        'volume_uid': udid
                    },
                    instance=self.instance)
                return False

            # We have found the device name
            LOG.info(
                "Disconnect Volume: Discovered the device %(hdisk)s "
                "on Virtual I/O Server %(vios_name)s for volume "
                "%(volume_id)s.  Volume UDID: %(volume_uid)s.", {
                    'volume_uid': udid,
                    'volume_id': self.volume_id,
                    'vios_name': vios_w.name,
                    'hdisk': device_name
                },
                instance=self.instance)

            # Add the action to remove the mapping when the stg_ftsk is run.
            partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)

            with lockutils.lock(hash(self)):
                self._add_remove_mapping(partition_id, vios_w.uuid,
                                         device_name, slot_mgr)

                # Add a step to also remove the hdisk
                self._add_remove_hdisk(vios_w, device_name)

            # Found a valid element to remove
            return True
Example #19
        def discon_vol_for_vio(vios_w):
            """Removes the volume from a specific Virtual I/O Server.

            :param vios_w: The VIOS wrapper.
            :return: True if a remove action was done against this VIOS.  False
                     otherwise.
            """
            # Check if the vios uuid exists in the list
            if vios_w.uuid not in self.vios_uuids:
                LOG.debug(
                    "Skipping disconnect of volume %(vol)s from "
                    "inactive vios uuid %(uuid)s.",
                    dict(vol=self.volume_id, uuid=vios_w.uuid))
                return False

            LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
                      dict(vol=self.volume_id, uuid=vios_w.uuid),
                      instance=self.instance)

            device_name = None
            try:
                udid = self._get_udid()
                if udid:
                    # Get the device name using UniqueDeviceID Identifier.
                    device_name = vios_w.hdisk_from_uuid(udid)

                if not udid or not device_name:
                    # If we have no device name, at this point
                    # we should not continue.  Subsequent scrub code on
                    # future deploys will clean this up.
                    LOG.warning(
                        "Disconnect Volume: The backing hdisk for volume "
                        "%(volume_id)s on Virtual I/O Server %(vios)s is "
                        "not in a valid state.  No disconnect "
                        "actions to be taken as volume is not healthy.", {
                            'volume_id': self.volume_id,
                            'vios': vios_w.name
                        },
                        instance=self.instance)
                    return False

            except Exception:
                LOG.exception(
                    "Disconnect Volume: Failed to find device on Virtual I/O "
                    "Server %(vios_name)s for volume %(volume_id)s.", {
                        'vios_name': vios_w.name,
                        'volume_id': self.volume_id
                    },
                    instance=self.instance)
                return False

            # We have found the device name
            LOG.info(
                "Disconnect Volume: Discovered the device %(hdisk)s "
                "on Virtual I/O Server %(vios_name)s for volume "
                "%(volume_id)s.", {
                    'volume_id': self.volume_id,
                    'vios_name': vios_w.name,
                    'hdisk': device_name
                },
                instance=self.instance)

            # Add the action to remove the mapping when the stg_ftsk is run.
            partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)

            with lockutils.lock(hash(self)):
                self._add_remove_mapping(partition_id, vios_w.uuid,
                                         device_name, slot_mgr)
                conn_data = self._get_iscsi_conn_props(vios_w)
                if not conn_data:
                    return False
                iqn = conn_data.get("target_iqns", conn_data.get("target_iqn"))
                portal = conn_data.get("target_portals",
                                       conn_data.get("target_portal"))
                lun = conn_data.get("target_luns", conn_data.get("target_lun"))

                def remove():
                    try:
                        hdisk.remove_iscsi(self.adapter,
                                           iqn,
                                           vios_w.uuid,
                                           lun=lun,
                                           iface_name=self.iface_name,
                                           portal=portal,
                                           multipath=self._is_multipath())
                    except (pvm_exc.ISCSIRemoveFailed,
                            pvm_exc.JobRequestFailed) as e:
                        LOG.warning(e)

                self.stg_ftsk.add_post_execute(
                    task.FunctorTask(remove,
                                     name='remove_%s_from_vios_%s' %
                                     (device_name, vios_w.uuid)))

            # Found a valid element to remove
            return True
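
The get() fallbacks above handle both multipath and single-path connection properties; a small illustration of the two shapes (the values are made up):

    # Multipath: plural keys, list values.
    multi = {'target_iqns': ['iqn.a', 'iqn.b'],
             'target_portals': ['10.0.0.1:3260', '10.0.0.2:3260'],
             'target_luns': [0, 0]}
    # Single path: singular keys, scalar values.
    single = {'target_iqn': 'iqn.a',
              'target_portal': '10.0.0.1:3260',
              'target_lun': 0}
    # conn_data.get("target_iqns", conn_data.get("target_iqn")) yields the
    # list when multipath, else the single value.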
Example #20
        def discon_vol_for_vio(vios_w):
            """Removes the volume from a specific Virtual I/O Server.

            :param vios_w: The VIOS wrapper.
            :return: True if a remove action was done against this VIOS.  False
                     otherwise.
            """
            LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
                      dict(vol=self.volume_id, uuid=vios_w.uuid))
            udid, device_name = None, None
            try:
                udid = self._get_udid()
                if not udid:
                    # We lost our bdm data. We'll need to discover it.
                    status, device_name, udid = self._discover_volume_on_vios(
                        vios_w, self.volume_id)

                    # If we have a device name, but not a udid, at this point
                    # we should not continue.  The hdisk is in a bad state
                    # in the I/O Server.  Subsequent scrub code on future
                    # deploys will clean this up.
                    if not hdisk.good_discovery(status, device_name):
                        LOG.warning(
                            _LW("Disconnect Volume: The backing hdisk for volume "
                                "%(volume_id)s on Virtual I/O Server %(vios)s is "
                                "not in a valid state.  No disconnect "
                                "actions to be taken as volume is not healthy."
                                ), {
                                    'volume_id': self.volume_id,
                                    'vios': vios_w.name
                                })
                        return False

                if udid and not device_name:
                    device_name = vios_w.hdisk_from_uuid(udid)

                if not device_name:
                    LOG.warning(
                        _LW("Disconnect Volume: No mapped device found on Virtual "
                            "I/O Server %(vios)s for volume %(volume_id)s.  "
                            "Volume UDID: %(volume_uid)s"), {
                                'volume_uid': udid,
                                'volume_id': self.volume_id,
                                'vios': vios_w.name
                            })
                    return False

            except Exception as e:
                LOG.warning(
                    _LW("Disconnect Volume: Failed to find disk on Virtual I/O "
                        "Server %(vios_name)s for volume %(volume_id)s. Volume "
                        "UDID: %(volume_uid)s.  Error: %(error)s"), {
                            'error': e,
                            'volume_uid': udid,
                            'vios_name': vios_w.name,
                            'volume_id': self.volume_id
                        })
                return False

            # We have found the device name
            LOG.info(
                _LI("Disconnect Volume: Discovered the device %(hdisk)s "
                    "on Virtual I/O Server %(vios_name)s for volume "
                    "%(volume_id)s.  Volume UDID: %(volume_uid)s."), {
                        'volume_uid': udid,
                        'volume_id': self.volume_id,
                        'vios_name': vios_w.name,
                        'hdisk': device_name
                    })

            # Add the action to remove the mapping when the stg_ftsk is run.
            partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)

            with lockutils.lock(hash(self)):
                self._add_remove_mapping(partition_id, vios_w.uuid,
                                         device_name)

                # Add a step to also remove the hdisk
                self._add_remove_hdisk(vios_w, device_name)

            # Found a valid element to remove
            return True
Example #21
        def discon_vol_for_vio(vios_w):
            """Removes the volume from a specific Virtual I/O Server.

            :param vios_w: The VIOS wrapper.
            :return: True if a remove action was done against this VIOS.  False
                     otherwise.
            """
            LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
                      dict(vol=self.volume_id, uuid=vios_w.uuid))
            udid, device_name = None, None
            try:
                udid = self._get_udid()
                if not udid:
                    # We lost our bdm data. We'll need to discover it.
                    status, device_name, udid = self._discover_volume_on_vios(
                        vios_w, self.volume_id)

                if udid and not device_name:
                    device_name = vios_w.hdisk_from_uuid(udid)

                if not device_name:
                    LOG.warning(
                        _LW("Disconnect Volume: No mapped device found on Virtual "
                            "I/O Server %(vios)s for volume %(volume_id)s.  "
                            "Volume UDID: %(volume_uid)s"), {
                                'volume_uid': udid,
                                'volume_id': self.volume_id,
                                'vios': vios_w.name
                            })
                    return False

            except Exception as e:
                LOG.warning(
                    _LW("Disconnect Volume: Failed to find disk on Virtual I/O "
                        "Server %(vios_name)s for volume %(volume_id)s. Volume "
                        "UDID: %(volume_uid)s.  Error: %(error)s"), {
                            'error': e,
                            'volume_uid': udid,
                            'vios_name': vios_w.name,
                            'volume_id': self.volume_id
                        })
                return False

            # We have found the device name
            LOG.info(
                _LI("Disconnect Volume: Discovered the device %(hdisk)s "
                    "on Virtual I/O Server %(vios_name)s for volume "
                    "%(volume_id)s.  Volume UDID: %(volume_uid)s."), {
                        'volume_uid': udid,
                        'volume_id': self.volume_id,
                        'vios_name': vios_w.name,
                        'hdisk': device_name
                    })

            # Add the action to remove the mapping when the stg_ftsk is run.
            partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)

            with lockutils.lock(hash(self)):
                self._add_remove_mapping(partition_id, vios_w.uuid,
                                         device_name)

                # Add a step after the mapping removal to also remove the
                # hdisk.
                self._add_remove_hdisk(vios_w, device_name)

            # Found a valid element to remove
            return True