Code Example #1
File: storage.py Project: arbrandes/nova
    def execute(self):
        """Map the instance's boot disk and discover it."""

        # Search for boot disk on the NovaLink partition.
        if self.disk_dvr.mp_uuid in self.disk_dvr._vios_uuids:
            dev_name = self.disk_dvr.get_bootdisk_path(
                self.instance, self.disk_dvr.mp_uuid)
            if dev_name is not None:
                return None, None, dev_name

        self.stg_elem, self.vios_wrap = (
            self.disk_dvr.connect_instance_disk_to_mgmt(self.instance))
        new_maps = pvm_smap.find_maps(
            self.vios_wrap.scsi_mappings, client_lpar_id=self.disk_dvr.mp_uuid,
            stg_elem=self.stg_elem)
        if not new_maps:
            raise exception.NewMgmtMappingNotFoundException(
                stg_name=self.stg_elem.name, vios_name=self.vios_wrap.name)

        # new_maps should be length 1, but even if it's not - i.e. we somehow
        # matched more than one mapping of the same dev to the management
        # partition from the same VIOS - it is safe to use the first one.
        mapping = new_maps[0]
        # Scan the SCSI bus, discover the disk, find its canonical path.
        LOG.info("Discovering device and path for mapping of %(dev_name)s "
                 "on the management partition.",
                 {'dev_name': self.stg_elem.name}, instance=self.instance)
        self.disk_path = mgmt.discover_vscsi_disk(mapping)
        return self.stg_elem, self.vios_wrap, self.disk_path
Code Example #2
    def instance_disk_iter(self,
                           instance,
                           disk_type=DiskType.BOOT,
                           lpar_wrap=None):
        """Return the instance's storage element wrapper of the specified type.

        :param instance: nova.objects.instance.Instance object owning the
                         requested disk.
        :param disk_type: The type of disk to find, one of the DiskType enum
                          values.
        :param lpar_wrap: pypowervm.wrappers.logical_partition.LPAR
                          corresponding to the instance.  If not specified, it
                          will be retrieved; i.e. specify this parameter to
                          save on REST calls.
        :return: Iterator of tuples of (storage_elem, VIOS), where storage_elem
                 is a storage element wrapper (pypowervm.wrappers.storage.VOpt,
                 VDisk, PV, or LU) associated with the instance; and VIOS is
                 the wrapper of the Virtual I/O Server owning that storage
                 element.
        """
        if lpar_wrap is None:
            lpar_wrap = vm.get_instance_wrapper(self.adapter, instance)
        match_func = self.disk_match_func(disk_type, instance)
        for vios_uuid in self.vios_uuids:
            vios_wrap = pvm_vios.VIOS.get(self.adapter,
                                          uuid=vios_uuid,
                                          xag=[pvm_const.XAG.VIO_SMAP])
            for scsi_map in tsk_map.find_maps(vios_wrap.scsi_mappings,
                                              client_lpar_id=lpar_wrap.id,
                                              match_func=match_func):
                yield scsi_map.backing_storage, vios_wrap
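Because instance_disk_iter is a lazy generator, a caller can stop at the first match and avoid further VIOS REST reads. A minimal usage sketch, assuming disk_dvr is an instance of this driver class and instance is a nova Instance (both names are illustrative):

# Hypothetical caller: find the boot disk and its owning VIOS, stopping at
# the first match so later VIOS feeds are never fetched.
boot_elem, boot_vios = None, None
for stg_elem, vios_w in disk_dvr.instance_disk_iter(instance):
    boot_elem, boot_vios = stg_elem, vios_w
    break
if boot_elem is not None:
    print('Boot disk %s is hosted by VIOS %s'
          % (boot_elem.name, boot_vios.name))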
Code Example #3
    def _disconnect_volume(self, slot_mgr):
        # Get the hosting UUID
        nl_vios_wrap = partition.get_mgmt_partition(self.adapter)
        vios_uuid = nl_vios_wrap.uuid
        # Build the match function
        match_func = tsk_map.gen_match_func(pvm_stg.VDisk,
                                            names=[self._get_path()])

        # Make sure the remove function will run within the transaction manager
        def rm_func(vios_w):
            # If the vios doesn't match, just return
            if vios_w.uuid != vios_uuid:
                return None

            LOG.info(_LI("Disconnecting instance %(inst)s from storage "
                         "disks."), {'inst': self.instance.name},
                     instance=self.instance)
            return tsk_map.remove_maps(vios_w,
                                       self.vm_uuid,
                                       match_func=match_func)

        self.stg_ftsk.add_functor_subtask(rm_func)

        # Find the disk directly.
        vios_w = self.stg_ftsk.wrapper_tasks[vios_uuid].wrapper
        mappings = tsk_map.find_maps(vios_w.scsi_mappings,
                                     client_lpar_id=self.vm_uuid,
                                     match_func=match_func)

        return [x.backing_storage for x in mappings]
Code Example #4
File: media.py Project: klmitch/nova
    def dlt_vopt(self, instance, stg_ftsk):
        """Deletes the virtual optical and scsi mappings for a VM.

        :param instance: The nova instance whose VOpt(s) are to be removed.
        :param stg_ftsk: A FeedTask. The actions to modify the storage will be
                         added as batched functions onto the FeedTask.
        """
        lpar_uuid = vm.get_pvm_uuid(instance)

        # The matching function for find_maps, remove_maps
        match_func = tsk_map.gen_match_func(pvm_stg.VOptMedia)

        # Add a function to remove the mappings
        stg_ftsk.wrapper_tasks[self.vios_uuid].add_functor_subtask(
            tsk_map.remove_maps, lpar_uuid, match_func=match_func)

        # Find the VOpt devices based on the mappings
        media_mappings = tsk_map.find_maps(
            stg_ftsk.get_wrapper(self.vios_uuid).scsi_mappings,
            client_lpar_id=lpar_uuid, match_func=match_func)
        media_elems = [x.backing_storage for x in media_mappings]

        def rm_vopt():
            LOG.info("Removing virtual optical storage.",
                     instance=instance)
            vg_wrap = pvm_stg.VG.get(self.adapter, uuid=self.vg_uuid,
                                     parent_type=pvm_vios.VIOS,
                                     parent_uuid=self.vios_uuid)
            tsk_stg.rm_vg_storage(vg_wrap, vopts=media_elems)

        # Add task to remove the media if it exists
        if media_elems:
            stg_ftsk.add_post_execute(task.FunctorTask(rm_vopt))
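The subtasks queued by dlt_vopt only run when the FeedTask executes. A minimal caller sketch, assuming media_bldr is an instance of the class above and tsk_par / pvm_const are imported as in the other examples (the names here are illustrative):

# Hypothetical caller: queue the vopt deletion, then apply all batched
# VIOS updates (and the post-execute rm_vopt) in one pass.
stg_ftsk = tsk_par.build_active_vio_feed_task(
    adapter, name='vopt_cleanup', xag=[pvm_const.XAG.VIO_SMAP])
media_bldr.dlt_vopt(instance, stg_ftsk)
stg_ftsk.execute()  # mappings are removed here; rm_vopt runs afterwards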
Code Example #5
    def test_add_map(self):
        """Tests the add_map method."""
        vio_resp = tju.load_file(VIO_MULTI_MAP_FILE, self.adpt)
        vio_w = pvm_vios.VIOS.wrap(vio_resp)

        pv = pvm_stor.PV.bld(self.adpt, 'pv_name', 'pv_udid')

        scsi_map = scsi_mapper.build_vscsi_mapping('host_uuid', vio_w,
                                                   LPAR_UUID, pv)

        # Get the original count
        orig_mappings = len(vio_w.scsi_mappings)

        # Add the actual mapping
        resp1 = scsi_mapper.add_map(vio_w, scsi_map)
        self.assertIsNotNone(resp1)
        self.assertIsInstance(resp1, pvm_vios.VSCSIMapping)

        # The mapping should return as None, as it is already there.
        resp2 = scsi_mapper.add_map(vio_w, scsi_map)
        self.assertIsNone(resp2)

        # Make sure only one was added.
        self.assertEqual(orig_mappings + 1, len(vio_w.scsi_mappings))

        # Now make sure the mapping added can be found
        found = scsi_mapper.find_maps(vio_w.scsi_mappings, LPAR_UUID,
                                      stg_elem=pv)
        self.assertEqual(1, len(found))
        self.assertEqual(scsi_map, found[0])
Code Example #6
    def execute(self):
        """Map the instance's boot disk and discover it."""

        # Search for boot disk on the NovaLink partition.
        if self.disk_dvr.mp_uuid in self.disk_dvr._vios_uuids:
            dev_name = self.disk_dvr.get_bootdisk_path(
                self.instance, self.disk_dvr.mp_uuid)
            if dev_name is not None:
                return None, None, dev_name

        self.stg_elem, self.vios_wrap = (
            self.disk_dvr.connect_instance_disk_to_mgmt(self.instance))
        new_maps = pvm_smap.find_maps(
            self.vios_wrap.scsi_mappings, client_lpar_id=self.disk_dvr.mp_uuid,
            stg_elem=self.stg_elem)
        if not new_maps:
            raise exception.NewMgmtMappingNotFoundException(
                stg_name=self.stg_elem.name, vios_name=self.vios_wrap.name)

        # new_maps should be length 1, but even if it's not - i.e. we somehow
        # matched more than one mapping of the same dev to the management
        # partition from the same VIOS - it is safe to use the first one.
        mapping = new_maps[0]
        # Scan the SCSI bus, discover the disk, find its canonical path.
        LOG.info("Discovering device and path for mapping of %(dev_name)s "
                 "on the management partition.",
                 {'dev_name': self.stg_elem.name}, instance=self.instance)
        self.disk_path = mgmt.discover_vscsi_disk(mapping)
        return self.stg_elem, self.vios_wrap, self.disk_path
Code Example #7
File: fileio.py Project: esberglu/nova-powervm
    def _disconnect_volume(self, slot_mgr):
        # Build the match function
        match_func = tsk_map.gen_match_func(pvm_stg.VDisk,
                                            names=[self._get_path()])

        # Make sure the remove function will run within the transaction manager
        def rm_func(vios_w):
            # If the vios doesn't match, just return
            if vios_w.uuid not in self.vios_uuids:
                return None

            LOG.info("Disconnecting storage disks.", instance=self.instance)
            removed_maps = tsk_map.remove_maps(vios_w,
                                               self.vm_uuid,
                                               match_func=match_func)
            for rm_map in removed_maps:
                slot_mgr.drop_vscsi_mapping(rm_map)
            return removed_maps

        self.stg_ftsk.add_functor_subtask(rm_func)
        # Find the disk directly.
        vios_w = self.stg_ftsk.wrapper_tasks[self.vios_uuids[0]].wrapper
        mappings = tsk_map.find_maps(vios_w.scsi_mappings,
                                     client_lpar_id=self.vm_uuid,
                                     match_func=match_func)

        return [x.backing_storage for x in mappings]
Code Example #8
    def _check_host_mappings(self, vios_wrap, device_name):
        """Checks if the given hdisk has multiple mappings

        :param vios_wrap: The Virtual I/O Server wrapper to remove the disk
                          from.
        :param device_name: The hdisk name to remove.

        :return: True if there are multiple instances using the given hdisk
        """
        vios_scsi_mappings = next(v.scsi_mappings for v in self.stg_ftsk.feed
                                  if v.uuid == vios_wrap.uuid)
        mappings = tsk_map.find_maps(
            vios_scsi_mappings, None,
            tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))

        LOG.info("%(num)d storage mappings found for %(dev)s on VIOS %(vios)s",
                 {
                     'num': len(mappings),
                     'dev': device_name,
                     'vios': vios_wrap.name
                 },
                 instance=self.instance)
        # the mapping is still present as the task feed removes
        # the mapping later
        return len(mappings) > 1
Code Example #9
    def dlt_vopt(self, instance, stg_ftsk):
        """Deletes the virtual optical and scsi mappings for a VM.

        :param instance: The nova instance whose VOpt(s) are to be removed.
        :param stg_ftsk: A FeedTask. The actions to modify the storage will be
                         added as batched functions onto the FeedTask.
        """
        lpar_uuid = vm.get_pvm_uuid(instance)

        # The matching function for find_maps, remove_maps
        match_func = tsk_map.gen_match_func(pvm_stg.VOptMedia)

        # Add a function to remove the mappings
        stg_ftsk.wrapper_tasks[self.vios_uuid].add_functor_subtask(
            tsk_map.remove_maps, lpar_uuid, match_func=match_func)

        # Find the VOpt devices based on the mappings
        media_mappings = tsk_map.find_maps(
            stg_ftsk.get_wrapper(self.vios_uuid).scsi_mappings,
            client_lpar_id=lpar_uuid, match_func=match_func)
        media_elems = [x.backing_storage for x in media_mappings]

        def rm_vopt():
            LOG.info("Removing virtual optical storage.", instance=instance)
            vg_wrap = pvm_stg.VG.get(self.adapter,
                                     uuid=self.vg_uuid,
                                     parent_type=pvm_vios.VIOS,
                                     parent_uuid=self.vios_uuid)
            tsk_stg.rm_vg_storage(vg_wrap, vopts=media_elems)

        # Add task to remove the media if it exists
        if media_elems:
            stg_ftsk.add_post_execute(task.FunctorTask(rm_vopt))
Code Example #10
File: ibmi.py Project: tpeponas/pypowervm
def update_ibmi_settings(adapter, lpar_w, boot_type):
    """Update TaggedIO, Keylock postion and IPL Source of IBMi VM.

    TaggedIO of IBMi vm will be updated to identify the load source,
    alternative load source and console type. Keylock position will be set
    to the value of NORMAL in KeylockPos enumration. IPL Source will be set
    to the value of B in IPLSrc enumration.
    :param adapter: The pypowervm adapter.
    :param lpar_w: The lpar wrapper.
    :param boot_type: The boot connectivity type of the VM. It is a string
                      value that represents one of the values in the
                      BootStorageType enumeration.
    :return: The updated LPAR wrapper. The update is not executed against the
             system, but rather the wrapper itself is updated locally.
    """
    load_source = None
    alt_load_source = None
    client_adapters = []
    if boot_type == pvm_lpar.BootStorageType.VFC:
        LOG.info("Setting Virtual Fibre Channel slot as load source for VM %s",
                 lpar_w.name)
        for vios_wrap in pvm_vios.VIOS.get(adapter, xag=[c.XAG.VIO_FMAP]):
            existing_maps = pvm_vfcmap.find_maps(vios_wrap.vfc_mappings,
                                                 lpar_w.id)
            client_adapters.extend([
                vfcmap.client_adapter for vfcmap in existing_maps
                if vfcmap.client_adapter is not None
            ])
    else:
        # Boot volumes backed by vSCSI physical volumes, SSP LUs and local
        # disks are handled here.
        LOG.info("Setting Virtual SCSI slot slot as load source for VM %s",
                 lpar_w.name)
        for vios_wrap in pvm_vios.VIOS.get(adapter, xag=[c.XAG.VIO_SMAP]):
            existing_maps = pvm_smap.find_maps(vios_wrap.scsi_mappings,
                                               lpar_w.id)
            client_adapters.extend([
                smap.client_adapter for smap in existing_maps
                if smap.client_adapter is not None
            ])
    slot_nums = set(s.lpar_slot_num for s in client_adapters)
    slot_nums = list(slot_nums)
    slot_nums.sort()
    if len(slot_nums) > 0:
        load_source = slot_nums.pop(0)
    if len(slot_nums) > 0:
        alt_load_source = slot_nums.pop(0)
    if load_source is not None:
        if alt_load_source is None:
            alt_load_source = load_source
        lpar_w.io_config.tagged_io = pvm_bp.TaggedIO.bld(
            adapter,
            load_src=load_source,
            console='HMC',
            alt_load_src=alt_load_source)
    else:
        raise pvmex.IBMiLoadSourceNotFound(vm_name=lpar_w.name)
    lpar_w.desig_ipl_src = pvm_lpar.IPLSrc.B
    lpar_w.keylock_pos = pvm_bp.KeylockPos.NORMAL
    return lpar_w
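Since update_ibmi_settings only changes the wrapper locally, the caller must push the result to the platform afterwards. A minimal sketch, assuming adapter and lpar_w were obtained elsewhere and that BootStorageType exposes a VSCSI value (an assumption; verify against your pypowervm version):

# Hypothetical caller: tag the IBMi load source locally, then persist it.
lpar_w = update_ibmi_settings(adapter, lpar_w,
                              pvm_lpar.BootStorageType.VSCSI)  # assumed enum
lpar_w.update()  # nothing was sent to the system before this call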
Code Example #11
    def test_add_map(self):
        """Tests the add_map method."""
        pv = pvm_stor.PV.bld(self.adpt, 'pv_name', 'pv_udid')

        scsi_map = scsi_mapper.build_vscsi_mapping('host_uuid',
                                                   self.v1wrap,
                                                   LPAR_UUID,
                                                   pv,
                                                   lpar_slot_num=23)

        # Get the original count
        orig_mappings = len(self.v1wrap.scsi_mappings)

        # Add the actual mapping
        resp1 = scsi_mapper.add_map(self.v1wrap, scsi_map)
        self.assertIsNotNone(resp1)
        self.assertIsInstance(resp1, pvm_vios.VSCSIMapping)

        # Assert that the desired client slot number was set
        self.assertEqual(resp1.client_adapter.lpar_slot_num, 23)

        # The mapping should return as None, as it is already there.
        resp2 = scsi_mapper.add_map(self.v1wrap, scsi_map)
        self.assertIsNone(resp2)

        # Make sure only one was added.
        self.assertEqual(orig_mappings + 1, len(self.v1wrap.scsi_mappings))

        # Now make sure the mapping added can be found
        found = scsi_mapper.find_maps(self.v1wrap.scsi_mappings,
                                      LPAR_UUID,
                                      stg_elem=pv)
        self.assertEqual(1, len(found))
        self.assertEqual(scsi_map, found[0])
Code Example #12
File: driver.py Project: pkdevboxy/nova-powervm
    def instance_disk_iter(self, instance, disk_type=DiskType.BOOT,
                           lpar_wrap=None):
        """Return the instance's storage element wrapper of the specified type.

        :param instance: nova.objects.instance.Instance object owning the
                         requested disk.
        :param disk_type: The type of disk to find, one of the DiskType enum
                          values.
        :param lpar_wrap: pypowervm.wrappers.logical_partition.LPAR
                          corresponding to the instance.  If not specified, it
                          will be retrieved; i.e. specify this parameter to
                          save on REST calls.
        :return: Iterator of tuples of (storage_elem, VIOS), where storage_elem
                 is a storage element wrapper (pypowervm.wrappers.storage.VOpt,
                 VDisk, PV, or LU) associated with the instance; and VIOS is
                 the wrapper of the Virtual I/O Server owning that storage
                 element.
        """
        if lpar_wrap is None:
            lpar_wrap = vm.get_instance_wrapper(self.adapter, instance,
                                                self.host_uuid)
        match_func = self.disk_match_func(disk_type, instance)
        for vios_uuid in self.vios_uuids:
            vios_wrap = pvm_vios.VIOS.wrap(self.adapter.read(
                pvm_vios.VIOS.schema_type, root_id=vios_uuid,
                xag=[pvm_vios.VIOS.xags.SCSI_MAPPING]))
            for scsi_map in tsk_map.find_maps(vios_wrap.scsi_mappings,
                                              client_lpar_id=lpar_wrap.id,
                                              match_func=match_func):
                yield scsi_map.backing_storage, vios_wrap
Code Example #13
File: ssp.py Project: openstack/nova-powervm
    def disconnect_disk(self, instance, stg_ftsk=None, disk_type=None):
        """Disconnects the storage adapters from the image disk.

        :param instance: instance to disconnect the image for.
        :param stg_ftsk: (Optional) The pypowervm transaction FeedTask for
                         the I/O Operations.  If provided, the Virtual I/O
                         Server mapping updates will be added to the FeedTask.
                         This defers the updates to some later point in time.
                         If the FeedTask is not provided, the updates will be
                         run immediately when this method is executed.
        :param disk_type: The list of disk types to remove or None which means
                          to remove all disks from the VM.
        :return: A list of all the backing storage elements that were
                 disconnected from the I/O Server and VM.
        """
        if stg_ftsk is None:
            stg_ftsk = tsk_par.build_active_vio_feed_task(
                self.adapter, name='ssp', xag=[pvm_const.XAG.VIO_SMAP])

        lpar_uuid = vm.get_pvm_uuid(instance)
        match_func = tsk_map.gen_match_func(pvm_stg.LU, prefixes=disk_type)

        # Delay run function to remove the mapping between the VM and the LU
        def rm_func(vios_w):
            LOG.info("Removing SSP disk connection to VIOS %(vios)s.",
                     {'vios': vios_w.name},
                     instance=instance)
            return tsk_map.remove_maps(vios_w,
                                       lpar_uuid,
                                       match_func=match_func)

        # Remove the mapping from *each* VIOS on the LPAR's host.
        # The LPAR's host has to be self.host_uuid, else the PowerVM API will
        # fail.
        #
        # Note - this may not be all the VIOSes on the system...just the ones
        # in the SSP cluster.
        #
        # The mappings will normally be the same on all VIOSes, unless a VIOS
        # was down when a disk was added.  So for the return value, we need to
        # collect the union of all relevant mappings from all VIOSes.
        lu_set = set()
        for vios_uuid in self.vios_uuids:
            # Add the remove for the VIO
            stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(rm_func)

            # Find the active LUs so that a delete op knows what to remove.
            vios_w = stg_ftsk.wrapper_tasks[vios_uuid].wrapper
            mappings = tsk_map.find_maps(vios_w.scsi_mappings,
                                         client_lpar_id=lpar_uuid,
                                         match_func=match_func)
            if mappings:
                lu_set.update([x.backing_storage for x in mappings])

        # Run the FeedTask if it was built locally
        if stg_ftsk.name == 'ssp':
            stg_ftsk.execute()

        return list(lu_set)
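With a caller-supplied FeedTask, disconnect_disk returns the affected LUs immediately, but the unmapping itself is deferred until the FeedTask executes (note the name check above: only the locally built 'ssp' task self-executes). A minimal sketch with illustrative names:

# Hypothetical caller: defer the disconnect onto a shared FeedTask.
stg_ftsk = tsk_par.build_active_vio_feed_task(
    adapter, name='teardown', xag=[pvm_const.XAG.VIO_SMAP])
lus = driver.disconnect_disk(instance, stg_ftsk=stg_ftsk)
stg_ftsk.execute()  # the SCSI mappings are actually removed here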
Code Example #14
File: media.py Project: andymcc/nova-powervm
    def add_dlt_vopt_tasks(self, lpar_uuid, stg_ftsk, remove_mappings=True):
        """Deletes the virtual optical and (optionally) scsi mappings for a VM.

        :param lpar_uuid: The pypowervm UUID of the LPAR whose vopt is to be
                          removed.
        :param stg_ftsk: A FeedTask handling storage I/O.  The task to remove
                         the mappings and media from the VM will be deferred on
                         to the FeedTask passed in. The execute can be done all
                         in one method (batched together).  No updates are
                         actually made here; they are simply added to the
                         FeedTask.
        :param remove_mappings: (Optional, Default: True) If set to true, will
                                remove the SCSI mappings as part of the
                                operation.  If false, will leave the mapping
                                but detach the storage from it.  If the VM is
                                running, it may be necessary to do the latter
                                as some operating systems will not allow the
                                removal.
        """
        # The function to find the VOpt
        match_func = tsk_map.gen_match_func(pvm_stg.VOptMedia)

        def rm_vopt_mapping(vios_w):
            return tsk_map.remove_maps(vios_w,
                                       lpar_uuid,
                                       match_func=match_func)

        def detach_vopt_from_map(vios_w):
            return tsk_map.detach_storage(vios_w,
                                          lpar_uuid,
                                          match_func=match_func)

        # Add a function to remove the map or detach the vopt
        stg_ftsk.wrapper_tasks[self.vios_uuid].add_functor_subtask(
            rm_vopt_mapping if remove_mappings else detach_vopt_from_map)

        # Find the vOpt device (before the remove is done) so that it can be
        # removed.
        partition_id = vm.get_vm_id(self.adapter, lpar_uuid)
        media_mappings = tsk_map.find_maps(
            stg_ftsk.get_wrapper(self.vios_uuid).scsi_mappings,
            client_lpar_id=partition_id, match_func=match_func)
        media_elems = [x.backing_storage for x in media_mappings]

        def rm_vopt():
            LOG.info(_LI("Removing virtual optical for VM with UUID %s."),
                     lpar_uuid)
            vg_wrap = pvm_stg.VG.get(self.adapter,
                                     uuid=self.vg_uuid,
                                     parent_type=pvm_vios.VIOS,
                                     parent_uuid=self.vios_uuid)
            tsk_stg.rm_vg_storage(vg_wrap, vopts=media_elems)

        # Don't add this task if there is no media to delete (eg. config drive)
        if media_elems:
            stg_ftsk.add_post_execute(task.FunctorTask(rm_vopt))
Code Example #15
    def disconnect_image_disk(self,
                              context,
                              instance,
                              stg_ftsk=None,
                              disk_type=None):
        """Disconnects the storage adapters from the image disk.

        :param context: nova context for operation
        :param instance: instance to disconnect the image for.
        :param stg_ftsk: (Optional) The pypowervm transaction FeedTask for the
                         I/O Operations.  If provided, the Virtual I/O Server
                         mapping updates will be added to the FeedTask.  This
                         defers the updates to some later point in time.  If
                         the FeedTask is not provided, the updates will be run
                         immediately when this method is executed.
        :param disk_type: The list of disk types to remove or None which means
                          to remove all disks from the VM.
        :return: A list of all the backing storage elements that were
                 disconnected from the I/O Server and VM.
        """
        lpar_uuid = vm.get_pvm_uuid(instance)

        # Ensure we have a transaction manager.
        if stg_ftsk is None:
            stg_ftsk = vios.build_tx_feed_task(
                self.adapter,
                self.host_uuid,
                name='localdisk',
                xag=[pvm_vios.VIOS.xags.SCSI_MAPPING])

        # Build the match function
        match_func = tsk_map.gen_match_func(pvm_stg.VDisk, prefixes=disk_type)

        # Make sure the remove function will run within the transaction manager
        def rm_func(vios_w):
            LOG.info(_LI("Disconnecting instance %(inst)s from storage "
                         "disks."), {'inst': instance.name})
            return tsk_map.remove_maps(vios_w,
                                       lpar_uuid,
                                       match_func=match_func)

        stg_ftsk.wrapper_tasks[self._vios_uuid].add_functor_subtask(rm_func)

        # Find the disk directly.
        vios_w = stg_ftsk.wrapper_tasks[self._vios_uuid].wrapper
        mappings = tsk_map.find_maps(vios_w.scsi_mappings,
                                     client_lpar_id=lpar_uuid,
                                     match_func=match_func)

        # Run the transaction manager if built locally.  Must be done after
        # the find to make sure the mappings were found previously.
        if stg_ftsk.name == 'localdisk':
            stg_ftsk.execute()

        return [x.backing_storage for x in mappings]
Code Example #16
File: media.py Project: adreznec/nova-powervm
    def add_dlt_vopt_tasks(self, lpar_uuid, stg_ftsk, remove_mappings=True):
        """Deletes the virtual optical and (optionally) scsi mappings for a VM.

        :param lpar_uuid: The pypowervm UUID of the LPAR whose vopt is to be
                          removed.
        :param stg_ftsk: A FeedTask handling storage I/O.  The task to remove
                         the mappings and media from the VM will be deferred on
                         to the FeedTask passed in. The execute can be done all
                         in one method (batched together).  No updates are
                         actually made here; they are simply added to the
                         FeedTask.
        :param remove_mappings: (Optional, Default: True) If set to true, will
                                remove the SCSI mappings as part of the
                                operation.  If false, will leave the mapping
                                but detach the storage from it.  If the VM is
                                running, it may be necessary to do the latter
                                as some operating systems will not allow the
                                removal.
        """
        # The function to find the VOpt
        match_func = tsk_map.gen_match_func(pvm_stg.VOptMedia)

        def rm_vopt_mapping(vios_w):
            return tsk_map.remove_maps(vios_w, lpar_uuid,
                                       match_func=match_func)

        def detach_vopt_from_map(vios_w):
            return tsk_map.detach_storage(vios_w, lpar_uuid,
                                          match_func=match_func)

        # Add a function to remove the map or detach the vopt
        stg_ftsk.wrapper_tasks[self.vios_uuid].add_functor_subtask(
            rm_vopt_mapping if remove_mappings else detach_vopt_from_map)

        # Find the vOpt device (before the remove is done) so that it can be
        # removed.
        partition_id = vm.get_vm_id(self.adapter, lpar_uuid)
        media_mappings = tsk_map.find_maps(
            stg_ftsk.get_wrapper(self.vios_uuid).scsi_mappings,
            client_lpar_id=partition_id, match_func=match_func)
        media_elems = [x.backing_storage for x in media_mappings]

        def rm_vopt():
            LOG.info(_LI("Removing virtual optical for VM with UUID %s."),
                     lpar_uuid)
            vg_rsp = self.adapter.read(pvm_vios.VIOS.schema_type,
                                       root_id=self.vios_uuid,
                                       child_type=pvm_stg.VG.schema_type,
                                       child_id=self.vg_uuid)
            tsk_stg.rm_vg_storage(pvm_stg.VG.wrap(vg_rsp), vopts=media_elems)

        # Don't add this task if there is no media to delete (eg. config drive)
        if media_elems:
            stg_ftsk.add_post_execute(task.FunctorTask(rm_vopt))
Code Example #17
File: localdisk.py Project: pkdevboxy/nova-powervm
    def disconnect_image_disk(self, context, instance, stg_ftsk=None,
                              disk_type=None):
        """Disconnects the storage adapters from the image disk.

        :param context: nova context for operation
        :param instance: instance to disconnect the image for.
        :param stg_ftsk: (Optional) The pypowervm transaction FeedTask for the
                         I/O Operations.  If provided, the Virtual I/O Server
                         mapping updates will be added to the FeedTask.  This
                         defers the updates to some later point in time.  If
                         the FeedTask is not provided, the updates will be run
                         immediately when this method is executed.
        :param disk_type: The list of disk types to remove or None which means
                          to remove all disks from the VM.
        :return: A list of all the backing storage elements that were
                 disconnected from the I/O Server and VM.
        """
        lpar_uuid = vm.get_pvm_uuid(instance)

        # Ensure we have a transaction manager.
        if stg_ftsk is None:
            stg_ftsk = vios.build_tx_feed_task(
                self.adapter, self.host_uuid, name='localdisk',
                xag=[pvm_vios.VIOS.xags.SCSI_MAPPING])

        # Build the match function
        match_func = tsk_map.gen_match_func(pvm_stg.VDisk, prefixes=disk_type)

        # Make sure the remove function will run within the transaction manager
        def rm_func(vios_w):
            LOG.info(_LI("Disconnecting instance %(inst)s from storage disks.")
                     % {'inst': instance.name})
            return tsk_map.remove_maps(vios_w, lpar_uuid,
                                       match_func=match_func)

        stg_ftsk.wrapper_tasks[self._vios_uuid].add_functor_subtask(rm_func)

        # Find the disk directly.
        vios_w = stg_ftsk.wrapper_tasks[self._vios_uuid].wrapper
        mappings = tsk_map.find_maps(vios_w.scsi_mappings,
                                     client_lpar_id=lpar_uuid,
                                     match_func=match_func)

        # Run the transaction manager if built locally.  Must be done after
        # the find to make sure the mappings were found previously.
        if stg_ftsk.name == 'localdisk':
            stg_ftsk.execute()

        return [x.backing_storage for x in mappings]
Code Example #18
File: vscsi.py Project: kairoaraujo/nova-powervm
    def _check_host_mappings(self, vios_wrap, device_name):
        """Checks if the given hdisk has multiple mappings

        :param vios_wrap: The Virtual I/O Server wrapper to remove the disk
                          from.
        :param device_name: The hdisk name to remove.

        :return: True if there are multiple instances using the given hdisk
        """
        vios_scsi_mappings = next(v.scsi_mappings for v in self.stg_ftsk.feed
                                  if v.uuid == vios_wrap.uuid)
        mappings = tsk_map.find_maps(
            vios_scsi_mappings, None,
            tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))

        LOG.info(_LI("%(num)d Storage Mappings found for %(dev)s"),
                 {"num": len(mappings), "dev": device_name})
        # the mapping is still present as the task feed removes
        # the mapping later
        return len(mappings) > 1
Code Example #19
    def detach_disk(self, instance):
        """Detaches the storage adapters from the disk.

        :param instance: instance from which to detach the image.
        :return: A list of all the backing storage elements that were detached
                 from the I/O Server and VM.
        """
        stg_ftsk = tsk_par.build_active_vio_feed_task(
            self._adapter, name='ssp', xag=[pvm_const.XAG.VIO_SMAP])

        lpar_uuid = vm.get_pvm_uuid(instance)
        match_func = tsk_map.gen_match_func(pvm_stg.LU)

        def rm_func(vwrap):
            LOG.info("Removing SSP disk connection to VIOS %s.",
                     vwrap.name,
                     instance=instance)
            return tsk_map.remove_maps(vwrap, lpar_uuid, match_func=match_func)

        # Remove the mapping from *each* VIOS on the LPAR's host.
        # The LPAR's host has to be self._host_uuid, else the PowerVM API will
        # fail.
        #
        # Note - this may not be all the VIOSes on the system...just the ones
        # in the SSP cluster.
        #
        # The mappings will normally be the same on all VIOSes, unless a VIOS
        # was down when a disk was added.  So for the return value, we need to
        # collect the union of all relevant mappings from all VIOSes.
        lu_set = set()
        for vios_uuid in self._vios_uuids:
            # Add the remove for the VIO
            stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(rm_func)

            # Find the active LUs so that a delete op knows what to remove.
            vios_w = stg_ftsk.wrapper_tasks[vios_uuid].wrapper
            mappings = tsk_map.find_maps(vios_w.scsi_mappings,
                                         client_lpar_id=lpar_uuid,
                                         match_func=match_func)
            if mappings:
                lu_set.update([x.backing_storage for x in mappings])

        stg_ftsk.execute()

        return list(lu_set)
Code Example #20
File: ssp.py Project: Juniper/nova
    def detach_disk(self, instance):
        """Detaches the storage adapters from the disk.

        :param instance: instance from which to detach the image.
        :return: A list of all the backing storage elements that were detached
                 from the I/O Server and VM.
        """
        stg_ftsk = tsk_par.build_active_vio_feed_task(
            self._adapter, name='ssp', xag=[pvm_const.XAG.VIO_SMAP])

        lpar_uuid = vm.get_pvm_uuid(instance)
        match_func = tsk_map.gen_match_func(pvm_stg.LU)

        def rm_func(vwrap):
            LOG.info("Removing SSP disk connection to VIOS %s.",
                     vwrap.name, instance=instance)
            return tsk_map.remove_maps(vwrap, lpar_uuid,
                                       match_func=match_func)

        # Remove the mapping from *each* VIOS on the LPAR's host.
        # The LPAR's host has to be self.host_uuid, else the PowerVM API will
        # fail.
        #
        # Note - this may not be all the VIOSes on the system...just the ones
        # in the SSP cluster.
        #
        # The mappings will normally be the same on all VIOSes, unless a VIOS
        # was down when a disk was added.  So for the return value, we need to
        # collect the union of all relevant mappings from all VIOSes.
        lu_set = set()
        for vios_uuid in self._vios_uuids:
            # Add the remove for the VIO
            stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(rm_func)

            # Find the active LUs so that a delete op knows what to remove.
            vios_w = stg_ftsk.wrapper_tasks[vios_uuid].wrapper
            mappings = tsk_map.find_maps(vios_w.scsi_mappings,
                                         client_lpar_id=lpar_uuid,
                                         match_func=match_func)
            if mappings:
                lu_set.update([x.backing_storage for x in mappings])

        stg_ftsk.execute()

        return list(lu_set)
Code Example #21
File: fcvscsi.py Project: arbrandes/nova
    def _check_host_mappings(self, vios_wrap, device_name):
        """Checks if the given hdisk has multiple mappings

        :param vios_wrap: The Virtual I/O Server wrapper to remove the disk
                          from.
        :param device_name: The hdisk name to remove.
        :return: True if there are multiple instances using the given hdisk
        """
        vios_scsi_mappings = next(v.scsi_mappings for v in self.stg_ftsk.feed
                                  if v.uuid == vios_wrap.uuid)
        mappings = tsk_map.find_maps(
            vios_scsi_mappings, None,
            tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))

        LOG.debug("%(num)d storage mapping(s) found for %(dev)s on VIOS "
                  "%(vios)s", {'num': len(mappings), 'dev': device_name,
                               'vios': vios_wrap.name}, instance=self.instance)
        # The mapping is still present as the task feed removes it later.
        return len(mappings) > 1
Code Example #22
    def get_bootdisk_path(self, instance, vios_uuid):
        """Find the local path for the instance's boot disk.

        :param instance: nova.objects.instance.Instance object owning the
                         requested disk.
        :param vios_uuid: PowerVM UUID of the VIOS to search for mappings.
        :return: Local path for instance's boot disk.
        """
        vm_uuid = vm.get_pvm_uuid(instance)
        match_func = self._disk_match_func(DiskType.BOOT, instance)
        vios_wrap = pvm_vios.VIOS.get(self._adapter,
                                      uuid=vios_uuid,
                                      xag=[pvm_const.XAG.VIO_SMAP])
        maps = tsk_map.find_maps(vios_wrap.scsi_mappings,
                                 client_lpar_id=vm_uuid,
                                 match_func=match_func)
        if maps:
            return maps[0].server_adapter.backing_dev_name
        return None
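A minimal usage sketch mirroring the NovaLink check in Code Example #1; disk_dvr and instance are illustrative names:

# Hypothetical caller: resolve the boot device name on the management
# partition's VIOS, if the boot disk is mapped there.
dev_name = disk_dvr.get_bootdisk_path(instance, disk_dvr.mp_uuid)
if dev_name is not None:
    print('Boot disk is visible locally as %s' % dev_name)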
Code Example #23
File: media.py Project: sarkartanzil/nova-powervm
    def add_dlt_vopt_tasks(self, lpar_uuid, stg_ftsk):
        """Deletes the virtual optical and scsi mappings for a VM.

        :param lpar_uuid: The pypowervm UUID of the LPAR whose vopt is to be
                          removed.
        :param stg_ftsk: A FeedTask handling storage I/O.  The task to remove
                         the mappings and media from the VM will be deferred on
                         to the FeedTask passed in. The execute can be done all
                         in one method (batched together).  No updates are
                         actually made here; they are simply added to the
                         FeedTask.
        """
        # The function to find the VOpt
        match_func = tsk_map.gen_match_func(pvm_stg.VOptMedia)

        def rm_vopt_mapping(vios_w):
            return tsk_map.remove_maps(vios_w,
                                       lpar_uuid,
                                       match_func=match_func)

        # Add a function to remove the map
        stg_ftsk.wrapper_tasks[self.vios_uuid].add_functor_subtask(
            rm_vopt_mapping)

        # Find the vOpt device (before the remove is done) so that it can be
        # removed.
        partition_id = vm.get_vm_id(self.adapter, lpar_uuid)
        media_mappings = tsk_map.find_maps(
            stg_ftsk.get_wrapper(self.vios_uuid).scsi_mappings,
            client_lpar_id=partition_id, match_func=match_func)
        media_elems = [x.backing_storage for x in media_mappings]

        def rm_vopt():
            LOG.info(_LI("Removing virtual optical for VM with UUID %s."),
                     lpar_uuid)
            vg_rsp = self.adapter.read(pvm_vios.VIOS.schema_type,
                                       root_id=self.vios_uuid,
                                       child_type=pvm_stg.VG.schema_type,
                                       child_id=self.vg_uuid)
            tsk_stg.rm_vg_storage(pvm_stg.VG.wrap(vg_rsp), vopts=media_elems)

        stg_ftsk.add_post_execute(task.FunctorTask(rm_vopt))
Code Example #24
    def _get_bootdisk_iter(self, instance):
        """Return an iterator of (storage_elem, VIOS) tuples for the instance.

        storage_elem is a pypowervm storage element wrapper associated with
        the instance boot disk and VIOS is the wrapper of the Virtual I/O
        server owning that storage element.

        :param instance: nova.objects.instance.Instance object owning the
                         requested disk.
        :return: Iterator of tuples of (storage_elem, VIOS).
        """
        lpar_wrap = vm.get_instance_wrapper(self._adapter, instance)
        match_func = self._disk_match_func(DiskType.BOOT, instance)
        for vios_uuid in self._vios_uuids:
            vios_wrap = pvm_vios.VIOS.get(self._adapter,
                                          uuid=vios_uuid,
                                          xag=[pvm_const.XAG.VIO_SMAP])
            for scsi_map in tsk_map.find_maps(vios_wrap.scsi_mappings,
                                              client_lpar_id=lpar_wrap.id,
                                              match_func=match_func):
                yield scsi_map.backing_storage, vios_wrap
Code Example #25
    def boot_disk_path_for_instance(self, instance, vios_uuid):
        """Find scsi mappings on given VIOS for the instance.

        This method finds all scsi mappings on a given vios that are associated
        with the instance and disk_type.

        :param instance: nova.objects.instance.Instance object owning the
                         requested disk.
        :param vios_uuid: PowerVM UUID of the VIOS to search for mappings.
        :return: Iterator of scsi mappings that are associated with the
                 instance and disk_type.
        """
        vm_uuid = vm.get_pvm_uuid(instance)
        match_func = self.disk_match_func(DiskType.BOOT, instance)
        vios_wrap = pvm_vios.VIOS.get(self.adapter, uuid=vios_uuid,
                                      xag=[pvm_const.XAG.VIO_SMAP])
        maps = tsk_map.find_maps(vios_wrap.scsi_mappings,
                                 client_lpar_id=vm_uuid, match_func=match_func)
        if maps:
            return maps[0].server_adapter.backing_dev_name
        return None
Code Example #26
File: media.py Project: pratgohi/nova-powervm
    def add_dlt_vopt_tasks(self, lpar_uuid, stg_ftsk):
        """Deletes the virtual optical and scsi mappings for a VM.

        :param lpar_uuid: The pypowervm UUID of the LPAR whose vopt is to be
                          removed.
        :param stg_ftsk: A FeedTask handling storage I/O.  The task to remove
                         the mappings and media from the VM will be deferred on
                         to the FeedTask passed in. The execute can be done all
                         in one method (batched together).  No updates are
                         actually made here; they are simply added to the
                         FeedTask.
        """
        # The function to find the VOpt
        match_func = tsk_map.gen_match_func(pvm_stg.VOptMedia)

        def rm_vopt_mapping(vios_w):
            return tsk_map.remove_maps(vios_w, lpar_uuid,
                                       match_func=match_func)

        # Add a function to remove the map
        stg_ftsk.wrapper_tasks[self.vios_uuid].add_functor_subtask(
            rm_vopt_mapping)

        # Find the vOpt device (before the remove is done) so that it can be
        # removed.
        partition_id = vm.get_vm_id(self.adapter, lpar_uuid)
        media_mappings = tsk_map.find_maps(
            stg_ftsk.get_wrapper(self.vios_uuid).scsi_mappings,
            client_lpar_id=partition_id, match_func=match_func)
        media_elems = [x.backing_storage for x in media_mappings]

        def rm_vopt():
            LOG.info(_LI("Removing virtual optical for VM with UUID %s."),
                     lpar_uuid)
            vg_rsp = self.adapter.read(pvm_vios.VIOS.schema_type,
                                       root_id=self.vios_uuid,
                                       child_type=pvm_stg.VG.schema_type,
                                       child_id=self.vg_uuid)
            tsk_stg.rm_vg_storage(pvm_stg.VG.wrap(vg_rsp), vopts=media_elems)

        stg_ftsk.add_post_execute(task.FunctorTask(rm_vopt))
Code Example #27
    def execute(self):
        """Map the instance's boot disk and discover it."""
        LOG.info(_LI("Mapping boot disk of instance %(instance_name)s to "
                     "management partition."),
                 {'instance_name': self.instance.name})
        self.stg_elem, self.vios_wrap = (
            self.disk_dvr.connect_instance_disk_to_mgmt(self.instance))
        new_maps = pvm_smap.find_maps(
            self.vios_wrap.scsi_mappings, client_lpar_id=self.disk_dvr.mp_uuid,
            stg_elem=self.stg_elem)
        if not new_maps:
            raise npvmex.NewMgmtMappingNotFoundException(
                stg_name=self.stg_elem.name, vios_name=self.vios_wrap.name)

        # new_maps should be length 1, but even if it's not - i.e. we somehow
        # matched more than one mapping of the same dev to the management
        # partition from the same VIOS - it is safe to use the first one.
        the_map = new_maps[0]
        # Scan the SCSI bus, discover the disk, find its canonical path.
        LOG.info(_LI("Discovering device and path for mapping of %(dev_name)s "
                     "on the management partition."),
                 {'dev_name': self.stg_elem.name})
        self.disk_path = mgmt.discover_vscsi_disk(the_map)
        return self.stg_elem, self.vios_wrap, self.disk_path
Code Example #28
    def test_find_maps(self):
        """find_maps() tests not covered elsewhere."""
        maps = self.v1wrap.scsi_mappings
        # Specifying both match_func and stg_elem raises ValueError
        self.assertRaises(ValueError,
                          scsi_mapper.find_maps,
                          maps,
                          1,
                          match_func=isinstance,
                          stg_elem='foo')
        # Omitting match_func and stg_elem matches all entries with specified
        # LPAR ID.
        # For LPAR ID 2, that should be all of 'em.
        matches = scsi_mapper.find_maps(maps, 2)
        self.assertEqual(len(maps), len(matches))
        for exp, act in zip(maps, matches):
            self.assertEqual(exp, act)
        # For the right LPAR UUID, that should be all of 'em.
        matches = scsi_mapper.find_maps(maps, LPAR_UUID)
        self.assertEqual(len(maps), len(matches))
        for exp, act in zip(maps, matches):
            self.assertEqual(exp, act)
        # For the wrong LPAR ID, it should be none of 'em.
        matches = scsi_mapper.find_maps(maps, 1)
        self.assertEqual(0, len(matches))
        # For the wrong LPAR UUID, it should be none of 'em.
        matches = scsi_mapper.find_maps(maps, LPAR_UUID[:35] + '0')
        self.assertEqual(0, len(matches))
        # Specific storage element generates match func for that element.
        matches = scsi_mapper.find_maps(maps,
                                        2,
                                        stg_elem=maps[2].backing_storage)
        self.assertEqual(1, len(matches))
        self.assertEqual(maps[2], matches[0])
        # Test find maps when client lpar id is not specified and backing
        # storage is given
        matches = scsi_mapper.find_maps(maps,
                                        None,
                                        stg_elem=maps[2].backing_storage)
        self.assertEqual(1, len(matches))
        self.assertEqual(maps[2], matches[0])

        # All the mappings in VIO_MULTI_MAP_FILE are "complete".  Now play with
        # some that aren't.
        maps = self.v2wrap.scsi_mappings
        # Map 0 has only a server adapter.  We should find it if we specify the
        # LPAR ID...
        matches = scsi_mapper.find_maps(maps, 27, include_orphans=True)
        self.assertEqual(maps[0], matches[0])
        # ...but only if allowing orphans
        matches = scsi_mapper.find_maps(maps, 27, include_orphans=False)
        self.assertEqual(0, len(matches))
        # Matching by LPAR UUID.  Maps 12, 25, and 26 have this UUID...
        uuid = '0C0A6EBE-7BF4-4707-8780-A140F349E42E'
        matches = scsi_mapper.find_maps(maps, uuid, include_orphans=True)
        self.assertEqual(3, len(matches))
        self.assertEqual(maps[12], matches[0])
        self.assertEqual(maps[25], matches[1])
        self.assertEqual(maps[26], matches[2])
        # ...but 25 is an orphan (no client adapter).
        uuid = '0C0A6EBE-7BF4-4707-8780-A140F349E42E'
        matches = scsi_mapper.find_maps(maps, uuid)
        self.assertEqual(2, len(matches))
        self.assertEqual(maps[12], matches[0])
        self.assertEqual(maps[26], matches[1])
Code Example #29
    def test_find_maps(self):
        """find_maps() tests not covered elsewhere."""
        maps = pvm_vios.VIOS.wrap(
            tju.load_file(VIO_MULTI_MAP_FILE, self.adpt)).scsi_mappings
        # Specifying both match_func and stg_elem raises ValueError
        self.assertRaises(ValueError, scsi_mapper.find_maps, maps, 1,
                          match_func=isinstance, stg_elem='foo')
        # Omitting match_func and stg_elem matches all entries with specified
        # LPAR ID.
        # For LPAR ID 2, that should be all of 'em.
        matches = scsi_mapper.find_maps(maps, 2)
        self.assertEqual(len(maps), len(matches))
        for exp, act in zip(maps, matches):
            self.assertEqual(exp, act)
        # For the right LPAR UUID, that should be all of 'em.
        matches = scsi_mapper.find_maps(maps, LPAR_UUID)
        self.assertEqual(len(maps), len(matches))
        for exp, act in zip(maps, matches):
            self.assertEqual(exp, act)
        # For the wrong LPAR ID, it should be none of 'em.
        matches = scsi_mapper.find_maps(maps, 1)
        self.assertEqual(0, len(matches))
        # For the wrong LPAR UUID, it should be none of 'em.
        matches = scsi_mapper.find_maps(maps, LPAR_UUID[:35] + '0')
        self.assertEqual(0, len(matches))
        # Specific storage element generates match func for that element.
        matches = scsi_mapper.find_maps(maps, 2,
                                        stg_elem=maps[2].backing_storage)
        self.assertEqual(1, len(matches))
        self.assertEqual(maps[2], matches[0])
        # Test find maps when client lpar id is not specified and backing
        # storage is given
        matches = scsi_mapper.find_maps(maps, None,
                                        stg_elem=maps[2].backing_storage)
        self.assertEqual(1, len(matches))
        self.assertEqual(maps[2], matches[0])

        # All the mappings in VIO_MULTI_MAP_FILE are "complete".  Now play with
        # some that aren't.
        maps = pvm_vios.VIOS.wrap(
            tju.load_file(VIO_MULTI_MAP_FILE2, self.adpt)).scsi_mappings
        # Map 0 has only a server adapter.  We should find it if we specify the
        # LPAR ID...
        matches = scsi_mapper.find_maps(maps, 27, include_orphans=True)
        self.assertEqual(maps[0], matches[0])
        # ...but only if allowing orphans
        matches = scsi_mapper.find_maps(maps, 27, include_orphans=False)
        self.assertEqual(0, len(matches))
        # Matching by LPAR UUID.  Maps 12, 25, and 26 have this UUID...
        uuid = '0C0A6EBE-7BF4-4707-8780-A140F349E42E'
        matches = scsi_mapper.find_maps(maps, uuid, include_orphans=True)
        self.assertEqual(3, len(matches))
        self.assertEqual(maps[12], matches[0])
        self.assertEqual(maps[25], matches[1])
        self.assertEqual(maps[26], matches[2])
        # ...but 25 is an orphan (no client adapter).
        uuid = '0C0A6EBE-7BF4-4707-8780-A140F349E42E'
        matches = scsi_mapper.find_maps(maps, uuid)
        self.assertEqual(2, len(matches))
        self.assertEqual(maps[12], matches[0])
        self.assertEqual(maps[26], matches[1])
Code Example #30
File: ibmi.py Project: kairoaraujo/pypowervm
def update_ibmi_settings(adapter, lpar_w, boot_type):
    """Update TaggedIO, Keylock postion and IPL Source of IBMi VM.

    TaggedIO of IBMi vm will be updated to identify the load source,
    alternative load source and console type. Keylock position will be set
    to the value of NORMAL in KeylockPos enumration. IPL Source will be set
    to the value of B in IPLSrc enumration.
    :param adapter: The pypowervm adapter.
    :param lpar_w: The lpar wrapper.
    :param boot_type: The boot connectivity type of the VM. It is a string
                      value that represents one of the values in the
                      BootStorageType enumeration.
    :return: The updated LPAR wrapper. The update is not executed against the
             system, but rather the wrapper itself is updated locally.
    """
    load_source = None
    alt_load_source = None
    client_adapters = []
    console_type = 'HMC'
    # Use a virtual Ethernet adapter slot as the console type if the host is
    # not managed by an HMC.
    if adapter.traits.vea_as_ibmi_console:
        msg = _LI("Setting Virtual Ethernet Adapter slot as console type for "
                  "VM %s") % lpar_w.name
        LOG.info(msg)
        cna_wrap = pvm_net.CNA.wrap(adapter.read(
            pvm_lpar.LPAR.schema_type,
            root_id=lpar_w.partition_uuid,
            child_type=pvm_net.CNA.schema_type))
        cna_slot_nums = set(cna.slot for cna in cna_wrap)
        cna_slot_nums = list(cna_slot_nums)
        cna_slot_nums.sort()
        if len(cna_slot_nums) > 0:
            console_type = cna_slot_nums.pop(0)
    if boot_type == pvm_lpar.BootStorageType.VFC:
        msg = _LI("Setting Virtual Fibre Channel slot as load source for VM "
                  "%s") % lpar_w.name
        LOG.info(msg)
        vios_wraps = pvm_vios.VIOS.wrap(adapter.read(
            pvm_vios.VIOS.schema_type,
            xag=[pvm_vios.VIOS.xags.FC_MAPPING]))
        for vios_wrap in vios_wraps:
            existing_maps = pvm_vfcmap.find_maps(
                vios_wrap.vfc_mappings, lpar_w.id)
            client_adapters.extend([vfcmap.client_adapter
                                    for vfcmap in existing_maps])
    else:
        # Boot volumes backed by vSCSI physical volumes, SSP LUs and local
        # disks are handled here.
        msg = _LI("Setting Virtual SCSI slot slot as load source for VM "
                  "%s") % lpar_w.name
        LOG.info(msg)
        vios_wraps = pvm_vios.VIOS.wrap(adapter.read(
            pvm_vios.VIOS.schema_type,
            xag=[pvm_vios.VIOS.xags.SCSI_MAPPING]))
        for vios_wrap in vios_wraps:
            existing_maps = pvm_smap.find_maps(
                vios_wrap.scsi_mappings, lpar_w.id)
            client_adapters.extend([smap.client_adapter
                                    for smap in existing_maps])
    slot_nums = sorted(set(s.lpar_slot_num for s in client_adapters))
    if slot_nums:
        load_source = slot_nums.pop(0)
    if slot_nums:
        alt_load_source = slot_nums.pop(0)
    if load_source is not None:
        if alt_load_source is None:
            alt_load_source = load_source
        lpar_w.io_config.tagged_io = pvm_bp.TaggedIO.bld(
            adapter, load_src=load_source,
            console=console_type,
            alt_load_src=alt_load_source)
    else:
        raise pvmex.IBMiLoadSourceNotFound(vm_name=lpar_w.name)
    lpar_w.desig_ipl_src = pvm_lpar.IPLSrc.B
    lpar_w.keylock_pos = pvm_bp.KeylockPos.NORMAL
    return lpar_w
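A hedged usage sketch for the function above: since update_ibmi_settings only mutates the wrapper locally, the caller is expected to push the change back to the system afterwards. The update() call follows pypowervm's usual wrapper pattern; adapter and lpar_w are the caller's existing objects:

# Sketch only: apply the IBMi settings, then persist them via REST.
lpar_w = update_ibmi_settings(adapter, lpar_w,
                              pvm_lpar.BootStorageType.VFC)
lpar_w = lpar_w.update()  # push the modified wrapper to the server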
Code example #31
0
File: ssp.py Project: saydulk/nova-powervm
    def disconnect_image_disk(self, context, instance, stg_ftsk=None,
                              disk_type=None):
        """Disconnects the storage adapters from the image disk.

        :param context: nova context for operation
        :param instance: instance to disconnect the image for.
        :param stg_ftsk: (Optional) The pypowervm transaction FeedTask for
                         the I/O Operations.  If provided, the Virtual I/O
                         Server mapping updates will be added to the FeedTask.
                         This defers the updates to some later point in time.
                         If the FeedTask is not provided, the updates will be
                         run immediately when this method is executed.
        :param disk_type: The list of disk types to remove, or None to
                          remove all disks from the VM.
        :return: A list of all the backing storage elements that were
                 disconnected from the I/O Server and VM.
        """
        if stg_ftsk is None:
            stg_ftsk = vios.build_tx_feed_task(
                self.adapter, self.host_uuid, name='ssp',
                xag=[pvm_vios.VIOS.xags.SCSI_MAPPING])

        lpar_uuid = vm.get_pvm_uuid(instance)
        match_func = tsk_map.gen_match_func(pvm_stg.LU, prefixes=disk_type)

        # Delay run function to remove the mapping between the VM and the LU
        def rm_func(vios_w):
            LOG.info(_LI("Removing SSP disk connection between VM %(vm)s and "
                         "VIOS %(vios)s."),
                     {'vm': instance.name, 'vios': vios_w.name})
            return tsk_map.remove_maps(vios_w, lpar_uuid,
                                       match_func=match_func)

        # Remove the mapping from *each* VIOS on the LPAR's host.
        # The LPAR's host has to be self.host_uuid, else the PowerVM API will
        # fail.
        #
        # Note - this may not be all the VIOSes on the system...just the ones
        # in the SSP cluster.
        #
        # The mappings will normally be the same on all VIOSes, unless a VIOS
        # was down when a disk was added.  So for the return value, we need to
        # collect the union of all relevant mappings from all VIOSes.
        lu_set = set()
        for vios_uuid in self.vios_uuids:
            # Queue the removal subtask for this VIOS.
            stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(rm_func)

            # Find the active LUs so that a delete op knows what to remove.
            vios_w = stg_ftsk.wrapper_tasks[vios_uuid].wrapper
            mappings = tsk_map.find_maps(vios_w.scsi_mappings,
                                         client_lpar_id=lpar_uuid,
                                         match_func=match_func)
            if mappings:
                lu_set.update([x.backing_storage for x in mappings])

        # Run the FeedTask if it was built locally
        if stg_ftsk.name == 'ssp':
            stg_ftsk.execute()

        return list(lu_set)
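The stg_ftsk parameter enables the deferral pattern the docstring describes: a caller performing several mapping operations can share one FeedTask and pay for a single VIOS update per server. A rough sketch under that assumption (disk_dvr, context, and instance are hypothetical caller-side names):

# Sketch only: batch several disconnects into one FeedTask execution.
stg_ftsk = vios.build_tx_feed_task(
    disk_dvr.adapter, disk_dvr.host_uuid, name='batch',
    xag=[pvm_vios.VIOS.xags.SCSI_MAPPING])
lus = disk_dvr.disconnect_image_disk(context, instance, stg_ftsk=stg_ftsk)
# ...queue further subtasks against the same wrapper_tasks here...
stg_ftsk.execute()  # one REST update per VIOS rather than one per call

Because the shared FeedTask is not named 'ssp', disconnect_image_disk leaves execution to the caller; it only runs the FeedTask it built itself.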