Example #1
    def test_index_mappings(self):
        vwrap = pvm_vios.VIOS.wrap(tju.load_file(VIO_MULTI_MAP_FILE2,
                                                 self.adpt))
        idx = scsi_mapper.index_mappings(vwrap.scsi_mappings)

        self.assertEqual({
            'by-lpar-id', 'by-lpar-uuid', 'by-storage-udid'}, set(idx.keys()))

        exp_lpar_ids = ('2', '5', '6', '7', '10', '11', '12', '13', '14', '15',
                        '16', '17', '18', '19', '20', '21', '22', '23', '24',
                        '27', '28', '29', '33', '35', '36', '39', '40')
        self.assertEqual(set(exp_lpar_ids), set(idx['by-lpar-id'].keys()))
        # Each mapping has a different LPAR ID, so each LPAR ID only has one
        # mapping
        for lpar_id in exp_lpar_ids:
            maplist = idx['by-lpar-id'][lpar_id]
            self.assertEqual(1, len(maplist))
            self.assertIsInstance(maplist[0], pvm_vios.VSCSIMapping)
            self.assertEqual(lpar_id, str(maplist[0].server_adapter.lpar_id))

        # Not all mappings have client_lpar_href, so this list is shorter.
        exp_lpar_uuids = ('0C0A6EBE-7BF4-4707-8780-A140F349E42E',
                          '0FB69DD7-4B93-4C09-8916-8BC9821ABAAC',
                          '263EE77B-AD6E-4920-981A-4B7D245B8571',
                          '292ACAF5-C96B-447A-8C7E-7503D80AA33E',
                          '32AA6AA5-CCE6-4523-860C-0852455036BE',
                          '3CE30EC6-C98A-4A58-A764-09DAC7C324BC',
                          '615C9134-243D-4A11-93EB-C0556664B761',
                          '7CFDD55B-E0D7-4B8C-8254-9305E31BB1DC')
        self.assertEqual(set(exp_lpar_uuids), set(idx['by-lpar-uuid'].keys()))
        # Of ten mappings with client_lpar_href, three have the same UUID.
        for lpar_uuid in exp_lpar_uuids:
            maplist = idx['by-lpar-uuid'][lpar_uuid]
            for smap in maplist:
                self.assertIsInstance(smap, pvm_vios.VSCSIMapping)
                self.assertTrue(smap.client_lpar_href.endswith(lpar_uuid))
            if lpar_uuid == '0C0A6EBE-7BF4-4707-8780-A140F349E42E':
                self.assertEqual(3, len(maplist))
            else:
                self.assertEqual(1, len(maplist))

        # Only five mappings have storage, and all are different
        self.assertEqual(5, len(idx['by-storage-udid'].keys()))
        for sudid in idx['by-storage-udid']:
            self.assertEqual(1, len(idx['by-storage-udid'][sudid]))
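
For reference, the test above pins down the contract of
scsi_mapper.index_mappings: a dict with exactly three top-level keys, each
mapping a string key to a list of VSCSIMapping wrappers.  The sketch below is
a simplified illustration of that contract, not pypowervm's actual
implementation; it assumes only the attributes the tests themselves exercise
(server_adapter.lpar_id, client_lpar_href, backing_storage.udid).

def index_mappings_sketch(maps):
    """Illustrative only: build the three-way index the test asserts."""
    ret = {'by-lpar-id': {}, 'by-lpar-uuid': {}, 'by-storage-udid': {}}

    def add(category, key, smap):
        # Mappings lacking the relevant datum are simply not indexed.
        if key is not None:
            ret[category].setdefault(key, []).append(smap)

    for smap in maps:
        add('by-lpar-id', str(smap.server_adapter.lpar_id), smap)
        # Not all mappings have client_lpar_href; the UUID is the href's
        # last path segment (the test checks endswith(lpar_uuid)).
        href = smap.client_lpar_href
        add('by-lpar-uuid', href.rsplit('/', 1)[-1] if href else None, smap)
        # Not all mappings have backing storage.
        stg = smap.backing_storage
        add('by-storage-udid', stg.udid if stg is not None else None, smap)
    return ret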
Example #2
    def execute(self, wrapper_task_rets):
        """Remove the storage elements associated with the deleted mappings.

        We remove storage elements for each VIOS, but only those we can be sure
        belong ONLY to that VIOS.  That is, we do not remove SSP Logical Units
        because they may be mapped from some other VIOS in the cluster - one we
        don't even know about.
        """
        # Accumulate removal tasks
        rmtasks = []
        for vuuid, rets in wrapper_task_rets.items():
            vwrap = rets['wrapper']
            # VFC mappings don't have storage we can get to, so ignore those.

            # We may get removals from more than one subtask.  All will have
            # the 'vscsi_removals_' prefix.  There may be some overlap, but
            # the removal methods will ignore duplicates.
            vscsi_rms = []
            for vrk in (k for k in rets if k.startswith('vscsi_removals_')):
                vscsi_rms.extend(rets[vrk])

            # We can short-circuit out of this VIOS if no VSCSI mappings were
            # removed from it.
            if not vscsi_rms:
                continue

            # Index remaining VSCSI mappings to isolate still-in-use storage.
            smindex = sm.index_mappings(vwrap.scsi_mappings)

            # Figure out which storage elements need to be removed.
            # o Some VSCSI mappings may not have backing storage.
            # o Ignore any storage elements that are still in use (still have
            # mappings associated with them).
            stg_els_to_remove = [
                rmap.backing_storage for rmap in vscsi_rms if
                rmap.backing_storage is not None and
                rmap.backing_storage.udid not in smindex['by-storage-udid']]

            # If there's nothing left, we're done with this VIOS
            if not stg_els_to_remove:
                continue

            # Extract lists of each type of storage
            vopts_to_rm = []
            vdisks_to_rm = []
            for stg in stg_els_to_remove:
                if isinstance(stg, (stor.LU, stor.PV)):
                    LOG.warning(
                        _("Not removing storage %(stg_name)s of type "
                          "%(stg_type)s because it cannot be determined "
                          "whether it is still in use.  Manual "
                          "verification and cleanup may be necessary."),
                        {'stg_name': stg.name, 'stg_type': stg.schema_type})
                elif isinstance(stg, stor.VOptMedia):
                    vopts_to_rm.append(stg)
                elif isinstance(stg, stor.VDisk):
                    vdisks_to_rm.append(stg)
                else:
                    LOG.warning(
                        _("Storage scrub ignoring storage element "
                          "%(stg_name)s because it is of unexpected type "
                          "%(stg_type)s."),
                        {'stg_name': stg.name, 'stg_type': stg.schema_type})

            # Any storage to be deleted?
            if not any((vopts_to_rm, vdisks_to_rm)):
                continue

            # If we get here, we have storage that needs to be deleted from one
            # or more volume groups.  We don't have a way of knowing which ones
            # without REST calls, so get all VGs for this VIOS and delete from
            # all of them.  POST will only be done on VGs which actually need
            # updating.
            vgftsk = tx.FeedTask('scrub_vg_vios_%s' % vuuid, stor.VG.getter(
                vwrap.adapter, parent_class=vwrap.__class__,
                parent_uuid=vwrap.uuid))
            if vdisks_to_rm:
                vgftsk.add_functor_subtask(
                    _rm_vdisks, vdisks_to_rm, logspec=(LOG.warning, _(
                        "Scrubbing the following %(vdcount)d Virtual Disks "
                        "from VIOS %(vios)s: %(vdlist)s"), {
                        'vdcount': len(vdisks_to_rm), 'vios': vwrap.name,
                        'vdlist': ["%s (%s)" % (vd.name, vd.udid) for vd
                                   in vdisks_to_rm]}))
            if vopts_to_rm:
                vgftsk.add_functor_subtask(
                    _rm_vopts, vopts_to_rm, logspec=(LOG.warning, _(
                        "Scrubbing the following %(vocount)d Virtual Opticals "
                        "from VIOS %(vios)s: %(volist)s"), {
                        'vocount': len(vopts_to_rm), 'vios': vwrap.name,
                        'volist': ["%s (%s)" % (vo.name, vo.udid) for vo
                                   in vopts_to_rm]}))
            rmtasks.append(vgftsk)

        # We only created removal Tasks if we found something to remove.
        if rmtasks:
            # Execute any storage removals in parallel, with at least 8
            # threads (one per removal task when there are more than 8).
            tf_eng.run(
                tf_uf.Flow('remove_storage').add(*rmtasks), engine='parallel',
                executor=tx.ContextThreadPoolExecutor(max(8, len(rmtasks))))
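
The parallel tail of execute() leans on OpenStack taskflow.  Below is a
minimal standalone sketch of the same run pattern, assuming stock taskflow
only; in pypowervm, tf_eng and tf_uf alias taskflow.engines and
taskflow.patterns.unordered_flow, and tx.ContextThreadPoolExecutor is a
context-preserving wrapper over the standard thread pool.  _EchoTask is a
hypothetical stand-in for the FeedTasks accumulated in rmtasks.

from concurrent import futures

import taskflow.engines as tf_eng
from taskflow.patterns import unordered_flow as tf_uf
from taskflow import task


class _EchoTask(task.Task):
    """Hypothetical stand-in for one per-VIOS storage-removal task."""
    def execute(self):
        print('running %s' % self.name)


rmtasks = [_EchoTask('rm_%d' % i) for i in range(3)]
# Unordered flow: the tasks have no dependencies, so the parallel engine
# may run them all concurrently, bounded by the executor's worker count.
tf_eng.run(
    tf_uf.Flow('remove_storage').add(*rmtasks), engine='parallel',
    executor=futures.ThreadPoolExecutor(max_workers=8))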