Example #1
    def test_lu_vopt_vdisk(self, mock_rm_lu, mock_rm_vopt, mock_rm_vd):
        def verify_rm_stg_call(exp_list):
            def _rm_stg(wrapper, stglist, *a, **k):
                self.assertEqual(len(exp_list), len(stglist))
                for exp, act in zip(exp_list, stglist):
                    self.assertEqual(exp.udid, act.udid)

            return _rm_stg

        warns = [
            mock.call(
                mock.ANY, {
                    'stg_type': 'VSCSI',
                    'lpar_id': 3,
                    'num_maps': 3,
                    'vios_name': self.vio_feed[0].name
                })
        ]

        # We should ignore the LUs...
        mock_rm_lu.side_effect = self.fail
        # ...but should emit a warning about ignoring them
        warns.append(
            mock.call(
                mock.ANY, {
                    'stg_name':
                    'volume-boot-8246L1C_0604CAA-salsman66-00000004',
                    'stg_type': 'LogicalUnit'
                }))

        vorm = self.vio_feed[0].scsi_mappings[5].backing_storage
        mock_rm_vopt.side_effect = verify_rm_stg_call([vorm])
        warns.append(
            mock.call(
                mock.ANY, {
                    'vocount': 1,
                    'vios': self.vio_feed[0].name,
                    'volist': ["%s (%s)" % (vorm.name, vorm.udid)]
                }))

        vdrm = self.vio_feed[0].scsi_mappings[8].backing_storage
        mock_rm_vd.side_effect = verify_rm_stg_call([vdrm])
        warns.append(
            mock.call(
                mock.ANY, {
                    'vdcount': 1,
                    'vios': self.vio_feed[0].name,
                    'vdlist': ["%s (%s)" % (vdrm.name, vdrm.udid)]
                }))

        ts.add_lpar_storage_scrub_tasks([3], self.ftsk, lpars_exist=True)
        # LPAR ID 45 is not represented in the mappings.  Test a) that it is
        # ignored, b) that we can have two separate LPAR storage scrub tasks
        # in the same FeedTask (no duplicate 'provides' names).
        ts.add_lpar_storage_scrub_tasks([45], self.ftsk, lpars_exist=True)
        self.ftsk.execute()
        self.logfx.patchers['warn'].mock.assert_has_calls(warns,
                                                          any_order=True)
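For reference, the scrub pattern this test exercises reduces to a short sketch like the one below. The module aliases mirror those used elsewhere on this page, and `adapter`/`vios_uuid` are placeholder parameters; treat it as an illustration, not the test's fixture setup.

import pypowervm.const as c
import pypowervm.tasks.storage as tsk_stg
import pypowervm.utils.transaction as tx
import pypowervm.wrappers.virtual_io_server as pvm_vios


def scrub_lpar_storage(adapter, vios_uuid, lpar_ids):
    """Sketch: scrub stale storage and mappings for lpar_ids on one VIOS."""
    vwrap = pvm_vios.VIOS.get(adapter, uuid=vios_uuid,
                              xag=(c.XAG.VIO_SMAP, c.XAG.VIO_FMAP))
    ftsk = tx.FeedTask('scrub_vios_%s' % vios_uuid, [vwrap])
    # lpars_exist=True skips the live-LPAR check, as in the test above.
    tsk_stg.add_lpar_storage_scrub_tasks(lpar_ids, ftsk, lpars_exist=True)
    ftsk.execute()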
Example #2
 def execute(self):
     LOG.info(_LI('Creating instance: %s'), self.instance.name)
     wrap = vm.crt_lpar(self.adapter, self.host_wrapper, self.instance,
                        self.flavor)
     pvm_stg.add_lpar_storage_scrub_tasks([wrap.id], self.stg_ftsk,
                                          lpars_exist=True)
     return wrap
Example #3
    def execute(self):
        data = None
        if self.nvram_mgr is not None:
            LOG.info('Fetching NVRAM.', instance=self.instance)
            data = self.nvram_mgr.fetch(self.instance)
            LOG.debug('NVRAM data is: %s', data, instance=self.instance)

        wrap = vm.create_lpar(self.adapter,
                              self.host_wrapper,
                              self.instance,
                              nvram=data,
                              slot_mgr=self.slot_mgr)
        pvm_stg.add_lpar_storage_scrub_tasks([wrap.id],
                                             self.stg_ftsk,
                                             lpars_exist=True)
        # If the stg_ftsk passed in was None and we initialized a
        # 'create_scrubber' stg_ftsk then run it immediately. We do
        # this because we moved the LPAR storage scrub tasks out of the
        # build_map initialization. This was so that we could construct the
        # build map earlier in the spawn, just before the LPAR is created.
        # Only rebuilds should be passing in None for stg_ftsk.
        if self.stg_ftsk.name == 'create_scrubber':
            LOG.info('Scrubbing storage as part of rebuild.',
                     instance=self.instance)
            self.stg_ftsk.execute()

        return wrap
Example #4
File: vm.py Project: Juniper/nova
 def execute(self):
     wrap = vm.create_lpar(self.adapter, self.host_wrapper, self.instance)
     # Get rid of any stale storage and/or mappings associated with the new
     # LPAR's ID, so it doesn't accidentally have access to something it
     # oughtn't.
     LOG.info('Scrubbing stale storage.', instance=self.instance)
     pvm_stg.add_lpar_storage_scrub_tasks([wrap.id], self.stg_ftsk,
                                          lpars_exist=True)
     return wrap
Example #5
File: _fc.py Project: tpeponas/pypowervm
def discover_hdisk(adapter, vios_uuid, itls, vendor=LUAType.OTHER):
    """Attempt to discover a hard disk attached to a Virtual I/O Server.

    See lua_recovery.  This method attempts that call and analyzes the
    results.  On certain failure conditions (see below), this method will find
    stale LPARs, scrub storage artifacts associated with them, and then retry
    lua_recovery.  The retry is only attempted once; that result is returned
    regardless.

    The main objective of this method is to resolve errors resulting from
    incomplete cleanup of previous LPARs.  The stale LPAR's storage mappings
    can cause hdisk discovery to fail because it thinks the hdisk is already in
    use.

    Retry conditions: The scrub-and-retry will be triggered if:
    o dev_name is None; or
    o status is anything other than DEVICE_AVAILABLE or FOUND_ITL_ERR.  (The
      latter is acceptable because it means we discovered some, but not all, of
      the ITLs.  This is okay as long as dev_name is set.)

    :param adapter: The pypowervm adapter.
    :param vios_uuid: The Virtual I/O Server UUID.
    :param itls: A list of ITL objects.
    :param vendor: The vendor for the LUN.  See the LUAType.* constants.
    :return status: The status code from the discover process.
                    See LUAStatus.* constants.
    :return dev_name: The name of the discovered hdisk.
    :return udid: The UDID of the device.
    """
    # First attempt
    status, devname, udid = lua_recovery(adapter,
                                         vios_uuid,
                                         itls,
                                         vendor=vendor)
    # Do we need to scrub and retry?
    if not good_discovery(status, devname):
        vwrap = pvm_vios.VIOS.get(adapter,
                                  uuid=vios_uuid,
                                  xag=(c.XAG.VIO_SMAP, c.XAG.VIO_FMAP))

        scrub_ids = tsk_stg.find_stale_lpars(vwrap)
        if scrub_ids:
            # Detailed warning message by _log_lua_status
            LOG.warning(
                _("hdisk discovery failed; will scrub stale storage "
                  "for LPAR IDs %s and retry."), scrub_ids)
            # Scrub from just the VIOS in question.
            scrub_task = tx.FeedTask('scrub_vios_%s' % vios_uuid, [vwrap])
            tsk_stg.add_lpar_storage_scrub_tasks(scrub_ids, scrub_task)
            scrub_task.execute()
            status, devname, udid = lua_recovery(adapter,
                                                 vios_uuid,
                                                 itls,
                                                 vendor=vendor)
    return status, devname, udid
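A hedged usage sketch for discover_hdisk (assuming it, good_discovery, and the ITL class are all re-exported via pypowervm.tasks.hdisk; the initiator/target/LUN values are placeholders):

from pypowervm.tasks import hdisk


def find_hdisk(adapter, vios_uuid):
    # ITL = initiator/target/LUN triple; these values are illustrative only.
    itls = [hdisk.ITL('21000024ff649105', '500507680210e522', 2)]
    status, dev_name, udid = hdisk.discover_hdisk(adapter, vios_uuid, itls)
    # good_discovery applies the same success criteria described above.
    if not hdisk.good_discovery(status, dev_name):
        raise RuntimeError('hdisk discovery failed: status=%s' % status)
    return dev_name, udid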
Example #6
 def execute(self):
     wrap = vm.create_lpar(self.adapter, self.host_wrapper, self.instance)
     # Get rid of any stale storage and/or mappings associated with the new
     # LPAR's ID, so it doesn't accidentally have access to something it
     # oughtn't.
     ftsk = pvm_tpar.build_active_vio_feed_task(
         self.adapter,
         name='create_scrubber',
         xag={pvm_const.XAG.VIO_SMAP, pvm_const.XAG.VIO_FMAP})
     pvm_stg.add_lpar_storage_scrub_tasks([wrap.id], ftsk, lpars_exist=True)
     LOG.info('Scrubbing stale storage.', instance=self.instance)
     ftsk.execute()
Example #7
File: vm.py Project: adreznec/nova-powervm
    def execute_impl(self):
        data = None
        if self.nvram_mgr is not None:
            LOG.info(_LI('Fetching NVRAM for instance %s.'),
                     self.instance.name, instance=self.instance)
            data = self.nvram_mgr.fetch(self.instance)
            LOG.debug('NVRAM data is: %s', data, instance=self.instance)

        wrap = vm.crt_lpar(self.adapter, self.host_wrapper, self.instance,
                           self.flavor, nvram=data)
        pvm_stg.add_lpar_storage_scrub_tasks([wrap.id], self.stg_ftsk,
                                             lpars_exist=True)
        return wrap
Example #8
 def test_no_matches(self, mock_rm_stg):
     """When removals have no hits, log debug messages, but no warnings."""
     # Our data set has no VFC mappings and no VSCSI mappings with LPAR ID 1
     ts.add_lpar_storage_scrub_tasks([1], self.ftsk, lpars_exist=True)
     self.ftsk.execute()
     self.assertEqual(0, self.logfx.patchers['warn'].mock.call_count)
     for vname in (vwrap.name for vwrap in self.vio_feed):
         self.logfx.patchers['debug'].mock.assert_any_call(
             mock.ANY, dict(stg_type='VSCSI', lpar_id=1, vios_name=vname))
         self.logfx.patchers['debug'].mock.assert_any_call(
             mock.ANY, dict(stg_type='VFC', lpar_id=1, vios_name=vname))
     self.assertEqual(0, self.txfx.patchers['update'].mock.call_count)
     self.assertEqual(1, mock_rm_stg.call_count)
Example #9
 def test_matches_warn(self, mock_rm_vfc_maps):
     """When removals hit, log warnings including the removal count."""
     # Mock vfc remove_maps with a multi-element list to verify num_maps
     mock_rm_vfc_maps.return_value = [1, 2, 3]
     ts.add_lpar_storage_scrub_tasks([32], self.ftsk, lpars_exist=True)
     self.ftsk.execute()
     mock_rm_vfc_maps.assert_has_calls(
         [mock.call(wrp, 32) for wrp in self.vio_feed], any_order=True)
     for vname in (vwrap.name for vwrap in self.vio_feed):
         self.logfx.patchers['warn'].mock.assert_any_call(
             mock.ANY, dict(stg_type='VFC', num_maps=3, lpar_id=32,
                            vios_name=vname))
     self.logfx.patchers['warn'].mock.assert_any_call(
         mock.ANY, dict(stg_type='VSCSI', num_maps=1, lpar_id=32,
                        vios_name='nimbus-ch03-p2-vios1'))
     self.logfx.patchers['debug'].mock.assert_any_call(
         mock.ANY, dict(stg_type='VSCSI', lpar_id=32,
                        vios_name='nimbus-ch03-p2-vios2'))
     self.assertEqual(2, self.txfx.patchers['update'].mock.call_count)
Example #10
    def init_recreate_map(self, adapter, vol_drv_iter):
        """To be used on a target system.  Builds the 'slot recreate' map.

        This is to initialize on the target system how the client slots should
        be rebuilt on the client VM.

        This should not be called unless it is a VM recreate.

        :param adapter: The pypowervm adapter.
        :param vol_drv_iter: An iterator of the volume drivers.
        """
        # This should only be called on a rebuild. Focus on being correct
        # first. Performance is secondary.

        # We need to scrub existing stale mappings, including those for the VM
        # we're creating.  It is critical that this happen *before* we create
        # any of the mappings we actually want this VM to have.
        scrub_ftsk = pvm_tstor.ComprehensiveScrub(adapter)
        lpar_id = vm.get_vm_id(adapter, vm.get_pvm_uuid(self.instance))
        pvm_tstor.add_lpar_storage_scrub_tasks([lpar_id], scrub_ftsk,
                                               lpars_exist=True)
        scrub_ftsk.execute()
        self._vios_wraps = scrub_ftsk.feed

        pv_vscsi_vol_to_vio = {}
        fabric_names = []
        for bdm, vol_drv in vol_drv_iter:
            if vol_drv.vol_type() == 'vscsi':
                self._pv_vscsi_vol_to_vio(vol_drv, pv_vscsi_vol_to_vio)
            elif len(fabric_names) == 0 and vol_drv.vol_type() == 'npiv':
                fabric_names = vol_drv._fabric_names()

        # Run the full initialization now that we have the pre-requisite data
        try:
            self._build_map = slot_map.RebuildSlotMap(
                self, self._vios_wraps, pv_vscsi_vol_to_vio, fabric_names)
        except pvm_exc.InvalidHostForRebuild as e:
            raise p_exc.InvalidRebuild(error=six.text_type(e))
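The scrub-before-recreate step at the top of this method stands alone as a short sketch (module alias as in the example; `adapter` and `lpar_id` are placeholders):

import pypowervm.tasks.storage as pvm_tstor


def scrub_for_recreate(adapter, lpar_id):
    # ComprehensiveScrub seeds a FeedTask over the VIOSes with subtasks that
    # remove orphaned storage; the explicit per-LPAR scrub ensures the VM
    # being recreated cannot inherit stale mappings.
    scrub_ftsk = pvm_tstor.ComprehensiveScrub(adapter)
    pvm_tstor.add_lpar_storage_scrub_tasks([lpar_id], scrub_ftsk,
                                           lpars_exist=True)
    scrub_ftsk.execute()
    # After execution, the FeedTask's feed holds fresh VIOS wrappers.
    return scrub_ftsk.feed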
Example #11
 def test_multiple_removals(self, mock_wrap):
     # Pretend LPAR feed is "empty" so we don't skip any removals.
     mock_wrap.return_value = []
     v1 = self.vio_feed[0]
     v2 = self.vio_feed[1]
     v1_map_count = len(v1.scsi_mappings)
     v2_map_count = len(v2.scsi_mappings)
     # Zero removals works
     ts.add_lpar_storage_scrub_tasks([], self.ftsk)
     self.ftsk.execute()
     self.assertEqual(0, self.txfx.patchers['update'].mock.call_count)
     # Removals for which no mappings exist
     ts.add_lpar_storage_scrub_tasks([71, 72, 76, 77], self.ftsk)
     self.ftsk.execute()
     self.assertEqual(0, self.txfx.patchers['update'].mock.call_count)
     # Remove some from each VIOS
     self.assertEqual(v1_map_count, len(v1.scsi_mappings))
     self.assertEqual(v2_map_count, len(v2.scsi_mappings))
     ts.add_lpar_storage_scrub_tasks([3, 37, 80, 7, 27, 85], self.ftsk)
     self.ftsk.execute()
     self.assertEqual(2, self.txfx.patchers['update'].mock.call_count)
     self.assertEqual(v1_map_count - 3, len(v1.scsi_mappings))
     self.assertEqual(v2_map_count - 3, len(v2.scsi_mappings))
     # Now make the LPAR feed hit some of the removals.  They should be
     # skipped.
     self.txfx.patchers['update'].mock.reset_mock()
     v1_map_count = len(v1.scsi_mappings)
     v2_map_count = len(v2.scsi_mappings)
     mock_wrap.return_value = [mock.Mock(id=i) for i in (4, 5, 8, 11)]
     ts.add_lpar_storage_scrub_tasks([4, 5, 6, 8, 11, 12], self.ftsk)
     self.ftsk.execute()
     self.assertEqual(2, self.txfx.patchers['update'].mock.call_count)
     self.assertEqual(v1_map_count - 1, len(v1.scsi_mappings))
     self.assertEqual(v2_map_count - 1, len(v2.scsi_mappings))
     # Make sure the right ones were ignored
     v1_map_lids = [sm.server_adapter.lpar_id for sm in v1.scsi_mappings]
     v2_map_lids = [sm.server_adapter.lpar_id for sm in v2.scsi_mappings]
     self.assertIn(4, v1_map_lids)
     self.assertIn(5, v1_map_lids)
     self.assertIn(8, v2_map_lids)
     self.assertIn(11, v2_map_lids)
     # ...and the right ones were removed
     self.assertNotIn(6, v1_map_lids)
     self.assertNotIn(12, v2_map_lids)
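The behavior pinned down here: with the default lpars_exist=False, the scrub subtasks consult the LPAR feed and skip any ID belonging to a live partition, so only genuinely stale mappings are removed. A minimal sketch of the two modes (`ftsk` is a FeedTask over the VIOS feed, built as in the earlier sketch):

import pypowervm.tasks.storage as ts


def scrub_stale_only(ftsk, lpar_ids):
    # Default: the LPAR feed is consulted and live LPAR IDs are skipped.
    ts.add_lpar_storage_scrub_tasks(lpar_ids, ftsk)
    ftsk.execute()


def scrub_unconditionally(ftsk, lpar_ids):
    # lpars_exist=True: the caller asserts these LPARs are already gone.
    ts.add_lpar_storage_scrub_tasks(lpar_ids, ftsk, lpars_exist=True)
    ftsk.execute()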
Example #12
def discover_iscsi(adapter,
                   host_ip,
                   user,
                   password,
                   iqn,
                   vios_uuid,
                   transport_type=None,
                   lunid=None,
                   iface_name=None,
                   auth=None,
                   discovery_auth=None,
                   discovery_username=None,
                   discovery_password=None,
                   multipath=False):
    """Initiates the iSCSI discovery and login job

    :param adapter: pypowervm adapter
    :param host_ip: The portal or list of portals for the iscsi target. A
                    portal looks like ip:port.
    :param user: The username needed for authentication.
    :param password: The password needed for authentication.
    :param iqn: The IQN (iSCSI Qualified Name) or list of IQNs for the created
                volume on the target (e.g. iqn.2016-06.world.srv:target00).
    :param vios_uuid: The uuid of the VIOS (VIOS must be a Novalink VIOS type).
    :param transport_type: (Deprecated) Transport type of the volume to be
                           connected. Use iface_name instead.
    :param lunid: Target LUN ID or list of LUN IDs for the volume.
    :param iface_name: iSCSI iface name to use for the connection.
    :param auth: Authentication type.
    :param discovery_auth: Discovery authentication type.
    :param discovery_username: The username needed for discovery
                               authentication.
    :param discovery_password: The password needed for discovery
                               authentication.
    :param multipath: Whether the connection is multipath or not.
    :return: The device name of the created volume.
    :return: The UniqueDeviceId of the created volume.
    :raise: ISCSIDiscoveryFailed in case of bad return code.
    :raise: JobRequestFailed in case of failure.
    """

    kwargs = {
        'user': user,
        'password': password,
        'iqn': iqn,
        'transport_type': transport_type,
        'lunid': lunid,
        'iface_name': iface_name,
        'auth': auth,
        'discovery_auth': discovery_auth,
        'discovery_username': discovery_username,
        'discovery_password': discovery_password
    }

    status, devname, udid = _discover_iscsi(adapter, host_ip, vios_uuid,
                                            multipath, **kwargs)
    if status:
        _log_iscsi_status(status)

    # If status is ISCSI_ERR_ODM_QUERY, stale iSCSI disks may be present;
    # clean up and re-discover.
    if status == ISCSIStatus.ISCSI_ERR_ODM_QUERY:
        vwrap = VIOS.get(adapter, uuid=vios_uuid, xag=[c.XAG.VIO_SMAP])
        # Check for stale lpars with SCSI mappings
        scrub_ids = tsk_stg.find_stale_lpars(vwrap)
        if scrub_ids:
            LOG.info(
                _("Scrub stale storage for LPAR IDs %s and "
                  "retry iSCSI discovery."), scrub_ids)
            # Scrub from just the VIOS in question.
            scrub_task = tx.FeedTask('scrub_vios_%s' % vios_uuid, [vwrap])
            tsk_stg.add_lpar_storage_scrub_tasks(scrub_ids, scrub_task)
            scrub_task.execute()

        # iSCSI discovery does not auto-clean the hdisk, so remove the stale
        # iSCSI hdisk before retrying.
        remove_iscsi(adapter, iqn, vios_uuid, iface_name, lunid, host_ip,
                     multipath)

        # Re-discover the volume
        status, devname, udid = _discover_iscsi(adapter, host_ip, vios_uuid,
                                                multipath, **kwargs)

    if not good_discovery(status, devname):
        raise pexc.ISCSIDiscoveryFailed(vios_uuid=vios_uuid, status=status)

    return devname, udid
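A hedged usage sketch (assuming discover_iscsi is exposed via pypowervm.tasks.hdisk as in released pypowervm; the portal, credentials, IQN, and LUN are placeholder values):

import logging

from pypowervm import exceptions as pexc
from pypowervm.tasks import hdisk

LOG = logging.getLogger(__name__)


def login_iscsi(adapter, vios_uuid):
    try:
        # Returns (devname, udid) on a good discovery.
        return hdisk.discover_iscsi(
            adapter, '192.168.1.10:3260', 'admin', 'secret',
            'iqn.2016-06.world.srv:target00', vios_uuid, lunid=0)
    except pexc.ISCSIDiscoveryFailed as exc:
        LOG.error('iSCSI discovery failed: %s', exc)
        raise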