Example #1
    def test_exceptions(self):
        def bad1(wrapper, s):
            bad2(wrapper)

        def bad2(wrapper):
            bad3(wrapper.field)

        def bad3(tag):
            raise IOError("this is an exception on %s!" % tag)

        # With one entry in the feed, one exception should be raised, and it
        # should bubble up as normal.
        feed = [mock.Mock(spec=lpar.LPAR, field='lpar1')]
        ft = tx.FeedTask('ft', feed).add_functor_subtask(bad1, 'this is bad')

        flow = tf_uf.Flow('the flow')
        flow.add(ft)
        self.assertRaises(IOError, tf_eng.run, flow)

        # With multiple entries in the feed, TaskFlow will wrap the exceptions
        # in a WrappedFailure.  We should repackage it, and the message in the
        # resulting MultipleExceptionsInFeedTask should contain all the
        # exception messages.
        feed.append(mock.Mock(spec=lpar.LPAR, field='lpar2'))
        ft = tx.FeedTask('ft', feed).add_functor_subtask(bad1, 'this is bad')

        flow = tf_uf.Flow('the flow')
        flow.add(ft)
        with self.assertRaises(ex.MultipleExceptionsInFeedTask) as mult_ex:
            tf_eng.run(flow)

        # Make sure the wrapped exception messages show up in the exception.
        self.assertIn('exception on lpar1!', mult_ex.exception.args[0])
        self.assertIn('exception on lpar2!', mult_ex.exception.args[0])
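The behavior verified above suggests a caller-side handling pattern. Below is a minimal, hypothetical sketch (reusing the tf_eng and ex aliases from the test; the flow is assumed to be built as above):

# Hypothetical caller sketch: handle both failure modes of a FeedTask run.
try:
    tf_eng.run(flow)
except ex.MultipleExceptionsInFeedTask as mult:
    # With more than one entry in the feed, the individual exception messages
    # are aggregated into the message of the repackaged exception.
    print('FeedTask failed on several entries: %s' % mult.args[0])
except IOError as ioe:
    # With a single entry, the original exception bubbles up unchanged.
    print('FeedTask failed: %s' % ioe)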
Example #2
    def setUp(self):
        super(TestScrub3, self).setUp()
        self.adpt = self.useFixture(fx.AdapterFx()).adpt
        self.vio_feed = [vios.VIOS.wrap(tju.load_file(VIOS_ENTRY2, self.adpt))]
        self.txfx = self.useFixture(fx.FeedTaskFx(self.vio_feed))
        self.logfx = self.useFixture(fx.LoggingFx())
        self.ftsk = tx.FeedTask('scrub', self.vio_feed)
Example #3
    def dlt_vopt(self, lpar_uuid, stg_ftsk=None):
        """Deletes the virtual optical and scsi mappings for a VM.

        :param lpar_uuid: The pypowervm UUID of the LPAR to remove.
        :param stg_ftsk: (Optional) A FeedTask. If provided, the actions to
                         modify the storage will be added as batched functions
                         onto the FeedTask.  If not provided (the default) the
                         operation to delete the vOpt will execute immediately.
        """
        # If no transaction manager, build locally so that we can run
        # immediately
        if stg_ftsk is None:
            built_stg_ftsk = True
            vio_resp = self.adapter.read(pvm_vios.VIOS.schema_type,
                                         root_id=self.vios_uuid,
                                         xag=[pvm_vios.VIOS.xags.SCSI_MAPPING])
            vio_w = pvm_vios.VIOS.wrap(vio_resp)
            stg_ftsk = pvm_tx.FeedTask('media_detach', [vio_w])
        else:
            built_stg_ftsk = False

        # Run the remove maps method.
        self.add_dlt_vopt_tasks(lpar_uuid, stg_ftsk)

        # If built locally, then execute
        if built_stg_ftsk:
            stg_ftsk.execute()
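A hedged usage sketch of the two call patterns described in the docstring above; media, adapter and lpar_uuid are hypothetical caller-side names, and the imports assume the standard pypowervm module paths:

import pypowervm.utils.transaction as pvm_tx
import pypowervm.wrappers.virtual_io_server as pvm_vios

# Immediate: no FeedTask is passed, so dlt_vopt builds and executes its own.
media.dlt_vopt(lpar_uuid)

# Deferred: the caller owns the FeedTask, batching this removal with other
# VIOS updates and deciding when the single update per VIOS happens.
stg_ftsk = pvm_tx.FeedTask(
    'batched_updates',
    pvm_vios.VIOS.getter(adapter, xag=[pvm_vios.VIOS.xags.SCSI_MAPPING]))
media.dlt_vopt(lpar_uuid, stg_ftsk=stg_ftsk)
# ...add any other subtasks to stg_ftsk here...
stg_ftsk.execute()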
Example #4
    def test_deferred_feed_get(self):
        """Test deferred and unique GET of the internal feed."""
        # setUp inits self.feed_task with FeedGetter.  This doesn't call read.
        self.assertEqual(0, self.adpt.read.call_count)
        lfeed = self.feed_task.feed
        self.assertEqual(1, self.adpt.read.call_count)
        self.adpt.read.assert_called_with('LogicalPartition',
                                          None,
                                          child_id=None,
                                          child_type=None,
                                          xag=None)
        self.assertEqual(21, len(lfeed))
        self.assertEqual('089FFB20-5D19-4A8C-BB80-13650627D985', lfeed[0].uuid)
        # Getting feed again doesn't invoke GET again.
        lfeed = self.feed_task.feed
        self.assertEqual(1, self.adpt.read.call_count)
        self.assertEqual(21, len(lfeed))
        self.assertEqual('089FFB20-5D19-4A8C-BB80-13650627D985', lfeed[0].uuid)

        # Init with a feed - read is never called
        self.adpt.read.reset_mock()
        ftsk = tx.FeedTask('name', lfeed)
        self.assertEqual(0, self.adpt.read.call_count)
        nfeed = ftsk.feed
        self.assertEqual(0, self.adpt.read.call_count)
        self.assertEqual(lfeed, nfeed)
Example #5
    def _cleanup_volume(self, udid):
        """Cleanup the hdisk associated with this udid."""

        if not udid:
            LOG.warning(
                _LW('Could not remove hdisk for volume: %s') % self.volume_id)
            return

        LOG.info(_LI('Removing hdisk for udid: %s') % udid)

        def find_hdisk_to_remove(vios_w):
            device_name = vios_w.hdisk_from_uuid(udid)
            if device_name is None:
                return
            LOG.info(_LI('Removing %(hdisk)s from VIOS %(vios)s'), {
                'hdisk': device_name,
                'vios': vios_w.name
            })
            self._add_remove_hdisk(vios_w,
                                   device_name,
                                   stg_ftsk=rmv_hdisk_ftsk)

        # Create a feed task to get the vios, find the hdisk and remove it.
        rmv_hdisk_ftsk = tx.FeedTask(
            'find_hdisk_to_remove',
            pvm_vios.VIOS.getter(self.adapter,
                                 xag=[pvm_vios.VIOS.xags.STORAGE]))
        # Find vios hdisks for this udid to remove.
        rmv_hdisk_ftsk.add_functor_subtask(find_hdisk_to_remove,
                                           flag_update=False)
        rmv_hdisk_ftsk.execute()
Example #6
    def test_spawn_ops(self, mock_scrub, mock_bldftsk, mock_crt_lpar,
                       mock_cdrb, mock_cfg_drv):
        """Validates the 'typical' spawn flow of the spawn of an instance. """
        mock_cdrb.return_value = True
        self.drv.host_wrapper = mock.Mock(uuid='host_uuid')
        self.drv.disk_dvr = mock.create_autospec(ssp.SSPDiskAdapter,
                                                 instance=True)
        mock_ftsk = pvm_tx.FeedTask('fake', [mock.Mock(spec=pvm_vios.VIOS)])
        mock_bldftsk.return_value = mock_ftsk
        self.drv.spawn('context', self.inst, 'img_meta', 'files', 'password',
                       'allocs', network_info='netinfo')
        mock_crt_lpar.assert_called_once_with(
            self.adp, self.drv.host_wrapper, self.inst)
        mock_bldftsk.assert_called_once_with(
            self.adp, xag={pvm_const.XAG.VIO_SMAP, pvm_const.XAG.VIO_FMAP})
        mock_scrub.assert_called_once_with(
            [mock_crt_lpar.return_value.id], mock_ftsk, lpars_exist=True)
        self.drv.disk_dvr.create_disk_from_image.assert_called_once_with(
            'context', self.inst, 'img_meta')
        self.drv.disk_dvr.attach_disk.assert_called_once_with(
            self.inst, self.drv.disk_dvr.create_disk_from_image.return_value,
            mock_ftsk)
        mock_cfg_drv.assert_called_once_with(self.adp)
        mock_cfg_drv.return_value.create_cfg_drv_vopt.assert_called_once_with(
            self.inst, 'files', 'netinfo', mock_ftsk, admin_pass='******')
        self.pwron.assert_called_once_with(self.adp, self.inst)

        mock_cfg_drv.reset_mock()

        # No config drive
        mock_cdrb.return_value = False
        self.drv.spawn('context', self.inst, 'img_meta', 'files', 'password',
                       'allocs')
        mock_cfg_drv.assert_not_called()
Example #7
    def test_post_exec(self):
        def log_func(msg):
            def _log(*a, **k):
                ftfx.log(msg)

            return _log

        def log_task(msg):
            return tf_task.FunctorTask(log_func(msg), name='functor_%s' % msg)

        # Limit the feed to two to keep the logging sane
        ftfx = self.useFixture(fx.FeedTaskFx(self.entries[:2]))
        # Make the logging predictable by limiting to one thread
        ftsk = tx.FeedTask('post_exec', lpar.LPAR.getter(None), max_workers=1)

        # First prove that a FeedTask with *only* post-execs can run.
        ftsk.add_post_execute(log_task('post1'))
        ftsk.add_post_execute(log_task('post2'))
        ftsk.execute()
        # Note that no GETs or locks happen
        self.assertEqual(['post1', 'post2'], ftfx.get_log())

        # Now add regular subtasks
        ftfx.reset_log()
        ftsk.add_functor_subtask(log_func('main1'))
        ftsk.add_functor_subtask(log_func('main2'))
        ftsk.execute()
        # One GET, up front.  Posts happen at the end.
        self.assertEqual([
            'get', 'lock', 'main1', 'main2', 'unlock', 'lock', 'main1',
            'main2', 'unlock', 'post1', 'post2'
        ], ftfx.get_log())
Example #8
    def dlt_vopt(self, lpar_uuid, stg_ftsk=None, remove_mappings=True):
        """Deletes the virtual optical and scsi mappings for a VM.

        :param lpar_uuid: The pypowervm UUID of the LPAR to remove.
        :param stg_ftsk: (Optional) A FeedTask. If provided, the actions to
                         modify the storage will be added as batched functions
                         onto the FeedTask.  If not provided (the default) the
                         operation to delete the vOpt will execute immediately.
        :param remove_mappings: (Optional, Default: True) If set to true, will
                                remove the SCSI mappings as part of the
                                operation.  If false, will leave the mapping
                                but detach the storage from it.  If the VM is
                                running, it may be necessary to do the latter
                                as some operating systems will not allow the
                                removal.
        """
        # If no transaction manager, build locally so that we can run
        # immediately
        if stg_ftsk is None:
            built_stg_ftsk = True
            vio_resp = self.adapter.read(
                pvm_vios.VIOS.schema_type, root_id=self.vios_uuid,
                xag=[pvm_vios.VIOS.xags.SCSI_MAPPING])
            vio_w = pvm_vios.VIOS.wrap(vio_resp)
            stg_ftsk = pvm_tx.FeedTask('media_detach', [vio_w])
        else:
            built_stg_ftsk = False

        # Run the remove maps method.
        self.add_dlt_vopt_tasks(lpar_uuid, stg_ftsk,
                                remove_mappings=remove_mappings)

        # If built locally, then execute
        if built_stg_ftsk:
            stg_ftsk.execute()
Example #9
def build_tx_feed_task(adapter,
                       host_uuid,
                       name='vio_feed_mgr',
                       xag=[
                           pvm_vios.VIOS.xags.STORAGE,
                           pvm_vios.VIOS.xags.SCSI_MAPPING,
                           pvm_vios.VIOS.xags.FC_MAPPING
                       ]):
    """Builds the pypowervm transaction FeedTask.

    The transaction FeedTask enables users to collect a set of
    'WrapperTasks' against a feed of entities (in this case a set of VIOSes).
    The WrapperTask (within the FeedTask) handles lock and retry.

    This is useful to batch together a set of updates across a feed of elements
    (and multiple updates within a given wrapper).  This allows for significant
    performance improvements.

    :param adapter: The pypowervm adapter for the query.
    :param host_uuid: The host server's UUID.
    :param name: (Optional) The name of the feed manager.  Defaults to
                 vio_feed_mgr.
    :param xag: (Optional) List of extended attributes to use.  If not passed
                in, defaults to all storage options (as this is the most
                common case for using a transaction manager).
    """
    return pvm_tx.FeedTask(name, get_active_vioses(adapter, host_uuid,
                                                   xag=xag))
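To illustrate the batching the docstring describes, here is a hedged sketch of how the returned FeedTask might be used; adapter, host_uuid and update_vios are illustrative caller-side names:

ftsk = build_tx_feed_task(adapter, host_uuid)

def update_vios(vios_w):
    # Invoked once per VIOS wrapper in the feed.  Modify vios_w as needed and
    # return True so the surrounding WrapperTask flags the wrapper for its
    # single, lock-and-retry protected update.
    return True

ftsk.add_functor_subtask(update_vios)
# All queued subtasks run against each VIOS; each modified VIOS is updated
# exactly once.
ftsk.execute()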
Example #10
    def _cleanup_volume(self, udid=None, devname=None):
        """Cleanup the hdisk associated with this udid."""

        if not udid and not devname:
            LOG.warning('Could not remove hdisk for volume %s', self.volume_id,
                        instance=self.instance)
            return

        LOG.info('Removing hdisk for udid: %s', udid, instance=self.instance)

        def find_hdisk_to_remove(vios_w):
            if devname is None:
                device_name = vios_w.hdisk_from_uuid(udid)
            else:
                device_name = devname
            if device_name is None:
                return
            LOG.info('Adding deferred task to remove %(hdisk)s from VIOS '
                     '%(vios)s.', {'hdisk': device_name, 'vios': vios_w.name},
                     instance=self.instance)
            self._add_remove_hdisk(vios_w, device_name,
                                   stg_ftsk=rmv_hdisk_ftsk)

        # Create a feed task to get the vios, find the hdisk and remove it.
        rmv_hdisk_ftsk = tx.FeedTask(
            'find_hdisk_to_remove', pvm_vios.VIOS.getter(
                self.adapter, xag=[pvm_const.XAG.VIO_STOR]))
        # Find vios hdisks for this udid to remove.
        rmv_hdisk_ftsk.add_functor_subtask(
            find_hdisk_to_remove, flag_update=False)
        rmv_hdisk_ftsk.execute()
Example #11
    def test_empty_feed(self, mock_get):
        mock_get.return_value = []
        # We're allowed to initialize it with a FeedGetter
        fm = tx.FeedTask('name', ewrap.FeedGetter('mock', ewrap.EntryWrapper))
        # But as soon as we call a 'greedy' method, which does a .get, we raise
        self.assertRaises(ex.FeedTaskEmptyFeed, fm.get_wrapper, 'uuid')
        # Init with an explicit empty feed (list) raises right away
        self.assertRaises(ex.FeedTaskEmptyFeed, tx.FeedTask, 'name', [])
Example #12
    def _connect_volume(self, slot_mgr):
        """Connects the volume.

        :param slot_mgr: A NovaSlotManager.  Used to delete the client slots
                         used when a volume is detached from the VM
        """

        # It's about to get weird.  The transaction manager has a list of
        # VIOSes.  We could use those, but they only have SCSI mappings (by
        # design).  They do not have storage (super expensive).
        #
        # We need the storage xag when we are determining which mappings to
        # add to the system.  But we don't want to tie it to the stg_ftsk.  If
        # we do, every retry, every etag gather, etc... takes MUCH longer.
        #
        # So we get the VIOSes with the storage xag here, separately, to save
        # the stg_ftsk from potentially having to run it multiple times.
        connect_ftsk = tx.FeedTask(
            'connect_volume_to_vio',
            pvm_vios.VIOS.getter(
                self.adapter,
                xag=[pvm_const.XAG.VIO_STOR, pvm_const.XAG.VIO_SMAP]))

        # Find valid hdisks and map to VM.
        connect_ftsk.add_functor_subtask(self._connect_volume_to_vio,
                                         slot_mgr,
                                         provides='vio_modified',
                                         flag_update=False)

        ret = connect_ftsk.execute()

        # Check the number of VIOSes
        vioses_modified = 0
        for result in ret['wrapper_task_rets'].values():
            if result['vio_modified']:
                vioses_modified += 1

        partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)

        # Update the slot information
        def set_slot_info():
            vios_wraps = self.stg_ftsk.feed
            for vios_w in vios_wraps:
                scsi_map = pvm_c_stor.udid_to_scsi_mapping(
                    vios_w, self._get_udid(), partition_id)
                if not scsi_map:
                    continue
                slot_mgr.register_vscsi_mapping(scsi_map)

        self._validate_vios_on_connection(vioses_modified)
        self.stg_ftsk.add_post_execute(
            task.FunctorTask(set_slot_info,
                             name='hdisk_slot_%s' % self._get_udid()))
Example #13
    def _connect_volume(self):
        """Connects the volume."""
        def connect_volume_to_vio(vios_w):
            """Attempts to connect a volume to a given VIO.

            :param vios_w: The Virtual I/O Server wrapper to connect to.
            :return: True if the volume was connected.  False if it was not
                     (e.g. the Virtual I/O Server may not have connectivity
                     to the hdisk).
            """
            status, device_name, udid = self._discover_volume_on_vios(
                vios_w, self.volume_id)

            if hdisk.good_discovery(status, device_name):
                # Found a hdisk on this Virtual I/O Server.  Add the action to
                # map it to the VM when the stg_ftsk is executed.
                with lockutils.lock(hash(self)):
                    self._add_append_mapping(vios_w.uuid, device_name)

                # Save the UDID for the disk in the connection info.  It is
                # used for the detach.
                self._set_udid(udid)
                LOG.debug('Device attached: %s', device_name)

                # Valid attachment
                return True

            return False

        # It's about to get weird.  The transaction manager has a list of
        # VIOSes.  We could use those, but they only have SCSI mappings (by
        # design).  They do not have storage (super expensive).
        #
        # We need the storage xag when we are determining which mappings to
        # add to the system.  But we don't want to tie it to the stg_ftsk.  If
        # we do, every retry, every etag gather, etc... takes MUCH longer.
        #
        # So we get the VIOSes with the storage xag here, separately, to save
        # the stg_ftsk from potentially having to run it multiple times.
        connect_ftsk = tx.FeedTask(
            'connect_volume_to_vio',
            pvm_vios.VIOS.getter(self.adapter,
                                 xag=[pvm_vios.VIOS.xags.STORAGE]))
        # Find valid hdisks and map to VM.
        connect_ftsk.add_functor_subtask(connect_volume_to_vio,
                                         provides='vio_modified',
                                         flag_update=False)
        ret = connect_ftsk.execute()

        # Check the number of VIOSes
        vioses_modified = 0
        for result in ret['wrapper_task_rets'].values():
            if result['vio_modified']:
                vioses_modified += 1
        self._validate_vios_on_connection(vioses_modified)
Example #14
def discover_hdisk(adapter, vios_uuid, itls, vendor=LUAType.OTHER):
    """Attempt to discover a hard disk attached to a Virtual I/O Server.

    See lua_recovery.  This method attempts that call and analyzes the
    results.  On certain failure conditions (see below), this method will find
    stale LPARs, scrub storage artifacts associated with them, and then retry
    lua_recovery.  The retry is only attempted once; that result is returned
    regardless.

    The main objective of this method is to resolve errors resulting from
    incomplete cleanup of previous LPARs.  The stale LPAR's storage mappings
    can cause hdisk discovery to fail because it thinks the hdisk is already in
    use.

    Retry conditions: The scrub-and-retry will be triggered if:
    o dev_name is None; or
    o status is anything other than DEVICE_AVAILABLE or FOUND_ITL_ERR.  (The
      latter is acceptable because it means we discovered some, but not all, of
      the ITLs.  This is okay as long as dev_name is set.)

    :param adapter: The pypowervm adapter.
    :param vios_uuid: The Virtual I/O Server UUID.
    :param itls: A list of ITL objects.
    :param vendor: The vendor for the LUN.  See the LUAType.* constants.
    :return status: The status code from the discover process.
                    See LUAStatus.* constants.
    :return dev_name: The name of the discovered hdisk.
    :return udid: The UDID of the device.
    """
    # First attempt
    status, devname, udid = lua_recovery(adapter,
                                         vios_uuid,
                                         itls,
                                         vendor=vendor)
    # Do we need to scrub and retry?
    if not good_discovery(status, devname):
        vwrap = pvm_vios.VIOS.get(adapter,
                                  uuid=vios_uuid,
                                  xag=(c.XAG.VIO_SMAP, c.XAG.VIO_FMAP))

        scrub_ids = tsk_stg.find_stale_lpars(vwrap)
        if scrub_ids:
            # Detailed warning message by _log_lua_status
            LOG.warning(
                _("hdisk discovery failed; will scrub stale storage "
                  "for LPAR IDs %s and retry."), scrub_ids)
            # Scrub from just the VIOS in question.
            scrub_task = tx.FeedTask('scrub_vios_%s' % vios_uuid, [vwrap])
            tsk_stg.add_lpar_storage_scrub_tasks(scrub_ids, scrub_task)
            scrub_task.execute()
            status, devname, udid = lua_recovery(adapter,
                                                 vios_uuid,
                                                 itls,
                                                 vendor=vendor)
    return status, devname, udid
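A hedged caller sketch for the function above; adapter, vios_uuid and the itls list are assumed to come from the caller's environment:

status, devname, udid = discover_hdisk(adapter, vios_uuid, itls)
if good_discovery(status, devname):
    # The hdisk was found (possibly after the scrub-and-retry pass above).
    print('Discovered %s (UDID %s)' % (devname, udid))
else:
    print('Discovery failed with status %s' % status)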
Example #15
    def test_subtask_thread_local(self):
        """Security context and locks, if set, propagates to WrapperTasks."""
        def verify_no_ctx(wrapper):
            self.assertIsNone(ctx.get_current())

        tx.FeedTask('test_no_context', lpar.LPAR.getter(
            self.adpt)).add_functor_subtask(verify_no_ctx).execute()

        def verify_ctx(wrapper):
            _context = ctx.get_current()
            self.assertIsNotNone(_context)
            self.assertEqual('123', _context.request_id)
            # Copy the base set of locks to expect
            our_locks = list(locks)
            # Add our wrapper's uuid since that will be set also.
            our_locks.append(wrapper.uuid)
            self.assertEqual(set(our_locks), set(tx._get_locks()))

        ctx.RequestContext(request_id='123')
        locks = ['L123', 'L456', 'L789']
        tx._set_locks(locks)
        tx.FeedTask('test_set_context', lpar.LPAR.getter(
            self.adpt)).add_functor_subtask(verify_ctx).execute()

        # Context propagates even if FeedTask is executed in a subthread, as
        # long as our executor is used.
        # Make two to ensure they're run in separate threads
        ft1 = tx.FeedTask('subthread1', lpar.LPAR.getter(
            self.adpt)).add_functor_subtask(verify_ctx)
        ft2 = tx.FeedTask('subthread2', lpar.LPAR.getter(
            self.adpt)).add_functor_subtask(verify_ctx)
        self.assertRaises(tf_ex.WrappedFailure,
                          tf_eng.run,
                          tf_uf.Flow('subthread_flow').add(ft1, ft2),
                          engine='parallel')
        tf_eng.run(tf_uf.Flow('subthread_flow').add(ft1, ft2),
                   engine='parallel',
                   executor=tx.ContextThreadPoolExecutor(2))
Example #16
    def attach_volume(self):
        """Attaches the volume."""

        # Check if the VM is in a state where the attach is acceptable.
        lpar_w = vm.get_instance_wrapper(self.adapter, self.instance)
        capable, reason = lpar_w.can_modify_io()
        if not capable:
            raise exc.VolumeAttachFailed(volume_id=self.volume_id,
                                         reason=reason)

        # It's about to get weird.  The transaction manager has a list of
        # VIOSes.  We could use those, but they only have SCSI mappings (by
        # design).  They do not have storage (super expensive).
        #
        # We need the storage xag when we are determining which mappings to
        # add to the system.  But we don't want to tie it to the stg_ftsk.  If
        # we do, every retry, every etag gather, etc... takes MUCH longer.
        #
        # So we get the VIOSes with the storage xag here, separately, to save
        # the stg_ftsk from potentially having to run it multiple times.
        attach_ftsk = pvm_tx.FeedTask(
            'attach_volume_to_vio',
            pvm_vios.VIOS.getter(
                self.adapter,
                xag=[pvm_const.XAG.VIO_STOR, pvm_const.XAG.VIO_SMAP]))

        # Find valid hdisks and map to VM.
        attach_ftsk.add_functor_subtask(self._attach_volume_to_vio,
                                        provides='vio_modified',
                                        flag_update=False)

        ret = attach_ftsk.execute()

        # Check the number of VIOSes
        vioses_modified = 0
        for result in ret['wrapper_task_rets'].values():
            if result['vio_modified']:
                vioses_modified += 1

        # Validate that a vios was found
        if vioses_modified == 0:
            msg = (_('Failed to discover valid hdisk on any Virtual I/O '
                     'Server for volume %(volume_id)s.') % {
                         'volume_id': self.volume_id
                     })
            ex_args = {'volume_id': self.volume_id, 'reason': msg}
            raise exc.VolumeAttachFailed(**ex_args)

        self.stg_ftsk.execute()
Example #17
    def reset_stg_ftsk(self, stg_ftsk=None):
        """Resets the pypowervm transaction FeedTask to a new value.

        The previous updates from the original FeedTask WILL NOT be migrated
        to this new FeedTask.

        :param stg_ftsk: (Optional) The pypowervm transaction FeedTask for the
                         I/O Operations.  If provided, the Virtual I/O Server
                         mapping updates will be added to the FeedTask.  This
                         defers the updates to some later point in time.  If
                         the FeedTask is not provided, the updates will be run
                         immediately when this method is executed.
        """
        if stg_ftsk is None:
            getter = pvm_vios.VIOS.getter(self.adapter, xag=self.min_xags())
            self.stg_ftsk = pvm_tx.FeedTask(LOCAL_FEED_TASK, getter)
        else:
            self.stg_ftsk = stg_ftsk
Example #18
    def detach_volume(self):
        """Detach the volume."""

        # Check if the VM is in a state where the detach is acceptable.
        lpar_w = vm.get_instance_wrapper(self.adapter, self.instance)
        capable, reason = lpar_w.can_modify_io()
        if not capable:
            raise exc.VolumeDetachFailed(volume_id=self.volume_id,
                                         reason=reason)

        # Run the detach
        try:
            # See logic in attach_volume for why this new FeedTask is here.
            detach_ftsk = pvm_tx.FeedTask(
                'detach_volume_from_vio',
                pvm_vios.VIOS.getter(
                    self.adapter,
                    xag=[pvm_const.XAG.VIO_STOR, pvm_const.XAG.VIO_SMAP]))
            # Find hdisks to detach
            detach_ftsk.add_functor_subtask(self._detach_vol_for_vio,
                                            provides='vio_modified',
                                            flag_update=False)

            ret = detach_ftsk.execute()

            # Warn if no hdisks detached.
            if not any([
                    result['vio_modified']
                    for result in ret['wrapper_task_rets'].values()
            ]):
                LOG.warning(
                    "Detach Volume: Failed to detach the "
                    "volume %(volume_id)s on ANY of the Virtual "
                    "I/O Servers.", {'volume_id': self.volume_id},
                    instance=self.instance)

        except Exception as e:
            LOG.exception(
                'PowerVM error detaching volume from virtual '
                'machine.',
                instance=self.instance)
            ex_args = {'volume_id': self.volume_id, 'reason': str(e)}
            raise exc.VolumeDetachFailed(**ex_args)
        self.stg_ftsk.execute()
Example #19
    def test_spawn_ops(self, mock_scrub, mock_bldftsk, mock_crt_lpar):
        """Validates the 'typical' spawn flow of an instance."""
        self.drv.host_wrapper = 'sys'
        self.drv.disk_dvr = mock.create_autospec(ssp.SSPDiskAdapter,
                                                 instance=True)
        mock_ftsk = pvm_tx.FeedTask('fake', [mock.Mock(spec=pvm_vios.VIOS)])
        mock_bldftsk.return_value = mock_ftsk
        self.drv.spawn('context', self.inst, 'img_meta', 'files', 'password')
        mock_crt_lpar.assert_called_once_with(self.adp, 'sys', self.inst)
        mock_bldftsk.assert_called_once_with(
            self.adp, xag={pvm_const.XAG.VIO_SMAP, pvm_const.XAG.VIO_FMAP})
        mock_scrub.assert_called_once_with([mock_crt_lpar.return_value.id],
                                           mock_ftsk,
                                           lpars_exist=True)
        self.drv.disk_dvr.create_disk_from_image.assert_called_once_with(
            'context', self.inst, 'img_meta')
        self.drv.disk_dvr.attach_disk.assert_called_once_with(
            self.inst, self.drv.disk_dvr.create_disk_from_image.return_value,
            mock_ftsk)
        self.pwron.assert_called_once_with(self.adp, self.inst)
Example #20
def build_active_vio_feed_task(adapter,
                               name='vio_feed_task',
                               xag=(c.XAG.VIO_STOR, c.XAG.VIO_SMAP,
                                    c.XAG.VIO_FMAP)):
    """Builds a FeedTask for all active VIOSes.

    The transaction FeedTask enables users to collect a set of 'WrapperTasks'
    against a feed of entities (in this case a set of active VIOSes). The
    WrapperTask (within the FeedTask) handles lock and retry.

    This is useful to batch together a set of updates across a feed of elements
    (and multiple updates within a given wrapper).  This allows for significant
    performance improvements.

    :param adapter: The pypowervm adapter for the query.
    :param name: (Optional) The name of the feed manager.  Defaults to
                 vio_feed_task.
    :param xag: (Optional) Iterable of extended attributes to use.  If not
                specified, defaults to all mapping/storage options (as this is
                the most common case for using a transaction manager).
    :raise NotEnoughActiveVioses: if there is not at least one active VIOS.
    """
    return tx.FeedTask(name, get_active_vioses(adapter, xag=xag, find_min=1))
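A hedged sketch combining the FeedTask built above with per-VIOS subtasks and a post-execute step, following the patterns shown in the other examples; check_vios and summarize are illustrative names:

import taskflow.task as tf_task

ftsk = build_active_vio_feed_task(adapter, name='example_feed')

def check_vios(vios_w):
    # Runs once per VIOS; flag_update=False below means no update is flagged.
    return bool(vios_w.scsi_mappings)

def summarize(wrapper_task_rets):
    # Runs once, after every per-VIOS WrapperTask has completed.
    print('processed %d VIOSes' % len(wrapper_task_rets))

ftsk.add_functor_subtask(check_vios, provides='has_maps', flag_update=False)
ftsk.add_post_execute(tf_task.FunctorTask(summarize, name='summarize'))

ret = ftsk.execute()
# Per-VIOS results are keyed by UUID, as in the wrapper_task_rets tests above.
for vios_uuid, rets in ret['wrapper_task_rets'].items():
    print(vios_uuid, rets['has_maps'])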
Example #21
    def test_wrapper_task_rets(self):
        # Limit the feed to two to keep the return size sane
        ftfx = self.useFixture(fx.FeedTaskFx(self.entries[:2]))
        ftsk = tx.FeedTask('subtask_rets',
                           lpar.LPAR.getter(None),
                           update_timeout=123)
        exp_wtr = {
            wrp.uuid: {
                'wrapper': wrp,
                'the_id': wrp.id,
                'the_name': wrp.name
            }
            for wrp in ftsk.feed
        }
        called = []

        def return_wrapper_name(wrapper):
            return wrapper.name

        def return_wrapper_id(wrapper):
            return wrapper.id

        def verify_rets_implicit(wrapper_task_rets):
            called.append('implicit')
            self.assertEqual(exp_wtr, wrapper_task_rets)
            return 'verify_rets_implicit_return'

        def verify_rets_explicit(**kwargs):
            called.append('explicit')
            self.assertEqual(exp_wtr, kwargs['wrapper_task_rets'])
            return 'verify_rets_explicit_return'

        ftsk.add_functor_subtask(return_wrapper_name, provides='the_name')
        ftsk.add_functor_subtask(return_wrapper_id, provides='the_id')
        # Execute once here to make sure the return is in the right shape when
        # there are no post-execs
        self.assertEqual(
            {
                'wrapper_task_rets': {
                    self.entries[0].uuid: {
                        'the_name': self.entries[0].name,
                        'the_id': self.entries[0].id,
                        'wrapper': self.entries[0]
                    },
                    self.entries[1].uuid: {
                        'the_name': self.entries[1].name,
                        'the_id': self.entries[1].id,
                        'wrapper': self.entries[1]
                    }
                }
            }, ftsk.execute())

        ftsk.add_post_execute(
            tf_task.FunctorTask(verify_rets_implicit,
                                provides='post_exec_implicit'))
        ftsk.add_post_execute(
            tf_task.FunctorTask(verify_rets_explicit,
                                requires='wrapper_task_rets',
                                provides='post_exec_explicit'))

        ret = ftsk.execute()
        # Make sure the post-execs actually ran (to guarantee their internal
        # assertions passed).
        self.assertEqual(['implicit', 'explicit'], called)
        ftfx.patchers['update'].mock.assert_called_with(mock.ANY, timeout=123)
        # Verify that we got the returns from the subtasks AND the post-execs
        self.assertEqual(
            {
                'wrapper_task_rets': {
                    self.entries[0].uuid: {
                        'the_name': self.entries[0].name,
                        'the_id': self.entries[0].id,
                        'wrapper': self.entries[0]
                    },
                    self.entries[1].uuid: {
                        'the_name': self.entries[1].name,
                        'the_id': self.entries[1].id,
                        'wrapper': self.entries[1]
                    }
                },
                'post_exec_implicit': 'verify_rets_implicit_return',
                'post_exec_explicit': 'verify_rets_explicit_return'
            }, ret)
Example #22
    def _disconnect_volume(self, slot_mgr):
        """Disconnect the volume.

        This is the actual method to implement within the subclass.  Some
        transaction maintenance is done by the parent class.

        :param slot_mgr: A NovaSlotManager.  Used to delete the client slots
                         used when a volume is detached from the VM
        """
        def discon_vol_for_vio(vios_w):
            """Removes the volume from a specific Virtual I/O Server.

            :param vios_w: The VIOS wrapper.
            :return: True if a remove action was done against this VIOS.  False
                     otherwise.
            """
            LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
                      dict(vol=self.volume_id, uuid=vios_w.uuid),
                      instance=self.instance)
            device_name = None
            try:
                device_name = self._get_devname()

                if not device_name:
                    # We lost our bdm data.

                    # If we have no device name, at this point
                    # we should not continue.  Subsequent scrub code on future
                    # deploys will clean this up.
                    LOG.warning(
                        "Disconnect Volume: The backing hdisk for volume "
                        "%(volume_id)s on Virtual I/O Server %(vios)s is "
                        "not in a valid state.  No disconnect "
                        "actions to be taken as volume is not healthy.", {
                            'volume_id': self.volume_id,
                            'vios': vios_w.name
                        },
                        instance=self.instance)
                    return False

            except Exception:
                LOG.exception(
                    "Disconnect Volume: Failed to find device on Virtual I/O "
                    "Server %(vios_name)s for volume %(volume_id)s.", {
                        'vios_name': vios_w.name,
                        'volume_id': self.volume_id
                    },
                    instance=self.instance)
                return False

            # We have found the device name
            LOG.info(
                "Disconnect Volume: Discovered the device %(hdisk)s "
                "on Virtual I/O Server %(vios_name)s for volume "
                "%(volume_id)s.", {
                    'volume_id': self.volume_id,
                    'vios_name': vios_w.name,
                    'hdisk': device_name
                },
                instance=self.instance)

            # Add the action to remove the mapping when the stg_ftsk is run.
            partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)

            with lockutils.lock(hash(self)):
                self._add_remove_mapping(partition_id, vios_w.uuid,
                                         device_name, slot_mgr)
                target_iqn = self.connection_info["data"]["target_iqn"]

                def logout():
                    hdisk.remove_iscsi(self.adapter, target_iqn, vios_w.uuid)

                self.stg_ftsk.add_post_execute(
                    task.FunctorTask(logout,
                                     name='remove_iSCSI_%s' % target_iqn))
            # Found a valid element to remove
            return True

        try:
            # See logic in _connect_volume for why this new FeedTask is here.
            discon_ftsk = tx.FeedTask(
                'discon_volume_from_vio',
                pvm_vios.VIOS.getter(self.adapter,
                                     xag=[pvm_const.XAG.VIO_STOR]))
            # Find hdisks to disconnect
            discon_ftsk.add_functor_subtask(discon_vol_for_vio,
                                            provides='vio_modified',
                                            flag_update=False)
            ret = discon_ftsk.execute()

            # Warn if no hdisks disconnected.
            if not any([
                    result['vio_modified']
                    for result in ret['wrapper_task_rets'].values()
            ]):
                LOG.warning(
                    "Disconnect Volume: Failed to disconnect the volume "
                    "%(volume_id)s on ANY of the Virtual I/O Servers.",
                    {'volume_id': self.volume_id},
                    instance=self.instance)

        except Exception as e:
            LOG.exception(
                'PowerVM error detaching volume from virtual '
                'machine.',
                instance=self.instance)
            ex_args = {
                'volume_id': self.volume_id,
                'reason': six.text_type(e),
                'instance_name': self.instance.name
            }
            raise p_exc.VolumeDetachFailed(**ex_args)
Example #23
    def _disconnect_volume(self):
        """Disconnect the volume."""
        def discon_vol_for_vio(vios_w):
            """Removes the volume from a specific Virtual I/O Server.

            :param vios_w: The VIOS wrapper.
            :return: True if a remove action was done against this VIOS.  False
                     otherwise.
            """
            LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
                      dict(vol=self.volume_id, uuid=vios_w.uuid))
            udid, device_name = None, None
            try:
                udid = self._get_udid()
                if not udid:
                    # We lost our bdm data. We'll need to discover it.
                    status, device_name, udid = self._discover_volume_on_vios(
                        vios_w, self.volume_id)

                if udid and not device_name:
                    device_name = vios_w.hdisk_from_uuid(udid)

                if not device_name:
                    LOG.warn(
                        _LW("Disconnect Volume: No mapped device found on Virtual "
                            "I/O Server %(vios)s for volume %(volume_id)s.  "
                            "Volume UDID: %(volume_uid)s"), {
                                'volume_uid': udid,
                                'volume_id': self.volume_id,
                                'vios': vios_w.name
                            })
                    return False

            except Exception as e:
                LOG.warn(
                    _LW("Disconnect Volume: Failed to find disk on Virtual I/O "
                        "Server %(vios_name)s for volume %(volume_id)s. Volume "
                        "UDID: %(volume_uid)s.  Error: %(error)s"), {
                            'error': e,
                            'volume_uid': udid,
                            'vios_name': vios_w.name,
                            'volume_id': self.volume_id
                        })
                return False

            # We have found the device name
            LOG.info(
                _LI("Disconnect Volume: Discovered the device %(hdisk)s "
                    "on Virtual I/O Server %(vios_name)s for volume "
                    "%(volume_id)s.  Volume UDID: %(volume_uid)s."), {
                        'volume_uid': udid,
                        'volume_id': self.volume_id,
                        'vios_name': vios_w.name,
                        'hdisk': device_name
                    })

            # Add the action to remove the mapping when the stg_ftsk is run.
            partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)

            with lockutils.lock(hash(self)):
                self._add_remove_mapping(partition_id, vios_w.uuid,
                                         device_name)

                # Add a step after the mapping removal to also remove the
                # hdisk.
                self._add_remove_hdisk(vios_w, device_name)

            # Found a valid element to remove
            return True

        try:
            # See logic in _connect_volume for why this new FeedTask is here.
            discon_ftsk = tx.FeedTask(
                'discon_volume_from_vio',
                pvm_vios.VIOS.getter(self.adapter,
                                     xag=[pvm_vios.VIOS.xags.STORAGE]))
            # Find hdisks to disconnect
            discon_ftsk.add_functor_subtask(discon_vol_for_vio,
                                            provides='vio_modified',
                                            flag_update=False)
            ret = discon_ftsk.execute()

            # Warn if no hdisks disconnected.
            if not any([
                    result['vio_modified']
                    for result in ret['wrapper_task_rets'].values()
            ]):
                LOG.warn(
                    _LW("Disconnect Volume: Failed to disconnect the "
                        "volume %(volume_id)s on ANY of the Virtual I/O "
                        "Servers for instance %(inst)s."), {
                            'inst': self.instance.name,
                            'volume_id': self.volume_id
                        })

        except Exception as e:
            LOG.error(_LE('Cannot detach volumes from virtual machine: %s'),
                      self.vm_uuid)
            LOG.exception(_LE('Error: %s'), e)
            ex_args = {
                'volume_id': self.volume_id,
                'reason': six.text_type(e),
                'instance_name': self.instance.name
            }
            raise p_exc.VolumeDetachFailed(**ex_args)
Example #24
def discover_iscsi(adapter,
                   host_ip,
                   user,
                   password,
                   iqn,
                   vios_uuid,
                   transport_type=None,
                   lunid=None,
                   iface_name=None,
                   auth=None,
                   discovery_auth=None,
                   discovery_username=None,
                   discovery_password=None,
                   multipath=False):
    """Initiates the iSCSI discovery and login job

    :param adapter: pypowervm adapter
    :param host_ip: The portal or list of portals for the iscsi target. A
                    portal looks like ip:port.
    :param user: The username needed for authentication.
    :param password: The password needed for authentication.
    :param iqn: The IQN (iSCSI Qualified Name) or list of IQNs for the created
                volume on the target (e.g. iqn.2016-06.world.srv:target00).
    :param vios_uuid: The uuid of the VIOS (VIOS must be a Novalink VIOS type).
    :param transport_type: (Deprecated) Transport type of the volume to be
                           connected. Use iface_name instead.
    :param lunid: Target LUN ID or list of LUN IDs for the volume.
    :param iface_name: Iscsi iface name to use for the connection.
    :param auth: Authentication type
    :param discovery_auth: Discovery authentication type.
    :param discovery_username: The username needed for discovery
                               authentication.
    :param discovery_password: The password needed for discovery
                               authentication.
    :param multipath: Whether the connection is multipath or not.
    :return: The device name of the created volume.
    :return: The UniqueDeviceId of the created volume.
    :raise: ISCSIDiscoveryFailed in case of bad return code.
    :raise: JobRequestFailed in case of failure
    """

    kwargs = {
        'user': user,
        'password': password,
        'iqn': iqn,
        'transport_type': transport_type,
        'lunid': lunid,
        'iface_name': iface_name,
        'auth': auth,
        'discovery_auth': discovery_auth,
        'discovery_username': discovery_username,
        'discovery_password': discovery_password
    }

    status, devname, udid = _discover_iscsi(adapter, host_ip, vios_uuid,
                                            multipath, **kwargs)
    if status:
        _log_iscsi_status(status)

    # If status is ISCSI_ERR_ODM_QUERY, there is a chance of stale iSCSI
    # disks; clean up and re-discover.
    if status == ISCSIStatus.ISCSI_ERR_ODM_QUERY:
        vwrap = VIOS.get(adapter, uuid=vios_uuid, xag=[c.XAG.VIO_SMAP])
        # Check for stale lpars with SCSI mappings
        scrub_ids = tsk_stg.find_stale_lpars(vwrap)
        if scrub_ids:
            LOG.info(
                _("Scrub stale storage for LPAR IDs %s and "
                  "retry iSCSI discovery."), scrub_ids)
            # Scrub from just the VIOS in question.
            scrub_task = tx.FeedTask('scrub_vios_%s' % vios_uuid, [vwrap])
            tsk_stg.add_lpar_storage_scrub_tasks(scrub_ids, scrub_task)
            scrub_task.execute()

        # iSCSI discovery does not auto-clean the hdisk, so remove the iSCSI
        # hdisk.
        remove_iscsi(adapter, iqn, vios_uuid, iface_name, lunid, host_ip,
                     multipath)

        # Re-discover the volume
        status, devname, udid = _discover_iscsi(adapter, host_ip, vios_uuid,
                                                multipath, **kwargs)

    if not good_discovery(status, devname):
        raise pexc.ISCSIDiscoveryFailed(vios_uuid=vios_uuid, status=status)

    return devname, udid
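A hedged caller sketch for the discovery job above; the portal, credentials, IQN and UUID values are placeholders, and pexc is the pypowervm exception alias already used in the function:

try:
    devname, udid = discover_iscsi(
        adapter, '192.168.1.10:3260', 'user', 'password',
        'iqn.2016-06.world.srv:target00', vios_uuid, lunid=0)
except pexc.ISCSIDiscoveryFailed as err:
    # Raised when (re)discovery still did not yield a usable hdisk.
    print('iSCSI discovery failed: %s' % err)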
Example #25
    def _disconnect_volume(self, slot_mgr):
        """Disconnect the volume.

        :param slot_mgr: A NovaSlotManager.  Used to delete the client slots
                         used when a volume is detached from the VM
        """
        def discon_vol_for_vio(vios_w):
            """Removes the volume from a specific Virtual I/O Server.

            :param vios_w: The VIOS wrapper.
            :return: True if a remove action was done against this VIOS.  False
                     otherwise.
            """
            LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
                      dict(vol=self.volume_id, uuid=vios_w.uuid),
                      instance=self.instance)
            device_name = None
            udid = self._get_udid()
            try:
                if udid:
                    # This will only work if vios_w has the Storage XAG.
                    device_name = vios_w.hdisk_from_uuid(udid)

                if not udid or not device_name:
                    # We lost our bdm data. We'll need to discover it.
                    status, device_name, udid = self._discover_volume_on_vios(
                        vios_w, self.volume_id)

                    # Check if the hdisk is in a bad state in the I/O Server.
                    # Subsequent scrub code on future deploys will clean it up.
                    if not hdisk.good_discovery(status, device_name):
                        LOG.warning(
                            "Disconnect Volume: The backing hdisk for volume "
                            "%(volume_id)s on Virtual I/O Server %(vios)s is "
                            "not in a valid state.  This may be the result of "
                            "an evacuate.", {
                                'volume_id': self.volume_id,
                                'vios': vios_w.name
                            },
                            instance=self.instance)
                        return False

            except Exception:
                LOG.exception(
                    "Disconnect Volume: Failed to find disk on Virtual I/O "
                    "Server %(vios_name)s for volume %(volume_id)s. Volume "
                    "UDID: %(volume_uid)s.", {
                        'vios_name': vios_w.name,
                        'volume_id': self.volume_id,
                        'volume_uid': udid
                    },
                    instance=self.instance)
                return False

            # We have found the device name
            LOG.info(
                "Disconnect Volume: Discovered the device %(hdisk)s "
                "on Virtual I/O Server %(vios_name)s for volume "
                "%(volume_id)s.  Volume UDID: %(volume_uid)s.", {
                    'volume_uid': udid,
                    'volume_id': self.volume_id,
                    'vios_name': vios_w.name,
                    'hdisk': device_name
                },
                instance=self.instance)

            # Add the action to remove the mapping when the stg_ftsk is run.
            partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)

            with lockutils.lock(hash(self)):
                self._add_remove_mapping(partition_id, vios_w.uuid,
                                         device_name, slot_mgr)

                # Add a step to also remove the hdisk
                self._add_remove_hdisk(vios_w, device_name)

            # Found a valid element to remove
            return True

        try:
            # See logic in _connect_volume for why this new FeedTask is here.
            discon_ftsk = tx.FeedTask(
                'discon_volume_from_vio',
                pvm_vios.VIOS.getter(self.adapter,
                                     xag=[pvm_const.XAG.VIO_STOR]))
            # Find hdisks to disconnect
            discon_ftsk.add_functor_subtask(discon_vol_for_vio,
                                            provides='vio_modified',
                                            flag_update=False)
            ret = discon_ftsk.execute()

            # Warn if no hdisks disconnected.
            if not any([
                    result['vio_modified']
                    for result in ret['wrapper_task_rets'].values()
            ]):
                LOG.warning(
                    "Disconnect Volume: Failed to disconnect the "
                    "volume %(volume_id)s on ANY of the Virtual "
                    "I/O Servers.", {'volume_id': self.volume_id},
                    instance=self.instance)

        except Exception as e:
            LOG.exception(
                'PowerVM error detaching volume from virtual '
                'machine.',
                instance=self.instance)
            ex_args = {
                'volume_id': self.volume_id,
                'reason': six.text_type(e),
                'instance_name': self.instance.name
            }
            raise p_exc.VolumeDetachFailed(**ex_args)
Example #26
    def setUp(self):
        super(TestFeedTask, self).setUp()
        self.getter = lpar.LPAR.getter(self.adpt)
        # Set this up for getter.get()
        self.adpt.read.return_value = self.resp
        self.feed_task = tx.FeedTask('name', lpar.LPAR.getter(self.adpt))
Example #27
    def execute(self, wrapper_task_rets):
        """Remove the storage elements associated with the deleted mappings.

        We remove storage elements for each VIOS, but only those we can be sure
        belong ONLY to that VIOS.  That is, we do not remove SSP Logical Units
        because they may be mapped from some other VIOS in the cluster - one we
        don't even know about.
        """
        # Accumulate removal tasks
        rmtasks = []
        for vuuid, rets in wrapper_task_rets.items():
            vwrap = rets['wrapper']
            # VFC mappings don't have storage we can get to, so ignore those.

            # We may get removals from more than one subtask.  All will have
            # the 'vscsi_removals_' prefix.  There may be some overlap, but
            # the removal methods will ignore duplicates.
            vscsi_rms = []
            for vrk in (k for k in rets if k.startswith('vscsi_removals_')):
                vscsi_rms.extend(rets[vrk])

            # We can short out of this VIOS if no vscsi mappings were removed
            # from it.
            if not vscsi_rms:
                continue

            # Index remaining VSCSI mappings to isolate still-in-use storage.
            smindex = sm.index_mappings(vwrap.scsi_mappings)

            # Figure out which storage elements need to be removed.
            # o Some VSCSI mappings may not have backing storage.
            # o Ignore any storage elements that are still in use (still have
            # mappings associated with them).
            stg_els_to_remove = [
                rmap.backing_storage for rmap in vscsi_rms
                if rmap.backing_storage is not None
                and rmap.backing_storage.udid not in smindex['by-storage-udid']
            ]

            # If there's nothing left, we're done with this VIOS
            if not stg_els_to_remove:
                continue

            # Extract lists of each type of storage
            vopts_to_rm = []
            vdisks_to_rm = []
            for stg in stg_els_to_remove:
                if isinstance(stg, (stor.LU, stor.PV)):
                    LOG.warn(
                        _("Not removing storage %(stg_name)s of type "
                          "%(stg_type)s because it cannot be determined "
                          "whether it is still in use.  Manual "
                          "verification and cleanup may be necessary."), {
                              'stg_name': stg.name,
                              'stg_type': stg.schema_type
                          })
                elif isinstance(stg, stor.VOptMedia):
                    vopts_to_rm.append(stg)
                elif isinstance(stg, stor.VDisk):
                    vdisks_to_rm.append(stg)
                else:
                    LOG.warn(
                        _("Storage scrub ignoring storage element "
                          "%(stg_name)s because it is of unexpected type "
                          "%(stg_type)s."), {
                              'stg_name': stg.name,
                              'stg_type': stg.schema_type
                          })

            # Any storage to be deleted?
            if not any((vopts_to_rm, vdisks_to_rm)):
                continue

            # If we get here, we have storage that needs to be deleted from one
            # or more volume groups.  We don't have a way of knowing which ones
            # without REST calls, so get all VGs for this VIOS and delete from
            # all of them.  POST will only be done on VGs which actually need
            # updating.
            vgftsk = tx.FeedTask(
                'scrub_vg_vios_%s' % vuuid,
                stor.VG.getter(vwrap.adapter,
                               parent_class=vwrap.__class__,
                               parent_uuid=vwrap.uuid))
            if vdisks_to_rm:
                vgftsk.add_functor_subtask(
                    _rm_vdisks, vdisks_to_rm,
                    logspec=(LOG.warn, _(
                        "Scrubbing the following %(vdcount)d Virtual Disks "
                        "from VIOS %(vios)s: %(vdlist)s"),
                        {'vdcount': len(vdisks_to_rm), 'vios': vwrap.name,
                         'vdlist': ["%s (%s)" % (vd.name, vd.udid)
                                    for vd in vdisks_to_rm]}))
            if vopts_to_rm:
                vgftsk.add_functor_subtask(
                    _rm_vopts, vopts_to_rm,
                    logspec=(LOG.warn, _(
                        "Scrubbing the following %(vocount)d Virtual Opticals "
                        "from VIOS %(vios)s: %(volist)s"),
                        {'vocount': len(vopts_to_rm), 'vios': vwrap.name,
                         'volist': ["%s (%s)" % (vo.name, vo.udid)
                                    for vo in vopts_to_rm]}))
            rmtasks.append(vgftsk)

        # We only created removal Tasks if we found something to remove.
        if rmtasks:
            # Execute any storage removals in parallel, max 8 threads.
            tf_eng.run(tf_uf.Flow('remove_storage').add(*rmtasks),
                       engine='parallel',
                       executor=tx.ContextThreadPoolExecutor(
                           max(8, len(rmtasks))))
Example #28
    def test_destroy(self, mock_bldftsk, mock_cdrb, mock_cfgdrv, mock_dlt_lpar,
                     mock_unplug, mock_detach_vol):
        """Validates PowerVM destroy."""
        self.drv.host_wrapper = mock.Mock()
        self.drv.disk_dvr = mock.create_autospec(ssp.SSPDiskAdapter,
                                                 instance=True)

        mock_ftsk = pvm_tx.FeedTask('fake', [mock.Mock(spec=pvm_vios.VIOS)])
        mock_bldftsk.return_value = mock_ftsk
        block_device_info = self._fake_bdms()

        # Good path, with config drive, destroy disks
        mock_cdrb.return_value = True
        self.drv.destroy('context',
                         self.inst, [],
                         block_device_info=block_device_info)
        self.pwroff.assert_called_once_with(self.adp,
                                            self.inst,
                                            force_immediate=True)
        mock_bldftsk.assert_called_once_with(self.adp,
                                             xag=[pvm_const.XAG.VIO_SMAP])
        mock_unplug.assert_called_once()
        mock_cdrb.assert_called_once_with(self.inst)
        mock_cfgdrv.assert_called_once_with(self.adp)
        mock_cfgdrv.return_value.dlt_vopt.assert_called_once_with(
            self.inst, stg_ftsk=mock_bldftsk.return_value)
        self.assertEqual(2, mock_detach_vol.call_count)
        self.drv.disk_dvr.detach_disk.assert_called_once_with(self.inst)
        self.drv.disk_dvr.delete_disks.assert_called_once_with(
            self.drv.disk_dvr.detach_disk.return_value)
        mock_dlt_lpar.assert_called_once_with(self.adp, self.inst)

        self.pwroff.reset_mock()
        mock_bldftsk.reset_mock()
        mock_unplug.reset_mock()
        mock_cdrb.reset_mock()
        mock_cfgdrv.reset_mock()
        self.drv.disk_dvr.detach_disk.reset_mock()
        self.drv.disk_dvr.delete_disks.reset_mock()
        mock_detach_vol.reset_mock()
        mock_dlt_lpar.reset_mock()

        # No config drive, preserve disks, no block device info
        mock_cdrb.return_value = False
        self.drv.destroy('context',
                         self.inst, [],
                         block_device_info={},
                         destroy_disks=False)
        mock_cfgdrv.return_value.dlt_vopt.assert_not_called()
        mock_detach_vol.assert_not_called()
        self.drv.disk_dvr.delete_disks.assert_not_called()

        # Non-forced power_off, since preserving disks
        self.pwroff.assert_called_once_with(self.adp,
                                            self.inst,
                                            force_immediate=False)
        mock_bldftsk.assert_called_once_with(self.adp,
                                             xag=[pvm_const.XAG.VIO_SMAP])
        mock_unplug.assert_called_once()
        mock_cdrb.assert_called_once_with(self.inst)
        mock_cfgdrv.assert_not_called()
        mock_cfgdrv.return_value.dlt_vopt.assert_not_called()
        self.drv.disk_dvr.detach_disk.assert_called_once_with(self.inst)
        self.drv.disk_dvr.delete_disks.assert_not_called()
        mock_dlt_lpar.assert_called_once_with(self.adp, self.inst)

        self.pwroff.reset_mock()
        mock_bldftsk.reset_mock()
        mock_unplug.reset_mock()
        mock_cdrb.reset_mock()
        mock_cfgdrv.reset_mock()
        self.drv.disk_dvr.detach_disk.reset_mock()
        self.drv.disk_dvr.delete_disks.reset_mock()
        mock_dlt_lpar.reset_mock()

        # InstanceNotFound exception, non-forced
        self.pwroff.side_effect = exception.InstanceNotFound(
            instance_id='something')
        self.drv.destroy('context',
                         self.inst, [],
                         block_device_info={},
                         destroy_disks=False)
        self.pwroff.assert_called_once_with(self.adp,
                                            self.inst,
                                            force_immediate=False)
        self.drv.disk_dvr.detach_disk.assert_not_called()
        mock_unplug.assert_not_called()
        self.drv.disk_dvr.delete_disks.assert_not_called()
        mock_dlt_lpar.assert_not_called()

        self.pwroff.reset_mock()
        self.pwroff.side_effect = None
        mock_unplug.reset_mock()

        # Convertible (PowerVM) exception
        mock_dlt_lpar.side_effect = pvm_exc.TimeoutError("Timed out")
        self.assertRaises(exception.InstanceTerminationFailure,
                          self.drv.destroy,
                          'context',
                          self.inst, [],
                          block_device_info={})

        # Everything got called
        self.pwroff.assert_called_once_with(self.adp,
                                            self.inst,
                                            force_immediate=True)
        mock_unplug.assert_called_once()
        self.drv.disk_dvr.detach_disk.assert_called_once_with(self.inst)
        self.drv.disk_dvr.delete_disks.assert_called_once_with(
            self.drv.disk_dvr.detach_disk.return_value)
        mock_dlt_lpar.assert_called_once_with(self.adp, self.inst)

        # Other random exception raises directly
        mock_dlt_lpar.side_effect = ValueError()
        self.assertRaises(ValueError,
                          self.drv.destroy,
                          'context',
                          self.inst, [],
                          block_device_info={})
Example #29
    def test_no_subtasks(self, mock_flow):
        """Ensure that a FeedTask with no Subtasks is a no-op."""
        # No REST mocks - any REST calls will blow up.
        # Mocking Flow initializer to fail, ensuring it doesn't get called.
        mock_flow.side_effect = self.fail
        tx.FeedTask('feed_task', lpar.LPAR.getter(None)).execute()