Code Example #1
# taskflow imports assumed by this snippet (the lf/uf/task aliases); the
# migration task classes are project-specific and defined elsewhere.
from taskflow import task
from taskflow.patterns import linear_flow as lf
from taskflow.patterns import unordered_flow as uf


def get_flow(input_data=None):
    input_task = InputGatheringTask(inputs=input_data)
    user_task = UserMigrationTask('user_migration_task')
    tenant_task = TenantMigrationTask('tenant_migration_task')
    flavor_task = FlavorMigrationTask('flavor_migration_task')
    role_task = RoleMigrationTask('role_migration_task')
    image_task = ImageMigrationTask('image_migration_task')
    instance_task = InstanceMigrationTask('instances_migration_task')
    keypair_task = KeypairMigrationTask('Keypairs_migration_task')

    proj_quota_task = UpdateProjectsQuotasTask('update_projects_quotas')
    pr_binding_task = ProjectUserRoleBindingTask('project_roles_bind_task')

    flow = lf.Flow('main_flow').add(
        task.FunctorTask(input_task.execute,
                         provides={
                             'users_to_move', 'tenants_to_move',
                             'flavors_to_migrate', 'images_to_migrate',
                             'tenant_to_process', 'keypairs_to_move',
                             'roles_to_migrate', 'tenant_vm_dicts'
                         }),
        uf.Flow('user_tenant_migration_flow').add(
            # Note that creating users, tenants, flavors and roles can happen
            # in parallel, hence these tasks are part of an unordered flow.
            task.FunctorTask(user_task.execute,
                             name='user_task',
                             rebind={'users_to_move': "users_to_move"}),
            task.FunctorTask(tenant_task.execute,
                             name='tenant_task',
                             rebind={'tenants_to_move': "tenants_to_move"}),
            task.FunctorTask(
                flavor_task.execute,
                name='flavor_task',
                rebind={'flavors_to_migrate': "flavors_to_migrate"}),
            task.FunctorTask(role_task.execute,
                             name='role_task',
                             rebind={'roles_to_migrate': "roles_to_migrate"})),
        # TODO: Add other tasks to the flow, e.g. migrate images, private keys, etc.
        task.FunctorTask(image_task.execute,
                         name='image_task',
                         rebind={
                             'images_to_migrate': "images_to_migrate",
                             'tenant_to_process': 'tenant_to_process'
                         }),
        task.FunctorTask(keypair_task.execute,
                         name='keypair_task',
                         rebind={'keypairs_to_move': "keypairs_to_move"}),
        task.FunctorTask(instance_task.execute,
                         name='instance_task',
                         rebind={'tenant_vm_dicts': "tenant_vm_dicts"}),

        # post migration task:
        task.FunctorTask(proj_quota_task.execute,
                         name='update_project_quota_task'),
        task.FunctorTask(pr_binding_task.execute,
                         name='project_role_binding_task'))

    return flow
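
A hedged sketch of how a flow built this way is typically executed (assuming the migration task classes above are importable and their execute methods need no inputs beyond what the flow itself provides; the input payload below is hypothetical):

from taskflow import engines

# engine='parallel' lets the unordered sub-flow tasks run concurrently;
# a serial engine is the default.
flow = get_flow(input_data={'config_file': 'migration.yaml'})
engines.run(flow, engine='parallel')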
Code Example #2
    def _add_remove_hdisk(self, vio_wrap, device_name, stg_ftsk=None):
        """Adds a post-mapping task to remove the hdisk from the VIOS.

        This removal is only done after the mapping updates have completed.
        This method is also used during migration to remove hdisks that remain
        on the source host after the VM is migrated to the destination.

        :param vio_wrap: The Virtual I/O Server wrapper to remove the disk
                         from.
        :param device_name: The hdisk name to remove.
        :param stg_ftsk: The feed task to add to.  If None, then self.stg_ftsk
                         is used.
        """
        def rm_hdisk():
            LOG.info(_LI("Running remove for hdisk: '%s'"), device_name)
            try:
                # Attempt to remove the hDisk
                hdisk.remove_hdisk(self.adapter, CONF.host, device_name,
                                   vio_wrap.uuid)
            except Exception as e:
                # If there is a failure, log it, but don't stop the process
                LOG.warning(
                    _LW("There was an error removing the hdisk "
                        "%(disk)s from the Virtual I/O Server."),
                    {'disk': device_name})
                LOG.warning(e)

        # Only remove the hdisk if the device has no other host mappings
        if not self._check_host_mappings(vio_wrap, device_name):
            name = 'rm_hdisk_%s_%s' % (vio_wrap.name, device_name)
            stg_ftsk = stg_ftsk or self.stg_ftsk
            stg_ftsk.add_post_execute(task.FunctorTask(rm_hdisk, name=name))
        else:
            LOG.info(
                _LI("hdisk %(disk)s is not removed because it has "
                    "existing storage mappings"), {'disk': device_name})
Code Example #3
    def _add_remove_hdisk(self, vio_wrap, device_name):
        """Adds a post-mapping task to remove the hdisk from the VIOS.

        This removal is only done after the mapping updates have completed.

        :param vio_wrap: The Virtual I/O Server wrapper to remove the disk
                         from.
        :param device_name: The hdisk name to remove.
        """
        def rm_hdisk():
            LOG.info("Removing hdisk %(hdisk)s from Virtual I/O Server "
                     "%(vios)s", {'hdisk': device_name, 'vios': vio_wrap.name},
                     instance=self.instance)
            try:
                # Attempt to remove the hDisk
                hdisk.remove_hdisk(self.adapter, CONF.host, device_name,
                                   vio_wrap.uuid)
            except Exception:
                # If there is a failure, log it, but don't stop the process
                LOG.exception("There was an error removing the hdisk "
                              "%(disk)s from Virtual I/O Server %(vios)s.",
                              {'disk': device_name, 'vios': vio_wrap.name},
                              instance=self.instance)

        # Only remove the hdisk if the device has no other host mappings
        if not self._check_host_mappings(vio_wrap, device_name):
            name = 'rm_hdisk_%s_%s' % (vio_wrap.name, device_name)
            self.stg_ftsk.add_post_execute(task.FunctorTask(
                rm_hdisk, name=name))
        else:
            LOG.info("hdisk %(disk)s is not removed from Virtual I/O Server "
                     "%(vios)s because it has existing storage mappings",
                     {'disk': device_name, 'vios': vio_wrap.name},
                     instance=self.instance)
Code Example #4
    def dlt_vopt(self, instance, stg_ftsk):
        """Deletes the virtual optical and scsi mappings for a VM.

        :param instance: The nova instance whose VOpt(s) are to be removed.
        :param stg_ftsk: A FeedTask. The actions to modify the storage will be
                         added as batched functions onto the FeedTask.
        """
        lpar_uuid = vm.get_pvm_uuid(instance)

        # The matching function for find_maps, remove_maps
        match_func = tsk_map.gen_match_func(pvm_stg.VOptMedia)

        # Add a function to remove the mappings
        stg_ftsk.wrapper_tasks[self.vios_uuid].add_functor_subtask(
            tsk_map.remove_maps, lpar_uuid, match_func=match_func)

        # Find the VOpt device based on the mappings
        media_mappings = tsk_map.find_maps(stg_ftsk.get_wrapper(
            self.vios_uuid).scsi_mappings,
                                           client_lpar_id=lpar_uuid,
                                           match_func=match_func)
        media_elems = [x.backing_storage for x in media_mappings]

        def rm_vopt():
            LOG.info("Removing virtual optical storage.", instance=instance)
            vg_wrap = pvm_stg.VG.get(self.adapter,
                                     uuid=self.vg_uuid,
                                     parent_type=pvm_vios.VIOS,
                                     parent_uuid=self.vios_uuid)
            tsk_stg.rm_vg_storage(vg_wrap, vopts=media_elems)

        # Add task to remove the media if it exists
        if media_elems:
            stg_ftsk.add_post_execute(task.FunctorTask(rm_vopt))
Code Example #5
    def test_power_on_revert(self, mock_pwroff, mock_pwron):
        flow = tf_lf.Flow('revert_power_on')
        pwron = tf_vm.PowerOn(self.apt, self.instance, pwr_opts='opt')
        flow.add(pwron)

        # Dummy Task that fails, triggering flow revert
        def failure(*a, **k):
            raise ValueError()

        flow.add(tf_tsk.FunctorTask(failure))

        # When PowerOn.execute doesn't fail, revert calls power_off
        self.assertRaises(ValueError, tf_eng.run, flow)
        mock_pwron.assert_called_once_with(self.apt, self.instance, opts='opt')
        mock_pwroff.assert_called_once_with(self.apt,
                                            self.instance,
                                            force_immediate=True)

        mock_pwron.reset_mock()
        mock_pwroff.reset_mock()

        # When PowerOn.execute fails, revert doesn't call power_off
        mock_pwron.side_effect = exception.NovaException()
        self.assertRaises(exception.NovaException, tf_eng.run, flow)
        mock_pwron.assert_called_once_with(self.apt, self.instance, opts='opt')
        self.assertEqual(0, mock_pwroff.call_count)
Code Example #6
File: decorators.py  Project: pombredanne/taskflow
def task_factory(execute, **factory_kwargs):
    merged = kwargs.copy()
    merged.update(factory_kwargs)
    # NOTE(imelnikov): we can't capture f here because for
    # bound methods and bound class methods the object it
    # is bound to is yet unknown at the moment
    return base.FunctorTask(execute, **merged)
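
For context, a hedged sketch of the kind of decorator such a factory is usually defined inside (the decorator name `task_decorator` and the `task_factory` attribute are illustrative, not taskflow's actual API):

from taskflow import task as base  # assumed to match the alias used above


def task_decorator(**kwargs):
    """Illustrative decorator: stores a factory on the function so that the
    FunctorTask is only built later, once the bound callable is known.
    """
    def wrapper(f):
        def task_factory(execute, **factory_kwargs):
            merged = kwargs.copy()
            merged.update(factory_kwargs)
            return base.FunctorTask(execute, **merged)
        f.task_factory = task_factory
        return f
    return wrapper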
Code Example #7
    def _add_maps_for_fabric(self, fabric):
        """Adds the vFC storage mappings to the VM for a given fabric.

        :param fabric: The fabric to add the mappings to.
        """
        npiv_port_maps = self._get_fabric_meta(fabric)
        vios_wraps = self.stg_ftsk.feed

        # This loop adds the maps from the appropriate VIOS to the client VM
        for npiv_port_map in npiv_port_maps:
            vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, npiv_port_map)
            ls = [LOG.info, _LI("Adding NPIV mapping for instance %(inst)s "
                                "for Virtual I/O Server %(vios)s."),
                  {'inst': self.instance.name, 'vios': vios_w.name}]

            # Add the subtask to add the specific map.
            self.stg_ftsk.wrapper_tasks[vios_w.uuid].add_functor_subtask(
                pvm_vfcm.add_map, self.host_uuid, self.vm_uuid, npiv_port_map,
                logspec=ls)

        # After all the mappings, make sure the fabric state is updated.
        def set_state():
            self._set_fabric_state(fabric, FS_INST_MAPPED)
        volume_id = self.connection_info['data']['volume_id']
        self.stg_ftsk.add_post_execute(task.FunctorTask(
            set_state, name='fab_%s_%s' % (fabric, volume_id)))
Code Example #8
File: media.py  Project: andymcc/nova-powervm
    def add_dlt_vopt_tasks(self, lpar_uuid, stg_ftsk, remove_mappings=True):
        """Deletes the virtual optical and (optionally) scsi mappings for a VM.

        :param lpar_uuid: The pypowervm UUID of the LPAR whose vopt is to be
                          removed.
        :param stg_ftsk: A FeedTask handling storage I/O.  The task to remove
                         the mappings and media from the VM will be deferred on
                         to the FeedTask passed in. The execute can be done all
                         in one method (batched together).  No updates are
                         actually made here; they are simply added to the
                         FeedTask.
        :param remove_mappings: (Optional, Default: True) If set to true, will
                                remove the SCSI mappings as part of the
                                operation.  If false, will leave the mapping
                                but detach the storage from it.  If the VM is
                                running, it may be necessary to do the latter
                                as some operating systems will not allow the
                                removal.
        """
        # The function to find the VOpt
        match_func = tsk_map.gen_match_func(pvm_stg.VOptMedia)

        def rm_vopt_mapping(vios_w):
            return tsk_map.remove_maps(vios_w,
                                       lpar_uuid,
                                       match_func=match_func)

        def detach_vopt_from_map(vios_w):
            return tsk_map.detach_storage(vios_w,
                                          lpar_uuid,
                                          match_func=match_func)

        # Add a function to remove the map or detach the vopt
        stg_ftsk.wrapper_tasks[self.vios_uuid].add_functor_subtask(
            rm_vopt_mapping if remove_mappings else detach_vopt_from_map)

        # Find the vOpt device (before the remove is done) so that it can be
        # removed.
        partition_id = vm.get_vm_id(self.adapter, lpar_uuid)
        media_mappings = tsk_map.find_maps(stg_ftsk.get_wrapper(
            self.vios_uuid).scsi_mappings,
                                           client_lpar_id=partition_id,
                                           match_func=match_func)
        media_elems = [x.backing_storage for x in media_mappings]

        def rm_vopt():
            LOG.info(_LI("Removing virtual optical for VM with UUID %s."),
                     lpar_uuid)
            vg_wrap = pvm_stg.VG.get(self.adapter,
                                     uuid=self.vg_uuid,
                                     parent_type=pvm_vios.VIOS,
                                     parent_uuid=self.vios_uuid)
            tsk_stg.rm_vg_storage(vg_wrap, vopts=media_elems)

        # Don't add this task if there is no media to delete (e.g. config drive)
        if media_elems:
            stg_ftsk.add_post_execute(task.FunctorTask(rm_vopt))
Code Example #9
File: utils.py  Project: SEJeff/taskflow
def make_reverting_task(token, blowup=False):

    def do_revert(context, *args, **kwargs):
        context[token] = 'reverted'

    if blowup:

        def blow_up(context, *args, **kwargs):
            raise Exception("I blew up")

        return task.FunctorTask(blow_up, name='blowup_%s' % token)
    else:

        def do_apply(context, *args, **kwargs):
            context[token] = 'passed'

        return task.FunctorTask(do_apply, revert=do_revert,
                                name='do_apply_%s' % token)
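
A hedged usage sketch of make_reverting_task: the second task blows up, the engine reverts the first, and that task's revert marks the shared context (this assumes taskflow's in-memory storage passes the same dict object through to the functions):

from taskflow import engines
from taskflow.patterns import linear_flow as lf


def demo_revert():
    context = {}
    flow = lf.Flow('revert-demo').add(
        make_reverting_task('a'),
        make_reverting_task('b', blowup=True))
    try:
        engines.run(flow, store={'context': context})
    except Exception:
        pass  # the "I blew up" exception is re-raised by the engine
    # After the revert runs, context should look like {'a': 'reverted'}.
    return context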
Code Example #10
    def _connect_volume(self, slot_mgr):
        """Connects the volume.

        :param connect_volume_to_vio: Function to connect a volume to the vio.
                                      :param vios_w: Vios wrapper.
                                      :return: True if mapping was created.
        :param slot_mgr: A NovaSlotManager.  Used to delete the client slots
                         used when a volume is detached from the VM
        """

        # It's about to get weird.  The transaction manager has a list of
        # VIOSes.  We could use those, but they only have SCSI mappings (by
        # design).  They do not have storage (super expensive).
        #
        # We need the storage xag when we are determining which mappings to
        # add to the system.  But we don't want to tie it to the stg_ftsk.  If
        # we do, every retry, every etag gather, etc... takes MUCH longer.
        #
        # So we get the VIOSes with the storage xag here, separately, to save
        # the stg_ftsk from potentially having to run it multiple times.
        connect_ftsk = tx.FeedTask(
            'connect_volume_to_vio',
            pvm_vios.VIOS.getter(
                self.adapter,
                xag=[pvm_const.XAG.VIO_STOR, pvm_const.XAG.VIO_SMAP]))

        # Find valid hdisks and map to VM.
        connect_ftsk.add_functor_subtask(self._connect_volume_to_vio,
                                         slot_mgr,
                                         provides='vio_modified',
                                         flag_update=False)

        ret = connect_ftsk.execute()

        # Check the number of VIOSes
        vioses_modified = 0
        for result in ret['wrapper_task_rets'].values():
            if result['vio_modified']:
                vioses_modified += 1

        partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)

        # Update the slot information
        def set_slot_info():
            vios_wraps = self.stg_ftsk.feed
            for vios_w in vios_wraps:
                scsi_map = pvm_c_stor.udid_to_scsi_mapping(
                    vios_w, self._get_udid(), partition_id)
                if not scsi_map:
                    continue
                slot_mgr.register_vscsi_mapping(scsi_map)

        self._validate_vios_on_connection(vioses_modified)
        self.stg_ftsk.add_post_execute(
            task.FunctorTask(set_slot_info,
                             name='hdisk_slot_%s' % self._get_udid()))
Code Example #11
File: fileio.py  Project: esberglu/nova-powervm
    def _connect_volume(self, slot_mgr):
        path = self._get_path()
        # Get the File Path
        fio = pvm_stg.FileIO.bld(self.adapter,
                                 path,
                                 backstore_type=pvm_stg.BackStoreType.FILE_IO)

        def add_func(vios_w):
            # If the vios doesn't match, just return
            if vios_w.uuid not in self.vios_uuids:
                return None

            LOG.info("Adding logical volume disk connection to VIOS %(vios)s.",
                     {'vios': vios_w.name},
                     instance=self.instance)
            slot, lua = slot_mgr.build_map.get_vscsi_slot(vios_w, path)
            if slot_mgr.is_rebuild and not slot:
                LOG.debug(
                    'Detected a device with path %(path)s on VIOS '
                    '%(vios)s on the rebuild that did not exist on the '
                    'source. Ignoring.', {
                        'path': path,
                        'vios': vios_w.uuid
                    },
                    instance=self.instance)
                return None

            mapping = tsk_map.build_vscsi_mapping(self.host_uuid,
                                                  vios_w,
                                                  self.vm_uuid,
                                                  fio,
                                                  lpar_slot_num=slot,
                                                  lua=lua)
            return tsk_map.add_map(vios_w, mapping)

        self.stg_ftsk.add_functor_subtask(add_func)

        # Run after all the deferred tasks the query to save the slots in the
        # slot map.
        def set_slot_info():
            vios_wraps = self.stg_ftsk.feed
            partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)
            for vios_w in vios_wraps:
                scsi_map = pvm_c_stor.udid_to_scsi_mapping(
                    vios_w, path, partition_id)
                if not scsi_map:
                    continue
                slot_mgr.register_vscsi_mapping(scsi_map)

        self.stg_ftsk.add_post_execute(
            task.FunctorTask(set_slot_info, name='file_io_slot_%s' % path))
Code Example #12
File: rbd.py  Project: esberglu/nova-powervm
    def _connect_volume(self, slot_mgr):
        name = self.connection_info["data"]["name"]
        # Get the File Path
        rbd = pvm_stg.RBD.bld_ref(self.adapter, name)

        def add_func(vios_w):
            # If the vios doesn't match, just return
            if vios_w.uuid not in self.vios_uuids:
                return None

            LOG.info("Adding rbd disk connection to VIOS %(vios)s.",
                     {'vios': vios_w.name},
                     instance=self.instance)
            slot, lua = slot_mgr.build_map.get_vscsi_slot(vios_w, name)
            if slot_mgr.is_rebuild and not slot:
                LOG.debug(
                    'Detected a device with path %(path)s on VIOS '
                    '%(vios)s on the rebuild that did not exist on the '
                    'source. Ignoring.', {
                        'path': name,
                        'vios': vios_w.uuid
                    },
                    instance=self.instance)
                return None

            mapping = tsk_map.build_vscsi_mapping(self.host_uuid,
                                                  vios_w,
                                                  self.vm_uuid,
                                                  rbd,
                                                  lpar_slot_num=slot,
                                                  lua=lua)
            return tsk_map.add_map(vios_w, mapping)

        self.stg_ftsk.add_functor_subtask(add_func)

        # Run after all the deferred tasks the query to save the slots in the
        # slot map.
        def set_slot_info():
            vios_wraps = self.stg_ftsk.feed
            partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)
            for vios_w in vios_wraps:
                scsi_map = pvm_c_stor.udid_to_scsi_mapping(
                    vios_w, name, partition_id)
                if not scsi_map:
                    continue
                slot_mgr.register_vscsi_mapping(scsi_map)

        self.stg_ftsk.add_post_execute(
            task.FunctorTask(set_slot_info, name='rbd_slot_%s' % name))
Code Example #13
    def _add_maps_for_fabric(self, fabric):
        """Adds the vFC storage mappings to the VM for a given fabric.

        :param fabric: The fabric to add the mappings to.
        """
        npiv_port_maps = self._get_fabric_meta(fabric)
        vios_wraps = self.stg_ftsk.feed
        volume_id = self.connection_info['data']['volume_id']

        # This loop adds the maps from the appropriate VIOS to the client VM
        for npiv_port_map in npiv_port_maps:
            vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, npiv_port_map)

            if vios_w is None:
                LOG.error(
                    _LE("Mappings were not able to find a proper VIOS. "
                        "The port mappings were %s."), npiv_port_maps)
                raise exc.VolumeAttachFailed(
                    volume_id=volume_id,
                    instance_name=self.instance.name,
                    reason=_("Unable to find a Virtual I/O Server that "
                             "hosts the NPIV port map for the server."))

            ls = [
                LOG.info,
                _LI("Adding NPIV mapping for instance %(inst)s "
                    "for Virtual I/O Server %(vios)s."), {
                        'inst': self.instance.name,
                        'vios': vios_w.name
                    }
            ]

            # Add the subtask to add the specific map.
            self.stg_ftsk.wrapper_tasks[vios_w.uuid].add_functor_subtask(
                pvm_vfcm.add_map,
                self.host_uuid,
                self.vm_uuid,
                npiv_port_map,
                logspec=ls)

        # After all the mappings, make sure the fabric state is updated.
        def set_state():
            self._set_fabric_state(fabric, FS_INST_MAPPED)

        self.stg_ftsk.add_post_execute(
            task.FunctorTask(set_state,
                             name='fab_%s_%s' % (fabric, volume_id)))
Code Example #14
    def create_task(self, function, requires=None, provides=None,
                    inject=None, **kwargs):
        name = kwargs.get('name', None)
        auto_extract = kwargs.get('auto_extract', True)
        rebind = kwargs.get('rebind', None)
        revert = kwargs.get('revert', None)
        version = kwargs.get('version', None)
        if function:
            return task.FunctorTask(function,
                                    name=name,
                                    provides=provides,
                                    requires=requires,
                                    auto_extract=auto_extract,
                                    rebind=rebind,
                                    revert=revert,
                                    version=version,
                                    inject=inject)
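
A hedged usage sketch of such a factory method (the `build_demo_flow` wrapper and the `fetch` function are illustrative; `factory` stands for any object exposing the create_task() shown above):

from taskflow.patterns import linear_flow


def fetch(url):
    return 'payload from %s' % url  # stand-in work


def build_demo_flow(factory):
    # `factory` is assumed to expose the create_task() method shown above.
    data_task = factory.create_task(fetch, provides='payload',
                                    name='fetch_task',
                                    inject={'url': 'http://example.invalid'})
    return linear_flow.Flow('demo').add(data_task)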
Code Example #15
File: media.py  Project: sarkartanzil/nova-powervm
    def add_dlt_vopt_tasks(self, lpar_uuid, stg_ftsk):
        """Deletes the virtual optical and scsi mappings for a VM.

        :param lpar_uuid: The pypowervm UUID of the LPAR whose vopt is to be
                          removed.
        :param stg_ftsk: A FeedTask handling storage I/O.  The task to remove
                         the mappings and media from the VM will be deferred on
                         to the FeedTask passed in. The execute can be done all
                         in one method (batched together).  No updates are
                         actually made here; they are simply added to the
                         FeedTask.
        """
        # The function to find the VOpt
        match_func = tsk_map.gen_match_func(pvm_stg.VOptMedia)

        def rm_vopt_mapping(vios_w):
            return tsk_map.remove_maps(vios_w,
                                       lpar_uuid,
                                       match_func=match_func)

        # Add a function to remove the map
        stg_ftsk.wrapper_tasks[self.vios_uuid].add_functor_subtask(
            rm_vopt_mapping)

        # Find the vOpt device (before the remove is done) so that it can be
        # removed.
        partition_id = vm.get_vm_id(self.adapter, lpar_uuid)
        media_mappings = tsk_map.find_maps(stg_ftsk.get_wrapper(
            self.vios_uuid).scsi_mappings,
                                           client_lpar_id=partition_id,
                                           match_func=match_func)
        media_elems = [x.backing_storage for x in media_mappings]

        def rm_vopt():
            LOG.info(_LI("Removing virtual optical for VM with UUID %s."),
                     lpar_uuid)
            vg_rsp = self.adapter.read(pvm_vios.VIOS.schema_type,
                                       root_id=self.vios_uuid,
                                       child_type=pvm_stg.VG.schema_type,
                                       child_id=self.vg_uuid)
            tsk_stg.rm_vg_storage(pvm_stg.VG.wrap(vg_rsp), vopts=media_elems)

        stg_ftsk.add_post_execute(task.FunctorTask(rm_vopt))
Code Example #16
File: vscsi.py  Project: pratgohi/nova-powervm
    def _add_remove_hdisk(self, vio_wrap, device_name):
        """Adds a post-mapping task to remove the hdisk from the VIOS.

        This removal is only done after the mapping updates have completed.

        :param vio_wrap: The Virtual I/O Server wrapper to remove the disk
                         from.
        :param device_name: The hdisk name to remove.
        """
        def rm_hdisk():
            try:
                # Attempt to remove the hDisk
                hdisk.remove_hdisk(self.adapter, CONF.host, device_name,
                                   vio_wrap.uuid)
            except Exception as e:
                # If there is a failure, log it, but don't stop the process
                LOG.warning(
                    _LW("There was an error removing the hdisk "
                        "%(disk)s from the Virtual I/O Server."),
                    {'disk': device_name})
                LOG.warning(e)

        name = 'rm_hdisk_%s_%s' % (vio_wrap.name, device_name)
        self.stg_ftsk.add_post_execute(task.FunctorTask(rm_hdisk, name=name))
Code Example #17
    def create_taskflow(self, payload):
        flow = lf.Flow("install_vm").add(
            task.FunctorTask(create_instance, provides='created_instance'))
Code Example #18
def get_flow(context,
             db,
             driver,
             request_spec=None,
             filter_properties=None,
             volume_id=None,
             snapshot_id=None,
             image_id=None):
    """Constructs and returns the scheduler entrypoint flow.

    This flow will do the following:

    1. Inject keys & values for dependent tasks.
    2. Extracts a scheduler specification from the provided inputs.
    3. Attaches 2 tasks that are activated only on *failure* (one to update
       the db status and one to notify on the MQ of the failure that
       occurred).
    4. Uses the provided driver to then select and continue processing of the
       volume request.
    """
    create_what = {
        'context': context,
        'raw_request_spec': request_spec,
        'filter_properties': filter_properties,
        'volume_id': volume_id,
        'snapshot_id': snapshot_id,
        'image_id': image_id,
    }

    flow_name = ACTION.replace(":", "_") + "_scheduler"
    scheduler_flow = linear_flow.Flow(flow_name)

    # This will extract and clean the spec from the starting values.
    scheduler_flow.add(
        ExtractSchedulerSpecTask(db,
                                 rebind={'request_spec': 'raw_request_spec'}))

    def schedule_create_volume(context, request_spec, filter_properties):
        def _log_failure(cause):
            LOG.error(
                _("Failed to schedule_create_volume: %(cause)s") %
                {'cause': cause})

        def _notify_failure(cause):
            """When scheduling fails send out a event that it failed."""
            topic = "scheduler.create_volume"
            payload = {
                'request_spec': request_spec,
                'volume_properties': request_spec.get('volume_properties', {}),
                'volume_id': volume_id,
                'state': 'error',
                'method': 'create_volume',
                'reason': cause,
            }
            try:
                publisher_id = notifier.publisher_id("scheduler")
                notifier.notify(context, publisher_id, topic, notifier.ERROR,
                                payload)
            except exception.CinderException:
                LOG.exception(
                    _("Failed notifying on %(topic)s "
                      "payload %(payload)s") % {
                          'topic': topic,
                          'payload': payload
                      })

        try:
            driver.schedule_create_volume(context, request_spec,
                                          filter_properties)
        except exception.NoValidHost as e:
            # No valid host was found; notify on the scheduler queue, log
            # that this happened, set the volume to an error state, and
            # *do not* reraise the error (since there is no point).
            _notify_failure(e)
            _log_failure(e)
            common.error_out_volume(context, db, volume_id, reason=e)
        except Exception as e:
            # Some other error happened; notify on the scheduler queue, log
            # that this happened, set the volume to an error state, and
            # *do* reraise the error.
            with excutils.save_and_reraise_exception():
                _notify_failure(e)
                _log_failure(e)
                common.error_out_volume(context, db, volume_id, reason=e)

    scheduler_flow.add(task.FunctorTask(schedule_create_volume))

    # Now load (but do not run) the flow using the provided initial data.
    return taskflow.engines.load(scheduler_flow, store=create_what)
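
Since get_flow() returns a loaded (but not yet running) engine, the caller is expected to run it; a hedged sketch with illustrative argument values:

engine = get_flow(context, db, driver,
                  request_spec=request_spec,
                  filter_properties=filter_properties,
                  volume_id=volume_id)
engine.run()  # runs ExtractSchedulerSpecTask, then schedule_create_volume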
Code Example #19
File: test_task.py  Project: paperandsoap/taskflow
    def test_creation_with_version(self):
        version = (2, 0)
        f_task = task.FunctorTask(lambda: None, version=version)
        self.assertEqual(version, f_task.version)
Code Example #20
File: iscsi.py  Project: esberglu/nova-powervm
        def discon_vol_for_vio(vios_w):
            """Removes the volume from a specific Virtual I/O Server.

            :param vios_w: The VIOS wrapper.
            :return: True if a remove action was done against this VIOS.  False
                     otherwise.
            """
            LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
                      dict(vol=self.volume_id, uuid=vios_w.uuid),
                      instance=self.instance)
            device_name = None
            try:
                device_name = self._get_devname()

                if not device_name:
                    # We lost our bdm data.

                    # If we have no device name, at this point
                    # we should not continue.  Subsequent scrub code on future
                    # deploys will clean this up.
                    LOG.warning(
                        "Disconnect Volume: The backing hdisk for volume "
                        "%(volume_id)s on Virtual I/O Server %(vios)s is "
                        "not in a valid state.  No disconnect "
                        "actions to be taken as volume is not healthy.", {
                            'volume_id': self.volume_id,
                            'vios': vios_w.name
                        },
                        instance=self.instance)
                    return False

            except Exception:
                LOG.exception(
                    "Disconnect Volume: Failed to find device on Virtual I/O "
                    "Server %(vios_name)s for volume %(volume_id)s.", {
                        'vios_name': vios_w.name,
                        'volume_id': self.volume_id
                    },
                    instance=self.instance)
                return False

            # We have found the device name
            LOG.info(
                "Disconnect Volume: Discovered the device %(hdisk)s "
                "on Virtual I/O Server %(vios_name)s for volume "
                "%(volume_id)s.", {
                    'volume_id': self.volume_id,
                    'vios_name': vios_w.name,
                    'hdisk': device_name
                },
                instance=self.instance)

            # Add the action to remove the mapping when the stg_ftsk is run.
            partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)

            with lockutils.lock(hash(self)):
                self._add_remove_mapping(partition_id, vios_w.uuid,
                                         device_name, slot_mgr)
                target_iqn = self.connection_info["data"]["target_iqn"]

                def logout():
                    hdisk.remove_iscsi(self.adapter, target_iqn, vios_w.uuid)

                self.stg_ftsk.add_post_execute(
                    task.FunctorTask(logout,
                                     name='remove_iSCSI_%s' % target_iqn))
            # Found a valid element to remove
            return True
Code Example #21
File: npiv.py  Project: pratgohi/nova-powervm
    def _add_maps_for_fabric(self, fabric):
        """Adds the vFC storage mappings to the VM for a given fabric.

        Will check if the Fabric is mapped to the management partition.  If it
        is, then it will remove the mappings and update the fabric state. This
        is because, in order for the WWPNs to be on the fabric (for Cinder)
        before the VM is online, the WWPNs get mapped to the management
        partition.

        This method will remove from the management partition (if needed), and
        then assign it to the instance itself.

        :param fabric: The fabric to add the mappings to.
        """
        npiv_port_maps = self._get_fabric_meta(fabric)
        vios_wraps = self.stg_ftsk.feed

        # If currently mapped to the mgmt partition, remove the mappings so
        # that they can be added to the client.
        if self._get_fabric_state(fabric) == FS_MGMT_MAPPED:
            mgmt_uuid = mgmt.get_mgmt_partition(self.adapter).uuid

            # Each port mapping should be removed from the VIOS.
            for npiv_port_map in npiv_port_maps:
                vios_w = pvm_vfcm.find_vios_for_port_map(
                    vios_wraps, npiv_port_map)
                ls = [
                    LOG.info,
                    _LI("Removing NPIV mapping for mgmt partition "
                        "for instance %(inst)s on VIOS %(vios)s."), {
                            'inst': self.instance.name,
                            'vios': vios_w.name
                        }
                ]

                # Add the subtask to remove the map from the mgmt partition
                self.stg_ftsk.wrapper_tasks[vios_w.uuid].add_functor_subtask(
                    pvm_vfcm.remove_maps,
                    mgmt_uuid,
                    port_map=npiv_port_map,
                    logspec=ls)

        # This loop adds the maps from the appropriate VIOS to the client VM
        for npiv_port_map in npiv_port_maps:
            vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, npiv_port_map)
            ls = [
                LOG.info,
                _LI("Adding NPIV mapping for instance %(inst)s "
                    "for Virtual I/O Server %(vios)s."), {
                        'inst': self.instance.name,
                        'vios': vios_w.name
                    }
            ]

            # Add the subtask to add the specific map.
            self.stg_ftsk.wrapper_tasks[vios_w.uuid].add_functor_subtask(
                pvm_vfcm.add_map,
                self.host_uuid,
                self.vm_uuid,
                npiv_port_map,
                logspec=ls)

        # After all the mappings, make sure the fabric state is updated.
        def set_state():
            self._set_fabric_state(fabric, FS_INST_MAPPED)

        volume_id = self.connection_info['data']['volume_id']
        self.stg_ftsk.add_post_execute(
            task.FunctorTask(set_state,
                             name='fab_%s_%s' % (fabric, volume_id)))
Code Example #22

# These two functions connect into the state transition notification emission
# points that the engine outputs, they can be used to log state transitions
# that are occurring, or they can be used to suspend the engine (or perform
# other useful activities).
def flow_watch(state, details):
    print('Flow => %s' % state)


def task_watch(state, details):
    print('Task %s => %s' % (details.get('task_name'), state))


flow = lf.Flow("make-auto").add(
    task.FunctorTask(startup, revert=trash, provides='ran'),
    # A graph flow allows automatic dependency-based ordering: the ordering
    # is determined by analyzing the symbols required and provided, and
    # executing in an order that satisfies those dependencies (if one exists).
    gf.Flow("install-parts").add(
        task.FunctorTask(build_frame, provides='frame'),
        task.FunctorTask(build_engine, provides='engine'),
        task.FunctorTask(build_doors, provides='doors'),
        task.FunctorTask(build_wheels, provides='wheels'),
        # These *_installed outputs allow other tasks to depend on certain
        # actions having been performed (i.e. the components were installed).
        # Another way to do this is to link() the tasks manually instead of
        # creating an 'artificial' data dependency that accomplishes the same
        # goal.
        task.FunctorTask(install_engine, provides='engine_installed'),
        task.FunctorTask(install_doors, provides='doors_installed'),
Code Example #23
from taskflow import task
from taskflow import engines
from taskflow.patterns import linear_flow


def exec_func(x):  # renamed from `exec`, which clashes with the Python builtin
    print(x * 2)
    raise IOError


def rev(x, *args, **kwargs):
    print("In revert method")


func_task = task.FunctorTask(execute=exec_func,
                             revert=rev,
                             name="samplefunctor",
                             inject={"x": 2})

flow = linear_flow.Flow('send_message').add(func_task)
e = engines.load(flow)
e.run()
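
Running this with taskflow's usual linear-flow semantics should print the doubled value, then the revert message, and finally re-raise the IOError from e.run(); roughly:

# Expected console output (sketch):
#   4
#   In revert method
# ...after which the IOError propagates out of e.run()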
Code Example #24
    def test_other_name(self):
        task = base.FunctorTask(add, name='my task')
        self.assertEqual(task.name, 'my task')
Code Example #25
    def test_simple(self):
        task = base.FunctorTask(add)
        self.assertEqual(task.name, __name__ + '.add')
Code Example #26
    def test_wrapper_task_rets(self):
        # Limit the feed to two to keep the return size sane
        ftfx = self.useFixture(fx.FeedTaskFx(self.entries[:2]))
        ftsk = tx.FeedTask('subtask_rets',
                           lpar.LPAR.getter(None),
                           update_timeout=123)
        exp_wtr = {
            wrp.uuid: {
                'wrapper': wrp,
                'the_id': wrp.id,
                'the_name': wrp.name
            }
            for wrp in ftsk.feed
        }
        called = []

        def return_wrapper_name(wrapper):
            return wrapper.name

        def return_wrapper_id(wrapper):
            return wrapper.id

        def verify_rets_implicit(wrapper_task_rets):
            called.append('implicit')
            self.assertEqual(exp_wtr, wrapper_task_rets)
            return 'verify_rets_implicit_return'

        def verify_rets_explicit(**kwargs):
            called.append('explicit')
            self.assertEqual(exp_wtr, kwargs['wrapper_task_rets'])
            return 'verify_rets_explicit_return'

        ftsk.add_functor_subtask(return_wrapper_name, provides='the_name')
        ftsk.add_functor_subtask(return_wrapper_id, provides='the_id')
        # Execute once here to make sure the return is in the right shape when
        # there are no post-execs
        self.assertEqual(
            {
                'wrapper_task_rets': {
                    self.entries[0].uuid: {
                        'the_name': self.entries[0].name,
                        'the_id': self.entries[0].id,
                        'wrapper': self.entries[0]
                    },
                    self.entries[1].uuid: {
                        'the_name': self.entries[1].name,
                        'the_id': self.entries[1].id,
                        'wrapper': self.entries[1]
                    }
                }
            }, ftsk.execute())

        ftsk.add_post_execute(
            tf_task.FunctorTask(verify_rets_implicit,
                                provides='post_exec_implicit'))
        ftsk.add_post_execute(
            tf_task.FunctorTask(verify_rets_explicit,
                                requires='wrapper_task_rets',
                                provides='post_exec_explicit'))

        ret = ftsk.execute()
        # Make sure the post-execs actually ran (to guarantee their internal
        # assertions passed).
        self.assertEqual(['implicit', 'explicit'], called)
        ftfx.patchers['update'].mock.assert_called_with(mock.ANY, timeout=123)
        # Verify that we got the returns from the subtasks AND the post-execs
        self.assertEqual(
            {
                'wrapper_task_rets': {
                    self.entries[0].uuid: {
                        'the_name': self.entries[0].name,
                        'the_id': self.entries[0].id,
                        'wrapper': self.entries[0]
                    },
                    self.entries[1].uuid: {
                        'the_name': self.entries[1].name,
                        'the_id': self.entries[1].id,
                        'wrapper': self.entries[1]
                    }
                },
                'post_exec_implicit': 'verify_rets_implicit_return',
                'post_exec_explicit': 'verify_rets_explicit_return'
            }, ret)
Code Example #27
File: npiv.py  Project: openstack/nova-powervm
    def _add_maps_for_fabric(self, fabric, slot_mgr):
        """Adds the vFC storage mappings to the VM for a given fabric.

        :param fabric: The fabric to add the mappings to.
        :param slot_mgr: A NovaSlotManager.  Used to store/retrieve the client
                         slots used when a volume is attached to the VM
        """
        vios_wraps = self.stg_ftsk.feed
        # Ensure the physical ports in the metadata are not for a different
        # host (stale). If so, rebuild the maps with current info.
        npiv_port_maps = self._ensure_phys_ports_for_system(
            self._get_fabric_meta(fabric), vios_wraps, fabric)
        volume_id = self.connection_info['serial']

        # This loop adds the maps from the appropriate VIOS to the client VM
        slot_ids = copy.deepcopy(
            slot_mgr.build_map.get_vfc_slots(fabric, len(npiv_port_maps)))
        for npiv_port_map in npiv_port_maps:
            vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, npiv_port_map)
            if vios_w is None:
                LOG.error(
                    "Mappings were not able to find a proper VIOS. "
                    "The port mappings were %s.",
                    npiv_port_maps,
                    instance=self.instance)
                raise exc.VolumeAttachFailed(
                    volume_id=volume_id,
                    instance_name=self.instance.name,
                    reason=_("Unable to find a Virtual I/O Server that "
                             "hosts the NPIV port map for the server."))
            ls = [
                LOG.info, "Adding NPIV mapping for instance %(inst)s "
                "for Virtual I/O Server %(vios)s.", {
                    'inst': self.instance.name,
                    'vios': vios_w.name
                }
            ]

            # Add the subtask to add the specific map.
            slot_num = slot_ids.pop()
            self.stg_ftsk.wrapper_tasks[vios_w.uuid].add_functor_subtask(
                pvm_vfcm.add_map,
                self.host_uuid,
                self.vm_uuid,
                npiv_port_map,
                lpar_slot_num=slot_num,
                logspec=ls)

        # Store the client slot number for the NPIV mapping (for rebuild
        # scenarios)
        def set_vol_meta():
            vios_wraps = self.stg_ftsk.feed
            port_maps = self._get_fabric_meta(fabric)
            for port_map in port_maps:
                # The port map is [ 'phys_wwpn', 'client_wwpn1 client_wwpn2' ]
                # We only need one of the two client wwpns.
                vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, port_map)
                c_wwpns = port_map[1].split()
                vfc_mapping = pvm_c_stor.c_wwpn_to_vfc_mapping(
                    vios_w, c_wwpns[0])

                # If there is no mapping, then don't add it.  It means that
                # the client WWPN is hosted on a different VIOS.
                if vfc_mapping is None:
                    continue

                # However, by this point we know that it is hosted on this
                # VIOS.  So the vfc_mapping will have the client adapter
                slot_mgr.register_vfc_mapping(vfc_mapping, fabric)

        self.stg_ftsk.add_post_execute(
            task.FunctorTask(set_vol_meta,
                             name='fab_slot_%s_%s' % (fabric, volume_id)))

        # After all the mappings, make sure the fabric state is updated.
        def set_state():
            self._set_fabric_state(fabric, FS_INST_MAPPED)

        self.stg_ftsk.add_post_execute(
            task.FunctorTask(set_state,
                             name='fab_%s_%s' % (fabric, volume_id)))
Code Example #28
# Imports assumed by this snippet; call_jim and call_joe are simple functions
# defined elsewhere in the original example.
import taskflow.engines

from taskflow import task
from taskflow.patterns import linear_flow as lf


def flow_watch(state, details):
    print('Flow => %s' % state)


def task_watch(state, details):
    print('Task %s => %s' % (details.get('task_name'), state))


# Wrap your functions into a task type that knows how to treat your functions
# as tasks. There was previous work done to just allow a function to be
# directly passed, but in python 3.0 there is no easy way to capture an
# instance method, so this wrapping approach was decided upon instead, which
# can attach to instance methods (if that's desired).
flow = lf.Flow("Call-them")
flow.add(task.FunctorTask(execute=call_jim))
flow.add(task.FunctorTask(execute=call_joe))

# Now load (but do not run) the flow using the provided initial data.
engine = taskflow.engines.load(flow, store={
    'context': {
        "joe_number": 444,
        "jim_number": 555,
    }
})

# This is where we attach our callback functions to the 2 different
# notification objects that an engine exposes. The usage of a '*' (Kleene star)
# here means that we want to be notified on all state changes; if you want to
# restrict to a specific state change, just register that instead.
engine.notifier.register('*', flow_watch)
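
The task_watch callback defined above is presumably registered the same way before running the engine; a hedged sketch (the notifier attribute for task-level transitions has varied across taskflow releases):

# Newer taskflow exposes engine.atom_notifier for task/atom transitions;
# older releases exposed engine.task_notifier instead.
engine.atom_notifier.register('*', task_watch)
engine.run()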
Code Example #29
def log_task(msg):
    return tf_task.FunctorTask(log_func(msg), name='functor_%s' % msg)
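
log_func is not shown in the snippet; a hypothetical sketch of what such a helper could look like (name and behavior assumed):

import logging

LOG = logging.getLogger(__name__)


def log_func(msg):
    # Hypothetical helper: returns a zero-argument callable that logs the
    # captured message when the FunctorTask executes.
    def _log():
        LOG.info(msg)
    return _log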
Code Example #30
        def discon_vol_for_vio(vios_w):
            """Removes the volume from a specific Virtual I/O Server.

            :param vios_w: The VIOS wrapper.
            :return: True if a remove action was done against this VIOS.  False
                     otherwise.
            """
            # Check if the vios uuid exists in the list
            if vios_w.uuid not in self.vios_uuids:
                LOG.debug(
                    "Skipping disconnect of volume %(vol)s from "
                    "inactive vios uuid %(uuid)s.",
                    dict(vol=self.volume_id, uuid=vios_w.uuid))
                return False

            LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
                      dict(vol=self.volume_id, uuid=vios_w.uuid),
                      instance=self.instance)

            device_name = None
            try:
                udid = self._get_udid()
                if udid:
                    # Get the device name using UniqueDeviceID Identifier.
                    device_name = vios_w.hdisk_from_uuid(udid)

                if not udid or not device_name:
                    # If we have no device name, at this point
                    # we should not continue.  Subsequent scrub code on
                    # future deploys will clean this up.
                    LOG.warning(
                        "Disconnect Volume: The backing hdisk for volume "
                        "%(volume_id)s on Virtual I/O Server %(vios)s is "
                        "not in a valid state.  No disconnect "
                        "actions to be taken as volume is not healthy.", {
                            'volume_id': self.volume_id,
                            'vios': vios_w.name
                        },
                        instance=self.instance)
                    return False

            except Exception:
                LOG.exception(
                    "Disconnect Volume: Failed to find device on Virtual I/O "
                    "Server %(vios_name)s for volume %(volume_id)s.", {
                        'vios_name': vios_w.name,
                        'volume_id': self.volume_id
                    },
                    instance=self.instance)
                return False

            # We have found the device name
            LOG.info(
                "Disconnect Volume: Discovered the device %(hdisk)s "
                "on Virtual I/O Server %(vios_name)s for volume "
                "%(volume_id)s.", {
                    'volume_id': self.volume_id,
                    'vios_name': vios_w.name,
                    'hdisk': device_name
                },
                instance=self.instance)

            # Add the action to remove the mapping when the stg_ftsk is run.
            partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)

            with lockutils.lock(hash(self)):
                self._add_remove_mapping(partition_id, vios_w.uuid,
                                         device_name, slot_mgr)
                conn_data = self._get_iscsi_conn_props(vios_w)
                if not conn_data:
                    return False
                iqn = conn_data.get("target_iqns", conn_data.get("target_iqn"))
                portal = conn_data.get("target_portals",
                                       conn_data.get("target_portal"))
                lun = conn_data.get("target_luns", conn_data.get("target_lun"))

                def remove():
                    try:
                        hdisk.remove_iscsi(self.adapter,
                                           iqn,
                                           vios_w.uuid,
                                           lun=lun,
                                           iface_name=self.iface_name,
                                           portal=portal,
                                           multipath=self._is_multipath())
                    except (pvm_exc.ISCSIRemoveFailed,
                            pvm_exc.JobRequestFailed) as e:
                        LOG.warning(e)

                self.stg_ftsk.add_post_execute(
                    task.FunctorTask(remove,
                                     name='remove_%s_from_vios_%s' %
                                     (device_name, vios_w.uuid)))

            # Found a valid element to remove
            return True