Example #1
    def __init__(self,
                 cluster,
                 nodes,
                 affected_nodes=None,
                 task_ids=None,
                 events=None):
        """Initializes.

        :param cluster: Cluster instance
        :param nodes: the sequence of nodes for deploy
        :param affected_nodes: the list of nodes affected by the deployment
        :param task_ids: Only the specified tasks will be executed;
                         if None, all tasks will be executed
        :param events: the events (see TaskEvents)
        """
        if affected_nodes:
            self.affected_node_ids = frozenset(n.uid for n in affected_nodes)
            self.deployment_nodes = copy.copy(nodes)
            self.deployment_nodes.extend(affected_nodes)
        else:
            self.deployment_nodes = nodes
            self.affected_node_ids = frozenset()
        self.cluster = cluster
        self.role_resolver = RoleResolver(self.deployment_nodes)
        self.task_serializer = DeployTaskSerializer()
        self.task_processor = TaskProcessor()
        self.tasks_connections = collections.defaultdict(dict)
        self.tasks_dictionary = dict()
        self.task_filter = self.make_task_filter(task_ids)
        self.events = events
Example #2
    def __init__(self, cluster, nodes):
        """Initializes.

        :param cluster: Cluster instance
        :param nodes: the sequence of nodes for deploy
        """
        self.cluster = cluster
        self.role_resolver = RoleResolver(nodes)
        self.task_serializer = DeployTaskSerializer()
        self.task_processor = TaskProcessor()
        self.tasks_per_node = collections.defaultdict(dict)
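
A minimal sketch (not from the source) of why tasks_per_node is a
collections.defaultdict(dict): the per-node mapping springs into existence on
first access, and indexing tasks by id makes repeated writes of the same task
collapse into a single entry.

import collections

tasks_per_node = collections.defaultdict(dict)

# hypothetical astute tasks, for illustration only
for astute_task in ({'id': 'netconfig'}, {'id': 'netconfig'}):
    tasks_per_node['node-1'][astute_task['id']] = astute_task

assert len(tasks_per_node['node-1']) == 1
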
Example #3
    def __init__(self, cluster, nodes, task_ids=None):
        """Initializes.

        :param cluster: Cluster instance
        :param nodes: the sequence of nodes for deploy
        :param task_ids: Only the specified tasks will be executed;
                         if None, all tasks will be executed
        """
        self.cluster = cluster
        self.role_resolver = RoleResolver(nodes)
        self.task_serializer = DeployTaskSerializer()
        self.task_processor = TaskProcessor()
        self.tasks_per_node = collections.defaultdict(dict)
        self.task_filter = self.make_task_filter(task_ids)
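
Assuming the make_task_filter implementation shown in Example #10, the filter
stored in task_filter behaves as in this sketch: with no ids every task
passes; otherwise only the selected ids do.

task_filter = TasksSerializer.make_task_filter(None)
assert task_filter('any_task_id')

task_filter = TasksSerializer.make_task_filter(['netconfig', 'globals'])
assert task_filter('netconfig')
assert not task_filter('upload_repos')
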
Example #4
    def __init__(self, cluster, nodes,
                 affected_nodes=None, task_ids=None, events=None):
        """Initializes.

        :param cluster: Cluster instance
        :param nodes: the sequence of nodes for deploy
        :param affected_nodes: the list of nodes affected by the deployment
        :param task_ids: Only the specified tasks will be executed;
                         if None, all tasks will be executed
        :param events: the events (see TaskEvents)
        """
        if affected_nodes:
            self.affected_node_ids = frozenset(n.uid for n in affected_nodes)
            self.deployment_nodes = copy.copy(nodes)
            self.deployment_nodes.extend(affected_nodes)
        else:
            self.deployment_nodes = nodes
            self.affected_node_ids = frozenset()
        self.cluster = cluster
        self.role_resolver = RoleResolver(self.deployment_nodes)
        self.task_serializer = DeployTaskSerializer()
        self.task_processor = TaskProcessor()
        self.tasks_connections = collections.defaultdict(dict)
        self.tasks_dictionary = dict()
        self.task_filter = self.make_task_filter(task_ids)
        self.events = events
Example #5
    def __init__(self, cluster, nodes, role_resolver=None):
        """Initialises.

        :param cluster: the cluster object instance
        :param nodes: the list of nodes for deployment
        :param role_resolver: the instance of BaseRoleResolver
        """

        self.cluster = cluster
        self.nodes = nodes
        self.role_resolver = role_resolver or RoleResolver(nodes)
Example #6
    def __init__(self, cluster, nodes):
        """Initializes.

        :param cluster: Cluster instance
        :param nodes: the sequence of nodes for deploy
        """
        self.cluster = cluster
        self.role_resolver = RoleResolver(nodes)
        self.task_serializer = DeployTaskSerializer()
        self.task_processor = TaskProcessor()
        self.tasks_per_node = collections.defaultdict(dict)
Example #7
    def __init__(self, cluster, nodes, task_ids=None):
        """Initializes.

        :param cluster: Cluster instance
        :param nodes: the sequence of nodes for deploy
        :param task_ids: Only the specified tasks will be executed;
                         if None, all tasks will be executed
        """
        self.cluster = cluster
        self.role_resolver = RoleResolver(nodes)
        self.task_serializer = DeployTaskSerializer()
        self.task_processor = TaskProcessor()
        self.tasks_per_node = collections.defaultdict(dict)
        self.task_filter = self.make_task_filter(task_ids)
Example #8
    def stage_tasks_serialize(self, tasks, nodes):
        """Serialize tasks for certain stage

        :param stage: oneof consts.STAGES
        :param nodes: list of node db objects
        """
        serialized = []
        role_resolver = RoleResolver(nodes)

        for task in tasks:

            if self.graph.should_exclude_task(task['id']):
                continue

            serializer = self.serializers.get_stage_serializer(task)(
                task, self.cluster, nodes, role_resolver=role_resolver)

            if not serializer.should_execute():
                continue

            serialized.extend(serializer.serialize())
        return serialized
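
A hypothetical stage serializer (the class name and behavior are illustrative,
not from the source) sketching the contract stage_tasks_serialize relies on:
the factory returned by get_stage_serializer is called with the task, the
cluster, the nodes and a shared role_resolver; should_execute gates the task,
and serialize produces the serialized fragments.

class NoopStageSerializer(object):
    def __init__(self, task, cluster, nodes, role_resolver=None):
        self.task = task
        self.nodes = nodes
        self.role_resolver = role_resolver

    def should_execute(self):
        # a real serializer could evaluate the task's condition here
        return True

    def serialize(self):
        # yield zero or more serialized tasks
        yield {'id': self.task['id'], 'type': 'noop'}
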
Example #9
    def initialize(self, cluster):
        self.all_nodes = objects.Cluster.get_nodes_not_for_deletion(cluster)
        self.role_resolver = RoleResolver(self.all_nodes)
        self.initialized = cluster.id
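
This initialize pairs with the finalize and _ensure_initialized_for methods
shown in Example #12; a usage sketch of that lifecycle, where cluster stands
in for a real Cluster object:

serializer = DeploymentMultinodeSerializer()
serializer.initialize(cluster)    # caches nodes and builds a RoleResolver
try:
    controllers = serializer.role_resolver.resolve(['controller'])
finally:
    serializer.finalize()         # drops the cached per-cluster state
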
Example #10
class TasksSerializer(object):
    """The deploy tasks serializer."""

    def __init__(self, cluster, nodes, task_ids=None):
        """Initializes.

        :param cluster: Cluster instance
        :param nodes: the sequence of nodes for deploy
        :param task_ids: Only the specified tasks will be executed;
                         if None, all tasks will be executed
        """
        self.cluster = cluster
        self.role_resolver = RoleResolver(nodes)
        self.task_serializer = DeployTaskSerializer()
        self.task_processor = TaskProcessor()
        self.tasks_per_node = collections.defaultdict(dict)
        self.task_filter = self.make_task_filter(task_ids)

    @classmethod
    def serialize(cls, cluster, nodes, tasks, task_ids=None):
        """Resolves roles and dependencies for tasks.

        :param cluster: the cluster instance
        :param nodes: the list of nodes
        :param tasks: the list of tasks
        :param task_ids: Only the specified tasks will be executed;
                         if None, all tasks will be executed
        :return: the list of serialized tasks per node
        """
        serializer = cls(cluster, nodes, task_ids)
        serializer.resolve_nodes(add_plugin_deployment_hooks(tasks), nodes)
        serializer.resolve_dependencies()
        return dict(
            (k, list(six.itervalues(v)))
            for k, v in six.iteritems(serializer.tasks_per_node)
        )

    def resolve_nodes(self, tasks, nodes):
        """Resolves node roles in tasks.

        :param tasks: the deployment tasks
        :param nodes: the list of nodes to deploy
        :return: the mapping of tasks per node
        """

        tasks_mapping = dict()
        groups = list()

        for task in tasks:
            if task.get('type') == consts.ORCHESTRATOR_TASK_TYPES.group:
                groups.append(task)
            else:
                tasks_mapping[task['id']] = task
                self.process_task(
                    task, nodes, lambda _: self.role_resolver,
                    skip=not self.task_filter(task['id'])
                )

        self.expand_task_groups(groups, tasks_mapping)
        # make sure that null node is present
        self.tasks_per_node.setdefault(None, dict())

    def process_task(self, task, nodes, resolver_factory, skip=False):
        """Processes one task one nodes of cluster.

        :param task: the task instance
        :param nodes: the list of nodes
        :param resolver_factory: the factory that creates a role resolver
        :param skip: mark the task as skipped
        """

        serializer_factory = self.task_serializer.get_stage_serializer(
            task
        )
        task_serializer = serializer_factory(
            task, self.cluster, nodes, role_resolver=resolver_factory(nodes)
        )
        # do not pass skipped attribute to astute
        skipped = skip or task.pop('skipped', False) or \
            not task_serializer.should_execute()
        for astute_task in self.task_processor.process_tasks(
                task, task_serializer.serialize()):
            # all skipped tasks shall have type 'skipped';
            # do not exclude them from the graph, to keep connections
            # between nodes
            if skipped:
                astute_task['type'] = \
                    consts.ORCHESTRATOR_TASK_TYPES.skipped

            for node_id in astute_task.pop('uids', ()):
                node_tasks = self.tasks_per_node[node_id]
                # de-duplicate the tasks on the node;
                # since a task can be added after a group is expanded, we
                # need to overwrite it if the existing task is skipped and
                # the new one is not.
                if self.need_update_task(node_tasks, astute_task):
                    node_tasks[astute_task['id']] = copy.deepcopy(astute_task)

    def resolve_dependencies(self):
        """Resolves tasks dependencies."""

        for node_id, tasks in six.iteritems(self.tasks_per_node):
            for task in six.itervalues(tasks):
                task['requires'] = list(
                    self.expand_dependencies(
                        node_id, task.get('requires'), False
                    )
                )
                task['required_for'] = list(
                    self.expand_dependencies(
                        node_id, task.get('required_for'), True
                    )
                )
                task['requires'].extend(
                    self.expand_cross_dependencies(
                        node_id, task.pop('cross-depends', None), False
                    )
                )

                task['required_for'].extend(
                    self.expand_cross_dependencies(
                        node_id, task.pop('cross-depended-by', None), True
                    )
                )
                task['requires'].extend(task.pop('requires_ex', ()))
                task['required_for'].extend(task.pop('required_for_ex', ()))

    def expand_task_groups(self, groups, task_mapping):
        """Expand group of tasks.

        :param groups: all tasks with type 'group'
        :param task_mapping: the mapping of task id to task object
        """
        for task in groups:
            skipped = not self.task_filter(task['id'])
            node_ids = self.role_resolver.resolve(task.get('role', ()))
            for sub_task_id in task.get('tasks', ()):
                try:
                    sub_task = task_mapping[sub_task_id]
                except KeyError:
                    raise errors.InvalidData(
                        'Task %s cannot be resolved' % sub_task_id
                    )

                # if the group is not excluded, all its tasks should run
                # as well; otherwise check each task individually
                self.process_task(
                    sub_task, node_ids, NullResolver,
                    skip=skipped and not self.task_filter(sub_task_id)
                )

    def expand_dependencies(self, node_id, dependencies, is_required_for):
        """Expands task dependencies on same node.

        :param node_id: the ID of target node
        :param dependencies: the list of dependencies on same node
        :param is_required_for: means task from required_for section
        """
        if not dependencies:
            return

        # need to search dependencies on node and in sync points
        node_ids = [node_id, None]
        for name in dependencies:
            for rel in self.resolve_relation(name, node_ids, is_required_for):
                yield rel

    def expand_cross_dependencies(
            self, node_id, dependencies, is_required_for):
        """Expands task dependencies on same node.

        :param node_id: the ID of target node
        :param dependencies: the list of cross-node dependencies
        :param is_required_for: means task from required_for section
        """
        if not dependencies:
            return

        for dep in dependencies:
            roles = dep.get('role', consts.TASK_ROLES.all)

            if roles == consts.TASK_ROLES.self:
                node_ids = [node_id]
            else:
                node_ids = self.role_resolver.resolve(
                    roles, dep.get('policy', consts.NODE_RESOLVE_POLICY.all)
                )
            relations = self.resolve_relation(
                dep['name'], node_ids, is_required_for
            )
            for rel in relations:
                yield rel

    def resolve_relation(self, name, node_ids, is_required_for):
        """Resolves the task relation.

        :param name: the name of the task
        :param node_ids: the IDs of the nodes to search
        :param is_required_for: whether the task comes from
                                the required_for section
        """
        match_policy = NameMatchingPolicy.create(name)
        for node_id in node_ids:
            applied_tasks = set()
            for task_name in self.tasks_per_node[node_id]:
                if task_name == name:
                    # the simple case when the name of the current task
                    # exactly matches the name of the task being searched for
                    yield {"name": task_name, "node_id": node_id}
                    continue

                # first get the original task name, which matters
                # when the current task is part of a chain
                original_task = self.task_processor.get_origin(task_name)
                if original_task in applied_tasks or \
                        not match_policy.match(original_task):
                    continue

                applied_tasks.add(original_task)
                if original_task != task_name:
                    if is_required_for:
                        task_name_gen = self.task_processor.get_first_task_id
                    else:
                        task_name_gen = self.task_processor.get_last_task_id
                    task_name = task_name_gen(original_task)

                yield {"name": task_name, "node_id": node_id}

    @classmethod
    def need_update_task(cls, tasks, task):
        """Checks that task shall overwrite existed one or should be added.

        :param tasks: the current node tasks
        :param task: the astute task object
        :return: True if the task is not present or must be overwritten,
                 otherwise False
        """
        existed_task = tasks.get(task['id'])
        if existed_task is None:
            return True

        if existed_task['type'] == task['type']:
            return False

        return task['type'] != consts.ORCHESTRATOR_TASK_TYPES.skipped

    @classmethod
    def make_task_filter(cls, task_ids):
        """Makes task filter according to specified ids.

        :param task_ids: the selected ids of tasks
        :return: function that checks whether a task id is selected
        """
        if not task_ids:
            return lambda _: True

        if not isinstance(task_ids, set):
            task_ids = set(task_ids)

        return lambda task_id: task_id in task_ids
Example #11
    @classmethod
    def setUpClass(cls):
        cls.tasks = [{
            'id': 'task1',
            'roles': ['controller'],
            'type': 'puppet',
            'version': '2.0.0',
            'condition': {
                'yaql_exp': '$.public_ssl.hostname = localhost'
            },
            'parameters': {},
            'requires': ['task3'],
            'required_for': ['task2'],
            'cross_depends': [{
                'name': 'task2',
                'role': 'compute'
            }],
            'cross_depended_by': [{
                'name': 'task3',
                'role': 'cinder'
            }]
        }, {
            'id': 'task2',
            'roles': ['compute', 'controller'],
            'type': 'puppet',
            'version': '2.0.0',
            'condition': {
                'yaql_exp': '$.public_ssl.hostname != localhost'
            },
            'parameters': {},
            'cross_depends': [{
                'name': 'task3',
                'role': 'cinder'
            }]
        }, {
            'id': 'task3',
            'roles': ['cinder', 'controller'],
            'type': 'puppet',
            'version': '2.0.0',
            'condition': 'settings:public_ssl.hostname != "localhost"',
            'parameters': {},
            'cross_depends': [{
                'name': 'task3',
                'role': '/.*/'
            }],
            'cross_depended_by': [{
                'name': 'task2',
                'role': 'self'
            }]
        }, {
            'id': 'task4',
            'roles': ['controller'],
            'type': 'puppet',
            'version': '2.0.0',
            'parameters': {},
            'cross_depended_by': [{
                'name': 'task3'
            }]
        }]

        cls.nodes = [
            mock.MagicMock(uid='1', roles=['controller']),
            mock.MagicMock(uid='2', roles=['compute']),
            mock.MagicMock(uid='3', roles=['cinder']),
            mock.MagicMock(uid='4', roles=['custom']),
        ]

        cls.context = lcm.TransactionContext({
            '1': {
                'cluster': {
                    'id': 1
                },
                'release': {
                    'version': 'liberty-9.0'
                },
                'openstack_version': 'liberty-9.0',
                'public_ssl': {
                    'hostname': 'localhost'
                },
                'attributes': {
                    'a_str': 'text1',
                    'a_int': 1
                }
            },
            '2': {
                'cluster': {
                    'id': 1
                },
                'release': {
                    'version': 'liberty-9.0'
                },
                'openstack_version': 'liberty-9.0',
                'public_ssl': {
                    'hostname': 'localhost'
                },
                'attributes': {
                    'a_str': 'text2',
                    'a_int': 2
                }
            },
            '3': {
                'cluster': {
                    'id': 1
                },
                'release': {
                    'version': 'liberty-9.0'
                },
                'openstack_version': 'liberty-9.0',
                'public_ssl': {
                    'hostname': 'localhost'
                },
                'attributes': {
                    'a_str': 'text3',
                    'a_int': 3
                }
            },
            '4': {
                'cluster': {
                    'id': 1
                },
                'release': {
                    'version': 'liberty-9.0'
                },
                'openstack_version': 'liberty-9.0',
                'public_ssl': {
                    'hostname': 'localhost'
                },
                'attributes': {
                    'a_str': 'text3',
                    'a_int': 3
                }
            }
        })

        with mock.patch('nailgun.utils.role_resolver.objects') as m_objects:
            m_objects.Node.all_roles = lambda x: x.roles
            cls.role_resolver = RoleResolver(cls.nodes)
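
Assuming RoleResolver matches plain role names exactly and treats names
wrapped in slashes, such as '/.*/', as regular expressions, the fixture above
should resolve roughly as sketched here (illustrative; uids are returned as a
set):

# role_resolver.resolve(['controller'])         -> {'1'}
# role_resolver.resolve(['compute', 'cinder'])  -> {'2', '3'}
# role_resolver.resolve(['/.*/'])               -> {'1', '2', '3', '4'}
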
Example #12
class DeploymentMultinodeSerializer(object):
    nova_network_serializer = nova_serializers.NovaNetworkDeploymentSerializer
    neutron_network_serializer = \
        neutron_serializers.NeutronNetworkDeploymentSerializer

    critical_roles = frozenset(('controller', 'ceph-osd', 'primary-mongo'))

    def __init__(self, tasks_graph=None):
        self.task_graph = tasks_graph
        self.all_nodes = None
        self.role_resolver = None
        self.initialized = None

    def initialize(self, cluster):
        self.all_nodes = objects.Cluster.get_nodes_not_for_deletion(cluster)
        self.role_resolver = RoleResolver(self.all_nodes)
        self.initialized = cluster.id

    def finalize(self):
        self.all_nodes = None
        self.role_resolver = None
        self.initialized = None

    def _ensure_initialized_for(self, cluster):
        # TODO(bgaifullin) need to move initialize into __init__
        if self.initialized != cluster.id:
            self.initialize(cluster)

    def serialize(self,
                  cluster,
                  nodes,
                  ignore_customized=False,
                  skip_extensions=False):
        """Method generates facts which are passed to puppet."""
        try:
            self.initialize(cluster)
            common_attrs = self.get_common_attrs(cluster)
            if not ignore_customized and cluster.replaced_deployment_info:
                # patch common attributes with custom deployment info
                utils.dict_update(common_attrs,
                                  cluster.replaced_deployment_info)

            if not skip_extensions:
                extensions.\
                    fire_callback_on_cluster_serialization_for_deployment(
                        cluster, common_attrs
                    )

            serialized_nodes = []

            origin_nodes = []
            customized_nodes = []
            if ignore_customized:
                origin_nodes = nodes
            else:
                for node in nodes:
                    if node.replaced_deployment_info:
                        customized_nodes.append(node)
                    else:
                        origin_nodes.append(node)

            serialized_nodes.extend(
                self.serialize_generated(origin_nodes, skip_extensions))
            serialized_nodes.extend(
                self.serialize_customized(customized_nodes))

            # NOTE(dshulyak) tasks should not be preserved from replaced
            # deployment info; there is a different mechanism to control
            # changes in tasks introduced during granular deployment,
            # and that mechanism should be used
            self.set_tasks(serialized_nodes)

            deployment_info = {
                'common': common_attrs,
                'nodes': serialized_nodes
            }
        finally:
            self.finalize()

        return deployment_info

    def serialize_generated(self, nodes, skip_extensions):
        serialized_nodes = self.serialize_nodes(nodes)
        nodes_map = {n.uid: n for n in nodes}

        self.set_deployment_priorities(serialized_nodes)
        for node_data in serialized_nodes:
            # the serialized nodes may contain fake nodes, like the master
            # node, which have no related db object; those shall be excluded.
            if not skip_extensions and node_data['uid'] in nodes_map:
                extensions.fire_callback_on_node_serialization_for_deployment(
                    nodes_map[node_data['uid']], node_data)
            yield node_data

    def serialize_customized(self, nodes):
        for node in nodes:
            for role_data in node.replaced_deployment_info:
                yield role_data

    def get_common_attrs(self, cluster):
        """Cluster attributes."""

        # tests call this method directly,
        # and we need this workaround to avoid refactoring a lot of tests.
        self._ensure_initialized_for(cluster)
        attrs = objects.Cluster.get_attributes(cluster)
        attrs = objects.Attributes.merged_attrs_values(attrs)

        attrs['deployment_mode'] = cluster.mode
        attrs['deployment_id'] = cluster.id
        attrs['openstack_version'] = cluster.release.version
        attrs['fuel_version'] = cluster.fuel_version
        attrs['nodes'] = self.node_list(self.all_nodes)

        # Adding params to workloads_collector
        if 'workloads_collector' not in attrs:
            attrs['workloads_collector'] = {}
        attrs['workloads_collector']['create_user'] = \
            objects.MasterNodeSettings.must_send_stats()
        username = attrs['workloads_collector'].pop('user', None)
        attrs['workloads_collector']['username'] = username

        if self.role_resolver.resolve(['cinder']):
            attrs['use_cinder'] = True

        net_serializer = self.get_net_provider_serializer(cluster)
        net_common_attrs = net_serializer.get_common_attrs(cluster, attrs)
        utils.dict_update(attrs, net_common_attrs)
        self.inject_list_of_plugins(attrs, cluster)

        return attrs

    @classmethod
    def node_list(cls, nodes):
        """Generate nodes list. Represents as "nodes" parameter in facts."""
        node_list = []

        for node in nodes:
            for role in objects.Node.all_roles(node):
                node_list.append(cls.serialize_node_for_node_list(node, role))

        return node_list

    @classmethod
    def serialize_node_for_node_list(cls, node, role):
        return {
            'uid': node.uid,
            'fqdn': objects.Node.get_node_fqdn(node),
            'name': objects.Node.get_slave_name(node),
            'role': role
        }

    # TODO(apopovych): we have a more generic method 'filter_by_roles'
    def by_role(self, nodes, role):
        return filter(lambda node: node['role'] == role, nodes)

    def not_roles(self, nodes, roles):
        return filter(lambda node: node['role'] not in roles, nodes)

    def serialize_nodes(self, nodes):
        """Serialize node for each role.

        For example if node has two roles then
        in orchestrator will be passed two serialized
        nodes.
        """
        serialized_nodes = []
        for node in nodes:
            for role in objects.Node.all_roles(node):
                serialized_nodes.append(self.serialize_node(node, role))
        return serialized_nodes

    def serialize_node(self, node, role):
        """Serialize node, then it will be merged with common attributes."""
        node_attrs = {
            # Yes, uid really should be a string
            'uid': node.uid,
            'fqdn': objects.Node.get_node_fqdn(node),
            'status': node.status,
            'role': role,
            'vms_conf': node.vms_conf,
            'fail_if_error': role in self.critical_roles,
            # TODO(eli): need to remove, required for the fake thread only
            'online': node.online,
        }

        net_serializer = self.get_net_provider_serializer(node.cluster)
        node_attrs.update(net_serializer.get_node_attrs(node))
        node_attrs.update(net_serializer.network_ranges(node.group_id))
        node_attrs.update(self.generate_test_vm_image_data(node))

        return node_attrs

    def generate_properties_arguments(self, properties_data):
        """build a string of properties from a key value hash"""
        properties = []
        for key, value in six.iteritems(properties_data):
            properties.append('--property {key}={value}'.format(key=key,
                                                                value=value))
        return ' '.join(properties)

    def generate_test_vm_image_data(self, node):
        # Instantiate all default values in dict.
        image_data = {
            'container_format': 'bare',
            'public': 'true',
            'disk_format': 'qcow2',
            'img_name': 'TestVM',
            'img_path': '',
            'os_name': 'cirros',
            'min_ram': 64,
            'glance_properties': '',
            'properties': {},
        }
        # Generate the right path to the image.
        c_attrs = node.cluster.attributes
        if 'ubuntu' in c_attrs['generated']['cobbler']['profile']:
            img_dir = '/usr/share/cirros-testvm/'
        else:
            img_dir = '/opt/vm/'
        image_data['img_path'] = '{0}cirros-x86_64-disk.img'.format(img_dir)

        properties_data = {}

        # Alternate VMWare specific values.
        if c_attrs['editable']['common']['libvirt_type']['value'] == 'vcenter':
            image_data.update({
                'disk_format': 'vmdk',
                'img_path': '{0}cirros-i386-disk.vmdk'.format(img_dir),
            })
            properties_data = {
                'vmware_disktype': 'sparse',
                'vmware_adaptertype': 'lsiLogic',
                'hypervisor_type': 'vmware'
            }

        # NOTE(aschultz): properties was added as part of N and should be
        # used in favor of glance_properties
        image_data['glance_properties'] = self.generate_properties_arguments(
            properties_data)
        image_data['properties'] = properties_data

        return {'test_vm_image': image_data}

    @classmethod
    def get_net_provider_serializer(cls, cluster):
        if cluster.net_provider == 'nova_network':
            return cls.nova_network_serializer
        else:
            return cls.neutron_network_serializer

    def filter_by_roles(self, nodes, roles):
        return filter(lambda node: node['role'] in roles, nodes)

    def set_deployment_priorities(self, nodes):
        if self.task_graph is not None:
            self.task_graph.add_priorities(nodes)

    def set_tasks(self, serialized_nodes):
        if self.task_graph is not None:
            for node in serialized_nodes:
                node['tasks'] = self.task_graph.deploy_task_serialize(node)

    def inject_list_of_plugins(self, attributes, cluster):
        """Added information about plugins to serialized attributes.

        :param attributes: the serialized attributes
        :param cluster: the cluster object
        """
        plugins = objects.ClusterPlugin.get_enabled(cluster.id)
        attributes['plugins'] = [
            self.serialize_plugin(cluster, p) for p in plugins
        ]

    @classmethod
    def serialize_plugin(cls, cluster, plugin):
        """Gets plugin information to include into serialized attributes.

        :param cluster: the cluster object
        :param plugin: the plugin object
        """
        return plugin['name']
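
A usage sketch for the serializer above (cluster and its nodes are
placeholders for real db objects): serialize returns the cluster-wide
attributes under 'common' and one entry per (node, role) pair under 'nodes'.

serializer = DeploymentMultinodeSerializer(tasks_graph=None)
deployment_info = serializer.serialize(cluster, cluster.nodes)

common_attrs = deployment_info['common']
for node_data in deployment_info['nodes']:
    print('%s %s' % (node_data['uid'], node_data['role']))
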
Example #13
    def initialize(self, cluster):
        self.all_nodes = objects.Cluster.get_nodes_not_for_deletion(cluster)
        self.role_resolver = RoleResolver(self.all_nodes)
        self.initialized = cluster.id
Example #14
    def __init__(self, task, cluster, nodes, role_resolver=None):
        super(StandardConfigRolesHook, self).__init__(task, cluster)
        self.nodes = nodes
        self.role_resolver = role_resolver or RoleResolver(nodes)
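
The role_resolver or RoleResolver(nodes) idiom lets a caller build one
resolver and share it across many hooks instead of recomputing it per task; a
sketch with placeholder task, cluster and nodes objects:

shared_resolver = RoleResolver(nodes)
hooks = [
    StandardConfigRolesHook(task, cluster, nodes,
                            role_resolver=shared_resolver)
    for task in tasks
]
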
Example #15
class TasksSerializer(object):
    """The deploy tasks serializer."""
    def __init__(self, cluster, nodes, task_ids=None):
        """Initializes.

        :param cluster: Cluster instance
        :param nodes: the sequence of nodes for deploy
        :param task_ids: Only the specified tasks will be executed;
                         if None, all tasks will be executed
        """
        self.cluster = cluster
        self.role_resolver = RoleResolver(nodes)
        self.task_serializer = DeployTaskSerializer()
        self.task_processor = TaskProcessor()
        self.tasks_per_node = collections.defaultdict(dict)
        self.task_filter = self.make_task_filter(task_ids)

    @classmethod
    def serialize(cls, cluster, nodes, tasks, task_ids=None):
        """Resolves roles and dependencies for tasks.

        :param cluster: the cluster instance
        :param nodes: the list of nodes
        :param tasks: the list of tasks
        :param task_ids: Only the specified tasks will be executed;
                         if None, all tasks will be executed
        :return: the list of serialized tasks per node
        """
        serializer = cls(cluster, nodes, task_ids)
        serializer.resolve_nodes(add_plugin_deployment_hooks(tasks), nodes)
        serializer.resolve_dependencies()
        return dict((k, list(six.itervalues(v)))
                    for k, v in six.iteritems(serializer.tasks_per_node))

    def resolve_nodes(self, tasks, nodes):
        """Resolves node roles in tasks.

        :param tasks: the deployment tasks
        :param nodes: the list of nodes to deploy
        :return: the mapping of tasks per node
        """

        tasks_mapping = dict()
        groups = list()

        for task in tasks:
            if task.get('type') == consts.ORCHESTRATOR_TASK_TYPES.group:
                groups.append(task)
            else:
                tasks_mapping[task['id']] = task
                self.process_task(task,
                                  nodes,
                                  lambda _: self.role_resolver,
                                  skip=not self.task_filter(task['id']))

        self.expand_task_groups(groups, tasks_mapping)
        # make sure that null node is present
        self.tasks_per_node.setdefault(None, dict())

    def process_task(self, task, nodes, resolver_factory, skip=False):
        """Processes one task one nodes of cluster.

        :param task: the task instance
        :param nodes: the list of nodes
        :param resolver_factory: the factory that creates a role resolver
        :param skip: mark the task as skipped
        """

        serializer_factory = self.task_serializer.get_stage_serializer(task)
        task_serializer = serializer_factory(
            task, self.cluster, nodes, role_resolver=resolver_factory(nodes))
        # do not pass skipped attribute to astute
        skipped = skip or task.pop('skipped', False) or \
            not task_serializer.should_execute()
        for astute_task in self.task_processor.process_tasks(
                task, task_serializer.serialize()):
            # all skipped tasks shall have type 'skipped';
            # do not exclude them from the graph, to keep connections
            # between nodes
            if skipped:
                astute_task['type'] = \
                    consts.ORCHESTRATOR_TASK_TYPES.skipped

            for node_id in astute_task.pop('uids', ()):
                node_tasks = self.tasks_per_node[node_id]
                # de-duplicate the tasks on the node;
                # since a task can be added after a group is expanded, we
                # need to overwrite it if the existing task is skipped and
                # the new one is not.
                if self.need_update_task(node_tasks, astute_task):
                    node_tasks[astute_task['id']] = copy.deepcopy(astute_task)

    def resolve_dependencies(self):
        """Resolves tasks dependencies."""

        for node_id, tasks in six.iteritems(self.tasks_per_node):
            for task in six.itervalues(tasks):
                task['requires'] = list(
                    self.expand_dependencies(node_id, task.get('requires'),
                                             False))
                task['required_for'] = list(
                    self.expand_dependencies(node_id, task.get('required_for'),
                                             True))
                task['requires'].extend(
                    self.expand_cross_dependencies(
                        node_id, task.pop('cross-depends', None), False))

                task['required_for'].extend(
                    self.expand_cross_dependencies(
                        node_id, task.pop('cross-depended-by', None), True))
                task['requires'].extend(task.pop('requires_ex', ()))
                task['required_for'].extend(task.pop('required_for_ex', ()))

    def expand_task_groups(self, groups, task_mapping):
        """Expand group of tasks.

        :param groups: all tasks with type 'group'
        :param task_mapping: the mapping of task id to task object
        """
        for task in groups:
            skipped = not self.task_filter(task['id'])
            node_ids = self.role_resolver.resolve(task.get('role', ()))
            for sub_task_id in task.get('tasks', ()):
                try:
                    sub_task = task_mapping[sub_task_id]
                except KeyError:
                    raise errors.InvalidData('Task %s cannot be resolved'
                                             % sub_task_id)

                # if the group is not excluded, all its tasks should run
                # as well; otherwise check each task individually
                self.process_task(sub_task,
                                  node_ids,
                                  NullResolver,
                                  skip=skipped
                                  and not self.task_filter(sub_task_id))

    def expand_dependencies(self, node_id, dependencies, is_required_for):
        """Expands task dependencies on same node.

        :param node_id: the ID of target node
        :param dependencies: the list of dependencies on same node
        :param is_required_for: means task from required_for section
        """
        if not dependencies:
            return

        # need to search dependencies on node and in sync points
        node_ids = [node_id, None]
        for name in dependencies:
            for rel in self.resolve_relation(name, node_ids, is_required_for):
                yield rel

    def expand_cross_dependencies(self, node_id, dependencies,
                                  is_required_for):
        """Expands task dependencies on same node.

        :param node_id: the ID of target node
        :param dependencies: the list of cross-node dependencies
        :param is_required_for: means task from required_for section
        """
        if not dependencies:
            return

        for dep in dependencies:
            roles = dep.get('role', consts.TASK_ROLES.all)

            if roles == consts.TASK_ROLES.self:
                node_ids = [node_id]
            else:
                node_ids = self.role_resolver.resolve(
                    roles, dep.get('policy', consts.NODE_RESOLVE_POLICY.all))
            relations = self.resolve_relation(dep['name'], node_ids,
                                              is_required_for)
            for rel in relations:
                yield rel

    def resolve_relation(self, name, node_ids, is_required_for):
        """Resolves the task relation.

        :param name: the name of the task
        :param node_ids: the IDs of the nodes to search
        :param is_required_for: whether the task comes from
                                the required_for section
        """
        found = False
        match_policy = NameMatchingPolicy.create(name)
        for node_id in node_ids:
            applied_tasks = set()
            for task_name in self.tasks_per_node[node_id]:
                if task_name == name:
                    # the simple case when the name of the current task
                    # exactly matches the name of the task being searched for
                    found = True
                    yield {"name": task_name, "node_id": node_id}
                    continue

                # first get the original task name, which matters
                # when the current task is part of a chain
                original_task = self.task_processor.get_origin(task_name)
                if original_task in applied_tasks or \
                        not match_policy.match(original_task):
                    continue

                found = True
                applied_tasks.add(original_task)
                if original_task != task_name:
                    if is_required_for:
                        task_name_gen = self.task_processor.get_first_task_id
                    else:
                        task_name_gen = self.task_processor.get_last_task_id
                    task_name = task_name_gen(original_task)

                yield {"name": task_name, "node_id": node_id}

        if not found:
            logger.warning(
                "Dependency '%s' cannot be resolved: "
                "no candidates in nodes '%s'.", name,
                ", ".join(six.moves.map(str, node_ids)))

    @classmethod
    def need_update_task(cls, tasks, task):
        """Checks that task shall overwrite existed one or should be added.

        :param tasks: the current node tasks
        :param task: the astute task object
        :return: True if the task is not present or must be overwritten,
                 otherwise False
        """
        existed_task = tasks.get(task['id'])
        if existed_task is None:
            return True

        if existed_task['type'] == task['type']:
            return False

        return task['type'] != consts.ORCHESTRATOR_TASK_TYPES.skipped

    @classmethod
    def make_task_filter(cls, task_ids):
        """Makes task filter according to specified ids.

        :param task_ids: the selected ids of tasks
        :return: function that checks whether a task id is selected
        """
        if task_ids is None:
            return lambda _: True

        if not isinstance(task_ids, set):
            task_ids = set(task_ids)

        return lambda task_id: task_id in task_ids
Example #16
class TasksSerializer(object):
    """The deploy tasks serializer."""
    def __init__(self, cluster, nodes):
        """Initializes.

        :param cluster: Cluster instance
        :param nodes: the sequence of nodes for deploy
        """
        self.cluster = cluster
        self.role_resolver = RoleResolver(nodes)
        self.task_serializer = DeployTaskSerializer()
        self.task_processor = TaskProcessor()
        self.tasks_per_node = collections.defaultdict(dict)

    @classmethod
    def serialize(cls, cluster, nodes, tasks):
        """Resolves roles and dependencies for tasks.

        :param cluster: the cluster instance
        :param nodes: the list of nodes
        :param tasks: the list of tasks
        :return: the list of serialized tasks per node
        """
        serializer = cls(cluster, nodes)
        serializer.resolve_nodes(add_plugin_deployment_hooks(tasks), nodes)
        serializer.resolve_dependencies()
        return dict((k, list(six.itervalues(v)))
                    for k, v in six.iteritems(serializer.tasks_per_node))

    def resolve_nodes(self, tasks, nodes):
        """Resolves node roles in tasks.

        :param tasks: the deployment tasks
        :param nodes: the list of nodes to deploy
        :return: the mapping of tasks per node
        """

        tasks_mapping = dict()
        tasks_groups = collections.defaultdict(set)

        for task in tasks:
            if task.get('type') == consts.ORCHESTRATOR_TASK_TYPES.group:
                tasks_for_role = task.get('tasks')
                if tasks_for_role:
                    tasks_groups[tuple(task.get('role',
                                                ()))].update(tasks_for_role)
                continue
            tasks_mapping[task['id']] = task
            self.process_task(task, nodes, lambda _: self.role_resolver)

        self.expand_task_groups(tasks_groups, tasks_mapping)
        # make sure that null node is present
        self.tasks_per_node.setdefault(None, dict())

    def process_task(self, task, nodes, resolver_factory):
        """Processes one task one nodes of cluster.

        :param task: the task instance
        :param nodes: the list of nodes
        :param resolver_factory: the factory that creates a role resolver
        """

        serializer_factory = self.task_serializer.get_stage_serializer(task)
        task_serializer = serializer_factory(
            task, self.cluster, nodes, role_resolver=resolver_factory(nodes))
        # do not pass skipped attribute to astute
        skipped = task.pop('skipped', False) or \
            not task_serializer.should_execute()
        for astute_task in self.task_processor.process_tasks(
                task, task_serializer.serialize()):
            # all skipped tasks shall have type 'skipped'
            if skipped:
                astute_task['type'] = \
                    consts.ORCHESTRATOR_TASK_TYPES.skipped

            for node_id in astute_task.pop('uids', ()):
                node_tasks = self.tasks_per_node[node_id]
                # de-duplicate the tasks on the node
                if astute_task['id'] in node_tasks:
                    continue
                node_tasks[astute_task['id']] = copy.deepcopy(astute_task)

    def resolve_dependencies(self):
        """Resolves tasks dependencies."""

        for node_id, tasks in six.iteritems(self.tasks_per_node):
            for task in six.itervalues(tasks):
                task['requires'] = list(
                    self.expand_dependencies(node_id, task.get('requires'),
                                             False))
                task['required_for'] = list(
                    self.expand_dependencies(node_id, task.get('required_for'),
                                             True))
                task['requires'].extend(
                    self.expand_cross_dependencies(
                        node_id, task.pop('cross-depends', None), False))

                task['required_for'].extend(
                    self.expand_cross_dependencies(
                        node_id, task.pop('cross-depended-by', None), True))
                task['requires'].extend(task.pop('requires_ex', ()))
                task['required_for'].extend(task.pop('required_for_ex', ()))

    def expand_task_groups(self, tasks_per_role, task_mapping):
        """Expand group of tasks.

        :param tasks_per_role: the set of tasks per role
        :param task_mapping: the mapping of task id to task object
        """
        for roles, task_ids in six.iteritems(tasks_per_role):
            for task_id in task_ids:
                try:
                    task = task_mapping[task_id]
                except KeyError:
                    raise errors.InvalidData('Task %s cannot be resolved'
                                             % task_id)

                for node_id in self.role_resolver.resolve(roles):
                    self.process_task(task, [node_id], NullResolver)

    def expand_dependencies(self, node_id, dependencies, is_required_for):
        """Expands task dependencies on same node.

        :param node_id: the ID of target node
        :param dependencies: the list of dependencies on same node
        :param is_required_for: means task from required_for section
        """
        if not dependencies:
            return

        # need to search dependencies on node and in sync points
        node_ids = [node_id, None]
        for name in dependencies:
            for rel in self.resolve_relation(name, node_ids, is_required_for):
                yield rel

    def expand_cross_dependencies(self, node_id, dependencies,
                                  is_required_for):
        """Expands task dependencies on same node.

        :param node_id: the ID of target node
        :param dependencies: the list of cross-node dependencies
        :param is_required_for: means task from required_for section
        """
        if not dependencies:
            return

        for dep in dependencies:
            roles = dep.get('role', consts.TASK_ROLES.all)

            if roles == consts.TASK_ROLES.self:
                node_ids = [node_id]
            else:
                node_ids = self.role_resolver.resolve(
                    roles, dep.get('policy', consts.NODE_RESOLVE_POLICY.all))
            relations = self.resolve_relation(dep['name'], node_ids,
                                              is_required_for)
            for rel in relations:
                yield rel

    def resolve_relation(self, name, node_ids, is_required_for):
        """Resolves the task relation.

        :param name: the name of the task
        :param node_ids: the IDs of the nodes to search
        :param is_required_for: whether the task comes from
                                the required_for section
        """
        found = False
        match_policy = NameMatchingPolicy.create(name)
        for node_id in node_ids:
            applied_tasks = set()
            for task_name in self.tasks_per_node[node_id]:
                if task_name == name:
                    # the simple case when the name of the current task
                    # exactly matches the name of the task being searched for
                    found = True
                    yield {"name": task_name, "node_id": node_id}
                    continue

                # first get the original task name, which matters
                # when the current task is part of a chain
                original_task = self.task_processor.get_origin(task_name)
                if original_task in applied_tasks or \
                        not match_policy.match(original_task):
                    continue

                found = True
                applied_tasks.add(original_task)
                if original_task != task_name:
                    if is_required_for:
                        task_name_gen = self.task_processor.get_first_task_id
                    else:
                        task_name_gen = self.task_processor.get_last_task_id
                    task_name = task_name_gen(original_task)

                yield {"name": task_name, "node_id": node_id}

        if not found:
            logger.warning(
                "Dependency '%s' cannot be resolved: "
                "no candidates in nodes '%s'.", name,
                ", ".join(six.moves.map(str, node_ids)))
Example #17
class DeploymentMultinodeSerializer(object):
    nova_network_serializer = nova_serializers.NovaNetworkDeploymentSerializer
    neutron_network_serializer = \
        neutron_serializers.NeutronNetworkDeploymentSerializer

    critical_roles = frozenset(('controller', 'ceph-osd', 'primary-mongo'))

    def __init__(self, tasks_graph=None):
        self.task_graph = tasks_graph
        self.all_nodes = None
        self.role_resolver = None
        self.initialized = None

    def initialize(self, cluster):
        self.all_nodes = objects.Cluster.get_nodes_not_for_deletion(cluster)
        self.role_resolver = RoleResolver(self.all_nodes)
        self.initialized = cluster.id

    def finalize(self):
        self.all_nodes = None
        self.role_resolver = None
        self.initialized = None

    def _ensure_initialized_for(self, cluster):
        # TODO(bgaifullin) need to move initialize into __init__
        if self.initialized != cluster.id:
            self.initialize(cluster)

    def serialize(self, cluster, nodes, ignore_customized=False):
        """Method generates facts which are passed to puppet."""
        try:
            self.initialize(cluster)
            serialized_nodes = []

            origin_nodes = []
            customized_nodes = []
            if ignore_customized:
                origin_nodes = nodes
            else:
                for node in nodes:
                    if node.replaced_deployment_info:
                        customized_nodes.append(node)
                    else:
                        origin_nodes.append(node)

            serialized_nodes.extend(
                self.serialize_generated(cluster, origin_nodes)
            )
            serialized_nodes.extend(
                self.serialize_customized(cluster, customized_nodes)
            )

            # NOTE(dshulyak) tasks should not be preserved from replaced
            # deployment info; there is a different mechanism to control
            # changes in tasks introduced during granular deployment,
            # and that mechanism should be used
            self.set_tasks(serialized_nodes)
        finally:
            self.finalize()

        return serialized_nodes

    def serialize_generated(self, cluster, nodes):
        nodes = self.serialize_nodes(nodes)
        common_attrs = self.get_common_attrs(cluster)

        self.set_deployment_priorities(nodes)
        for node in nodes:
            yield utils.dict_merge(node, common_attrs)

    def serialize_customized(self, cluster, nodes):
        for node in nodes:
            for role_data in node.replaced_deployment_info:
                yield role_data

    def get_common_attrs(self, cluster):
        """Cluster attributes."""

        # tests call this method directly,
        # and we need this workaround to avoid refactoring a lot of tests.
        self._ensure_initialized_for(cluster)
        attrs = objects.Cluster.get_attributes(cluster)
        attrs = objects.Attributes.merged_attrs_values(attrs)

        attrs['deployment_mode'] = cluster.mode
        attrs['deployment_id'] = cluster.id
        attrs['openstack_version'] = cluster.release.version
        attrs['fuel_version'] = cluster.fuel_version
        attrs['nodes'] = self.node_list(self.all_nodes)

        # Adding params to workloads_collector
        if 'workloads_collector' not in attrs:
            attrs['workloads_collector'] = {}
        attrs['workloads_collector']['create_user'] = \
            objects.MasterNodeSettings.must_send_stats()
        username = attrs['workloads_collector'].pop('user', None)
        attrs['workloads_collector']['username'] = username

        if self.role_resolver.resolve(['cinder']):
            attrs['use_cinder'] = True

        net_serializer = self.get_net_provider_serializer(cluster)
        net_common_attrs = net_serializer.get_common_attrs(cluster, attrs)
        attrs = utils.dict_merge(attrs, net_common_attrs)

        self.inject_list_of_plugins(attrs, cluster)

        return attrs

    @classmethod
    def node_list(cls, nodes):
        """Generate nodes list. Represents as "nodes" parameter in facts."""
        node_list = []

        for node in nodes:
            for role in objects.Node.all_roles(node):
                node_list.append(cls.serialize_node_for_node_list(node, role))

        return node_list

    @classmethod
    def serialize_node_for_node_list(cls, node, role):
        return {
            'uid': node.uid,
            'fqdn': objects.Node.get_node_fqdn(node),
            'name': objects.Node.get_slave_name(node),
            'role': role}

    # TODO(apopovych): we have a more generic method 'filter_by_roles'
    def by_role(self, nodes, role):
        return filter(lambda node: node['role'] == role, nodes)

    def not_roles(self, nodes, roles):
        return filter(lambda node: node['role'] not in roles, nodes)

    def serialize_nodes(self, nodes):
        """Serialize node for each role.

        For example if node has two roles then
        in orchestrator will be passed two serialized
        nodes.
        """
        serialized_nodes = []
        for node in nodes:
            for role in objects.Node.all_roles(node):
                serialized_nodes.append(self.serialize_node(node, role))
        return serialized_nodes

    def serialize_node(self, node, role):
        """Serialize node, then it will be merged with common attributes."""
        node_attrs = {
            # Yes, uid really should be a string
            'uid': node.uid,
            'fqdn': objects.Node.get_node_fqdn(node),
            'status': node.status,
            'role': role,
            'vms_conf': node.vms_conf,
            'fail_if_error': role in self.critical_roles,
            # TODO(eli): need to remove, required for the fake thread only
            'online': node.online,
        }

        net_serializer = self.get_net_provider_serializer(node.cluster)
        node_attrs.update(net_serializer.get_node_attrs(node))
        node_attrs.update(net_serializer.network_ranges(node.group_id))
        node_attrs.update(self.generate_test_vm_image_data(node))

        return node_attrs

    def generate_properties_arguments(self, properties_data):
        """build a string of properties from a key value hash"""
        properties = []
        for key, value in six.iteritems(properties_data):
            properties.append('--property {key}={value}'.format(
                key=key, value=value))
        return ' '.join(properties)

    def generate_test_vm_image_data(self, node):
        # Instantiate all default values in dict.
        image_data = {
            'container_format': 'bare',
            'public': 'true',
            'disk_format': 'qcow2',
            'img_name': 'TestVM',
            'img_path': '',
            'os_name': 'cirros',
            'min_ram': 64,
            'glance_properties': '',
            'properties': {},
        }
        # Generate the right path to the image.
        c_attrs = node.cluster.attributes
        if 'ubuntu' in c_attrs['generated']['cobbler']['profile']:
            img_dir = '/usr/share/cirros-testvm/'
        else:
            img_dir = '/opt/vm/'
        image_data['img_path'] = '{0}cirros-x86_64-disk.img'.format(img_dir)

        properties_data = {}

        # Override with VMware-specific values.
        if c_attrs['editable']['common']['libvirt_type']['value'] == 'vcenter':
            image_data.update({
                'disk_format': 'vmdk',
                'img_path': '{0}cirros-i386-disk.vmdk'.format(img_dir),
            })
            properties_data = {
                'vmware_disktype': 'sparse',
                'vmware_adaptertype': 'lsiLogic',
                'hypervisor_type': 'vmware'
            }

        # NOTE(aschultz): 'properties' was added as part of N and should be
        # used in favor of 'glance_properties'
        image_data['glance_properties'] = self.generate_properties_arguments(
            properties_data)
        image_data['properties'] = properties_data

        return {'test_vm_image': image_data}
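
    # For a non-vCenter Ubuntu cluster the returned fact looks roughly like
    # this (a sketch built from the defaults above, not authoritative):
    #
    #   {'test_vm_image': {
    #       'container_format': 'bare',
    #       'public': 'true',
    #       'disk_format': 'qcow2',
    #       'img_name': 'TestVM',
    #       'img_path': '/usr/share/cirros-testvm/cirros-x86_64-disk.img',
    #       'os_name': 'cirros',
    #       'min_ram': 64,
    #       'glance_properties': '',
    #       'properties': {}}}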

    @classmethod
    def get_net_provider_serializer(cls, cluster):
        if cluster.net_provider == 'nova_network':
            return cls.nova_network_serializer
        else:
            return cls.neutron_network_serializer

    def filter_by_roles(self, nodes, roles):
        return filter(
            lambda node: node['role'] in roles, nodes)

    def set_deployment_priorities(self, nodes):
        if self.task_graph is not None:
            self.task_graph.add_priorities(nodes)

    def set_tasks(self, serialized_nodes):
        if self.task_graph is not None:
            for node in serialized_nodes:
                node['tasks'] = self.task_graph.deploy_task_serialize(node)

    def inject_list_of_plugins(self, attributes, cluster):
        """Added information about plugins to serialized attributes.

        :param attributes: the serialized attributes
        :param cluster: the cluster object
        """
        plugins = objects.ClusterPlugin.get_enabled(cluster.id)
        attributes['plugins'] = [
            self.serialize_plugin(cluster, p) for p in plugins
        ]

    @classmethod
    def serialize_plugin(cls, cluster, plugin):
        """Gets plugin information to include into serialized attributes.

        :param cluster: the cluster object
        :param plugin: the plugin object
        """
        return plugin['name']
Exemplo n.º 18
0
class TasksSerializer(object):
    """The deploy tasks serializer."""
    def __init__(self,
                 cluster,
                 nodes,
                 affected_nodes=None,
                 task_ids=None,
                 events=None):
        """Initializes.

        :param cluster: Cluster instance
        :param nodes: the sequence of nodes for deploy
        :param affected_nodes: the list of nodes affected by the deployment
        :param task_ids: only the specified tasks will be executed;
                         if None, all tasks will be executed
        :param events: the events (see TaskEvents)
        """
        if affected_nodes:
            self.affected_node_ids = frozenset(n.uid for n in affected_nodes)
            self.deployment_nodes = copy.copy(nodes)
            self.deployment_nodes.extend(affected_nodes)
        else:
            self.deployment_nodes = nodes
            self.affected_node_ids = frozenset()
        self.cluster = cluster
        self.role_resolver = RoleResolver(self.deployment_nodes)
        self.task_serializer = DeployTaskSerializer()
        self.task_processor = TaskProcessor()
        self.tasks_connections = collections.defaultdict(dict)
        self.tasks_dictionary = dict()
        self.task_filter = self.make_task_filter(task_ids)
        self.events = events

    @classmethod
    def serialize(cls,
                  cluster,
                  nodes,
                  tasks,
                  affected_nodes=None,
                  task_ids=None,
                  events=None):
        """Resolves roles and dependencies for tasks.

        :param cluster: the cluster instance
        :param nodes: the list of nodes
        :param affected_nodes: the list of nodes affected by the deployment
        :param tasks: the list of tasks
        :param task_ids: only the specified tasks will be executed;
                         if None, all tasks will be executed
        :param events: the events (see TaskEvents)
        :return: the tasks dictionary and the serialized tasks per node
        """
        serializer = cls(cluster, nodes, affected_nodes, task_ids, events)
        serializer.resolve_nodes(add_plugin_deployment_hooks(tasks))
        serializer.resolve_dependencies()
        tasks_dictionary = serializer.tasks_dictionary
        tasks_connections = serializer.tasks_connections
        for node_id in tasks_connections:
            tasks_connections[node_id] = list(
                six.itervalues(tasks_connections[node_id]))
        return tasks_dictionary, tasks_connections
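
    # A typical call site might look like the sketch below; `cluster`,
    # `nodes` and `tasks` are assumed to come from the objects layer and
    # the deployment graph (task id illustrative):
    #
    #   tasks_dictionary, tasks_connections = TasksSerializer.serialize(
    #       cluster, nodes, tasks, task_ids=['netconfig'])
    #
    # tasks_dictionary maps a task id to its serialized body, while
    # tasks_connections maps a node id to the list of per-node task
    # relations (id, type and dependency links).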

    def resolve_nodes(self, tasks):
        """Resolves node roles in tasks.

        :param tasks: the deployment tasks
        :return: the mapping of tasks per node
        """

        tasks_mapping = dict()
        groups = list()

        for task in tasks:
            if task.get('type') == consts.ORCHESTRATOR_TASK_TYPES.group:
                groups.append(task)
            else:
                tasks_mapping[task['id']] = task
                skip = not self.task_filter(task['id'])
                self.process_task(task, self.role_resolver, skip)

        self.expand_task_groups(groups, tasks_mapping)
        # make sure that the null node is present
        self.tasks_connections.setdefault(None, dict())

    def process_task(self, task, role_resolver, skip=False):
        """Processes one task one nodes of cluster.

        :param task: the task instance
        :param role_resolver: the role resolver
        :param skip: mark the task as skipped
        """

        serializer_factory = self.task_serializer.get_stage_serializer(task)
        task_serializer = serializer_factory(task,
                                             self.cluster,
                                             self.deployment_nodes,
                                             role_resolver=role_resolver)
        skipped = skip or not task_serializer.should_execute()
        force = self.events and self.events.check_subscription(task)
        if skipped and not force:
            # Do not call real serializer if it should be skipped
            task_serializer = NoopSerializer(task,
                                             self.cluster,
                                             self.deployment_nodes,
                                             role_resolver=role_resolver)

        serialised_tasks = self.task_processor.process_tasks(
            task, task_serializer.serialize())
        for serialized in serialised_tasks:
            # all skipped tasks shall have the type 'skipped';
            # do not exclude them from the graph, to keep connections
            # between nodes

            if skipped:
                task_type = consts.ORCHESTRATOR_TASK_TYPES.skipped
            else:
                task_type = serialized['type']

            task_relations = {
                'id': serialized['id'],
                'type': task_type,
                'requires': serialized.pop('requires', []),
                'required_for': serialized.pop('required_for', []),
                'cross_depends': serialized.pop('cross_depends', []),
                'cross_depended_by': serialized.pop('cross_depended_by', []),
                'requires_ex': serialized.pop('requires_ex', []),
                'required_for_ex': serialized.pop('required_for_ex', [])
            }
            node_ids = serialized.pop('uids', ())
            self.tasks_dictionary[serialized['id']] = serialized
            for node_id in node_ids:
                node_task = task_relations.copy()
                if not force and node_id in self.affected_node_ids:
                    node_task['type'] = consts.ORCHESTRATOR_TASK_TYPES.skipped

                node_tasks = self.tasks_connections[node_id]
                # de-duplicate the tasks on the node;
                # since a task can be added again after group expansion,
                # overwrite it if the existing task is skipped and the
                # new one is not.
                if self.need_update_task(node_tasks, node_task):
                    node_tasks[serialized['id']] = node_task

    def resolve_dependencies(self):
        """Resolves tasks dependencies."""

        for node_id, tasks in six.iteritems(self.tasks_connections):
            for task in six.itervalues(tasks):
                requires = set(
                    self.expand_dependencies(
                        node_id, task.pop('requires', None),
                        self.task_processor.get_last_task_id))
                requires.update(
                    self.expand_cross_dependencies(
                        task['id'],
                        node_id,
                        task.pop('cross_depends', None),
                        self.task_processor.get_last_task_id,
                    ))
                requires.update(task.pop('requires_ex', ()))

                required_for = set(
                    self.expand_dependencies(
                        node_id, task.pop('required_for', None),
                        self.task_processor.get_first_task_id))
                required_for.update(
                    self.expand_cross_dependencies(
                        task['id'], node_id, task.pop('cross_depended_by',
                                                      None),
                        self.task_processor.get_first_task_id))
                required_for.update(task.pop('required_for_ex', ()))
                # render
                if requires:
                    task['requires'] = [
                        dict(six.moves.zip(('name', 'node_id'), r))
                        for r in requires
                    ]
                if required_for:
                    task['required_for'] = [
                        dict(six.moves.zip(('name', 'node_id'), r))
                        for r in required_for
                    ]
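
    # The "render" step above turns (name, node_id) tuples into dicts; a
    # minimal illustration (values hypothetical):
    #
    #   >>> dict(six.moves.zip(('name', 'node_id'), ('netconfig', '1')))
    #   {'name': 'netconfig', 'node_id': '1'}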

    def expand_task_groups(self, groups, task_mapping):
        """Expand group of tasks.

        :param groups: all tasks with type 'group'
        :param task_mapping: the mapping task id to task object
        """
        for task in groups:
            skipped = not self.task_filter(task['id'])
            node_ids = self.role_resolver.resolve(task.get('role', ()))
            for sub_task_id in task.get('tasks', ()):
                try:
                    sub_task = task_mapping[sub_task_id]
                except KeyError:
                    raise errors.InvalidData('Task %s cannot be resolved',
                                             sub_task_id)

                # if the group is not excluded, all its tasks should run
                # as well; otherwise check each task individually
                self.process_task(sub_task,
                                  NullResolver(node_ids),
                                  skip=skipped
                                  and not self.task_filter(sub_task_id))

    def expand_dependencies(self, node_id, dependencies, task_resolver):
        """Expands task dependencies on same node.

        :param node_id: the ID of target node
        :param dependencies: the list of dependencies on same node
        :param task_resolver: the task name resolver
        """
        if not dependencies:
            return

        # need to search for dependencies on the node and in sync points
        node_ids = [node_id, None]
        for name in dependencies:
            relations = self.resolve_relation(name, node_ids, task_resolver,
                                              [])
            for rel in relations:
                yield rel

    def expand_cross_dependencies(self, task_id, node_id, dependencies,
                                  task_resolver):
        """Expands task dependencies on same node.

        :param task_id: the ID of the task for which the dependency is resolved
        :param node_id: the ID of target node
        :param dependencies: the list of cross-node dependencies
        :param task_resolver: the task name resolver
        """
        if not dependencies:
            return

        for dep in dependencies:
            roles = dep.get('role', consts.TASK_ROLES.all)

            if roles == consts.TASK_ROLES.self:
                node_ids = [node_id]
                excludes = []
            else:
                node_ids = self.role_resolver.resolve(
                    roles, dep.get('policy', consts.NODE_RESOLVE_POLICY.all))
                excludes = [(node_id, task_id)]

            relations = self.resolve_relation(dep['name'], node_ids,
                                              task_resolver, excludes)
            for rel in relations:
                yield rel

    def resolve_relation(self, name, node_ids, task_resolver, excludes):
        """Resolves the task relation.

        :param name: the name of task
        :param node_ids: the IDs of the nodes to search
        :param task_resolver: the task name resolver
        :param excludes: the nodes to exclude
        """
        match_policy = NameMatchingPolicy.create(name)
        for node_id in node_ids:
            applied_tasks = set()
            for task_name in self.tasks_connections[node_id]:
                if (node_id, task_name) in excludes:
                    continue
                if task_name == name:
                    # the simple case when the name of the current task
                    # is an exact match for the name being searched
                    yield task_name, node_id
                    continue

                # first get the original task name, which matters
                # when the current task is part of a chain
                original_task = self.task_processor.get_origin(task_name)
                if original_task in applied_tasks or \
                        not match_policy.match(original_task):
                    continue

                applied_tasks.add(original_task)
                if original_task is not task_name:
                    task_name = task_resolver(original_task)

                yield task_name, node_id

    @classmethod
    def need_update_task(cls, tasks, task):
        """Checks that task shall overwrite existed one or should be added.

        :param tasks: the current node tasks
        :param task: the astute task object
        :return: True if the task is not present or must be overwritten,
                 False otherwise
        """
        existed_task = tasks.get(task['id'])
        if existed_task is None:
            return True

        if existed_task['type'] == task['type']:
            return False

        return task['type'] != consts.ORCHESTRATOR_TASK_TYPES.skipped
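
    # Sketch of the three outcomes (assuming the enum value stringifies
    # as 'skipped'; task ids illustrative):
    #
    #   need_update_task({}, {'id': 't1', 'type': 'puppet'})     -> True
    #   need_update_task({'t1': {'type': 'puppet'}},
    #                    {'id': 't1', 'type': 'puppet'})         -> False
    #   need_update_task({'t1': {'type': 'skipped'}},
    #                    {'id': 't1', 'type': 'puppet'})         -> True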

    @classmethod
    def make_task_filter(cls, task_ids):
        """Makes task filter according to specified ids.

        :param task_ids: the selected ids of tasks
        :return: a function that checks a task id
        """
        if not task_ids:
            return lambda _: True

        if not isinstance(task_ids, set):
            task_ids = set(task_ids)

        return lambda task_id: task_id in task_ids
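
    # Minimal usage sketch of the filter factory (task ids illustrative):
    #
    #   task_filter = TasksSerializer.make_task_filter(['netconfig'])
    #   task_filter('netconfig')   # -> True
    #   task_filter('deploy_end')  # -> False
    #   TasksSerializer.make_task_filter(None)('anything')  # -> True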
Exemplo n.º 19
0
class TasksSerializer(object):
    """The deploy tasks serializer."""

    def __init__(self, cluster, nodes):
        """Initializes.

        :param cluster: Cluster instance
        :param nodes: the sequence of nodes for deploy
        """
        self.cluster = cluster
        self.role_resolver = RoleResolver(nodes)
        self.task_serializer = DeployTaskSerializer()
        self.task_processor = TaskProcessor()
        self.tasks_per_node = collections.defaultdict(dict)

    @classmethod
    def serialize(cls, cluster, nodes, tasks):
        """Resolves roles and dependencies for tasks.

        :param cluster: the cluster instance
        :param nodes: the list of nodes
        :param tasks: the list of tasks
        :return: the list of serialized task per node
        """
        serializer = cls(cluster, nodes)
        serializer.resolve_nodes(add_plugin_deployment_hooks(tasks), nodes)
        serializer.resolve_dependencies()
        return dict(
            (k, list(six.itervalues(v)))
            for k, v in six.iteritems(serializer.tasks_per_node)
        )

    def resolve_nodes(self, tasks, nodes):
        """Resolves node roles in tasks.

        :param tasks: the deployment tasks
        :param nodes: the list of nodes to deploy
        :return: the mapping of tasks per node
        """

        tasks_mapping = dict()
        tasks_groups = collections.defaultdict(set)

        for task in tasks:
            if task.get('type') == consts.ORCHESTRATOR_TASK_TYPES.group:
                tasks_for_role = task.get('tasks')
                if tasks_for_role:
                    tasks_groups[tuple(task.get('role', ()))].update(
                        tasks_for_role
                    )
                continue
            tasks_mapping[task['id']] = task
            self.process_task(task, nodes, lambda _: self.role_resolver)

        self.expand_task_groups(tasks_groups, tasks_mapping)
        # make sure that the null node is present
        self.tasks_per_node.setdefault(None, dict())

    def process_task(self, task, nodes, resolver_factory):
        """Processes one task one nodes of cluster.

        :param task: the task instance
        :param nodes: the list of nodes
        :param resolver_factory: the factory creates role-resolver
        """

        serializer_factory = self.task_serializer.get_stage_serializer(
            task
        )
        task_serializer = serializer_factory(
            task, self.cluster, nodes, role_resolver=resolver_factory(nodes)
        )
        # do not pass skipped attribute to astute
        skipped = task.pop('skipped', False) or \
            not task_serializer.should_execute()
        for astute_task in self.task_processor.process_tasks(
                task, task_serializer.serialize()):
            # all skipped tasks shall have the type 'skipped'
            if skipped:
                astute_task['type'] = \
                    consts.ORCHESTRATOR_TASK_TYPES.skipped

            for node_id in astute_task.pop('uids', ()):
                node_tasks = self.tasks_per_node[node_id]
                # de-duplicate the tasks on the node
                if astute_task['id'] in node_tasks:
                    continue
                node_tasks[astute_task['id']] = copy.deepcopy(astute_task)

    def resolve_dependencies(self):
        """Resolves tasks dependencies."""

        for node_id, tasks in six.iteritems(self.tasks_per_node):
            for task in six.itervalues(tasks):
                task['requires'] = list(
                    self.expand_dependencies(
                        node_id, task.get('requires'), False
                    )
                )
                task['required_for'] = list(
                    self.expand_dependencies(
                        node_id, task.get('required_for'), True
                    )
                )
                task['requires'].extend(
                    self.expand_cross_dependencies(
                        node_id, task.pop('cross-depends', None), False
                    )
                )

                task['required_for'].extend(
                    self.expand_cross_dependencies(
                        node_id, task.pop('cross-depended-by', None), True
                    )
                )
                task['requires'].extend(task.pop('requires_ex', ()))
                task['required_for'].extend(task.pop('required_for_ex', ()))

    def expand_task_groups(self, tasks_per_role, task_mapping):
        """Expand group of tasks.

        :param tasks_per_role: the set of tasks per role
        :param task_mapping: the mapping task id to task object
        """
        for roles, task_ids in six.iteritems(tasks_per_role):
            for task_id in task_ids:
                try:
                    task = task_mapping[task_id]
                except KeyError:
                    raise errors.InvalidData(
                        'Task %s cannot be resolved', task_id
                    )

                for node_id in self.role_resolver.resolve(roles):
                    self.process_task(task, [node_id], NullResolver)

    def expand_dependencies(self, node_id, dependencies, is_required_for):
        """Expands task dependencies on same node.

        :param node_id: the ID of target node
        :param dependencies: the list of dependencies on same node
        :param is_required_for: whether the task is from the required_for section
        """
        if not dependencies:
            return

        # need to search for dependencies on the node and in sync points
        node_ids = [node_id, None]
        for name in dependencies:
            for rel in self.resolve_relation(name, node_ids, is_required_for):
                yield rel

    def expand_cross_dependencies(
            self, node_id, dependencies, is_required_for):
        """Expands task dependencies on same node.

        :param node_id: the ID of target node
        :param dependencies: the list of cross-node dependencies
        :param is_required_for: whether the task is from the required_for section
        """
        if not dependencies:
            return

        for dep in dependencies:
            roles = dep.get('role', consts.TASK_ROLES.all)

            if roles == consts.TASK_ROLES.self:
                node_ids = [node_id]
            else:
                node_ids = self.role_resolver.resolve(
                    roles, dep.get('policy', consts.NODE_RESOLVE_POLICY.all)
                )
            relations = self.resolve_relation(
                dep['name'], node_ids, is_required_for
            )
            for rel in relations:
                yield rel

    def resolve_relation(self, name, node_ids, is_required_for):
        """Resolves the task relation.

        :param name: the name of task
        :param node_ids: the IDs of the nodes to search
        :param is_required_for: whether the task is from the required_for section
        """
        found = False
        match_policy = NameMatchingPolicy.create(name)
        for node_id in node_ids:
            applied_tasks = set()
            for task_name in self.tasks_per_node[node_id]:
                if task_name == name:
                    # the simple case when the name of the current task
                    # is an exact match for the name being searched
                    found = True
                    yield {"name": task_name, "node_id": node_id}
                    continue

                # first get the original task name, which matters
                # when the current task is part of a chain
                original_task = self.task_processor.get_origin(task_name)
                if original_task in applied_tasks or \
                        not match_policy.match(original_task):
                    continue

                found = True
                applied_tasks.add(original_task)
                if original_task is not task_name:
                    if is_required_for:
                        task_name_gen = self.task_processor.get_first_task_id
                    else:
                        task_name_gen = self.task_processor.get_last_task_id
                    task_name = task_name_gen(original_task)

                yield {"name": task_name, "node_id": node_id}

        if not found:
            logger.warning(
                "Dependency '%s' cannot be resolved: "
                "no candidates in nodes '%s'.",
                name, ", ".join(six.moves.map(str, node_ids))
            )
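
    # Usage sketch for this older variant (no task_ids/events support).
    # Unlike the newer serializer above, serialize() returns a single
    # mapping of node id to the list of astute tasks for that node:
    #
    #   deployment_info = TasksSerializer.serialize(cluster, nodes, tasks)
    #   # {node_id: [astute_task, ...], ..., None: [...]}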
Exemplo n.º 20
0
class TasksSerializer(object):
    """The deploy tasks serializer."""

    def __init__(self, cluster, nodes,
                 affected_nodes=None, task_ids=None, events=None):
        """Initializes.

        :param cluster: Cluster instance
        :param nodes: the sequence of nodes for deploy
        :param affected_nodes: the list of nodes affected by the deployment
        :param task_ids: only the specified tasks will be executed;
                         if None, all tasks will be executed
        :param events: the events (see TaskEvents)
        """
        if affected_nodes:
            self.affected_node_ids = frozenset(n.uid for n in affected_nodes)
            self.deployment_nodes = copy.copy(nodes)
            self.deployment_nodes.extend(affected_nodes)
        else:
            self.deployment_nodes = nodes
            self.affected_node_ids = frozenset()
        self.cluster = cluster
        self.role_resolver = RoleResolver(self.deployment_nodes)
        self.task_serializer = DeployTaskSerializer()
        self.task_processor = TaskProcessor()
        self.tasks_connections = collections.defaultdict(dict)
        self.tasks_dictionary = dict()
        self.task_filter = self.make_task_filter(task_ids)
        self.events = events

    @classmethod
    def serialize(cls, cluster, nodes, tasks,
                  affected_nodes=None, task_ids=None, events=None):
        """Resolves roles and dependencies for tasks.

        :param cluster: the cluster instance
        :param nodes: the list of nodes
        :param affected_nodes: the list of nodes affected by the deployment
        :param tasks: the list of tasks
        :param task_ids: only the specified tasks will be executed;
                         if None, all tasks will be executed
        :param events: the events (see TaskEvents)
        :return: the tasks dictionary and the serialized tasks per node
        """
        serializer = cls(cluster, nodes, affected_nodes, task_ids, events)
        serializer.resolve_nodes(add_plugin_deployment_hooks(tasks))
        serializer.resolve_dependencies()
        tasks_dictionary = serializer.tasks_dictionary
        tasks_connections = serializer.tasks_connections
        for node_id in tasks_connections:
            tasks_connections[node_id] = list(
                six.itervalues(tasks_connections[node_id])
            )
        return tasks_dictionary, tasks_connections

    def resolve_nodes(self, tasks):
        """Resolves node roles in tasks.

        :param tasks: the deployment tasks
        :return: the mapping of tasks per node
        """

        tasks_mapping = dict()
        groups = list()

        for task in tasks:
            if task.get('type') == consts.ORCHESTRATOR_TASK_TYPES.group:
                groups.append(task)
            else:
                tasks_mapping[task['id']] = task
                skip = not self.task_filter(task['id'])
                self.process_task(task, self.role_resolver, skip)

        self.expand_task_groups(groups, tasks_mapping)
        # make sure that the null node is present
        self.tasks_connections.setdefault(None, dict())

    def process_task(self, task, role_resolver, skip=False):
        """Processes one task one nodes of cluster.

        :param task: the task instance
        :param role_resolver: the role resolver
        :param skip: mark the task as skipped
        """

        serializer_factory = self.task_serializer.get_stage_serializer(
            task
        )
        task_serializer = serializer_factory(
            task, self.cluster, self.deployment_nodes,
            role_resolver=role_resolver
        )
        skipped = skip or not task_serializer.should_execute()
        force = self.events and self.events.check_subscription(task)
        if skipped and not force:
            # Do not call real serializer if it should be skipped
            task_serializer = NoopSerializer(
                task, self.cluster, self.deployment_nodes,
                role_resolver=role_resolver
            )

        serialised_tasks = self.task_processor.process_tasks(
            task, task_serializer.serialize()
        )
        for serialized in serialised_tasks:
            # all skipped tasks shall have the type 'skipped';
            # do not exclude them from the graph, to keep connections
            # between nodes

            if skipped:
                task_type = consts.ORCHESTRATOR_TASK_TYPES.skipped
            else:
                task_type = serialized['type']

            task_relations = {
                'id': serialized['id'],
                'type': task_type,
                'requires': serialized.pop('requires', []),
                'required_for': serialized.pop('required_for', []),
                'cross_depends': serialized.pop('cross_depends', []),
                'cross_depended_by': serialized.pop('cross_depended_by', []),
                'requires_ex': serialized.pop('requires_ex', []),
                'required_for_ex': serialized.pop('required_for_ex', [])
            }
            node_ids = serialized.pop('uids', ())
            self.tasks_dictionary[serialized['id']] = serialized
            for node_id in node_ids:
                node_task = task_relations.copy()
                if not force and node_id in self.affected_node_ids:
                    node_task['type'] = consts.ORCHESTRATOR_TASK_TYPES.skipped

                node_tasks = self.tasks_connections[node_id]
                # de-duplicate the tasks on the node;
                # since a task can be added again after group expansion,
                # overwrite it if the existing task is skipped and the
                # new one is not.
                if self.need_update_task(node_tasks, node_task):
                    node_tasks[serialized['id']] = node_task

    def resolve_dependencies(self):
        """Resolves tasks dependencies."""

        for node_id, tasks in six.iteritems(self.tasks_connections):
            for task in six.itervalues(tasks):
                requires = set(self.expand_dependencies(
                    node_id, task.pop('requires', None),
                    self.task_processor.get_last_task_id
                ))
                requires.update(self.expand_cross_dependencies(
                    task['id'], node_id, task.pop('cross_depends', None),
                    self.task_processor.get_last_task_id,
                ))
                requires.update(task.pop('requires_ex', ()))

                required_for = set(self.expand_dependencies(
                    node_id, task.pop('required_for', None),
                    self.task_processor.get_first_task_id
                ))
                required_for.update(self.expand_cross_dependencies(
                    task['id'], node_id, task.pop('cross_depended_by', None),
                    self.task_processor.get_first_task_id
                ))
                required_for.update(task.pop('required_for_ex', ()))
                # render
                if requires:
                    task['requires'] = [
                        dict(six.moves.zip(('name', 'node_id'), r))
                        for r in requires
                    ]
                if required_for:
                    task['required_for'] = [
                        dict(six.moves.zip(('name', 'node_id'), r))
                        for r in required_for
                    ]

    def expand_task_groups(self, groups, task_mapping):
        """Expand group of tasks.

        :param groups: all tasks with type 'group'
        :param task_mapping: the mapping task id to task object
        """
        for task in groups:
            skipped = not self.task_filter(task['id'])
            node_ids = self.role_resolver.resolve(task.get('role', ()))
            for sub_task_id in task.get('tasks', ()):
                try:
                    sub_task = task_mapping[sub_task_id]
                except KeyError:
                    raise errors.InvalidData(
                        'Task %s cannot be resolved', sub_task_id
                    )

                # if the group is not excluded, all its tasks should run
                # as well; otherwise check each task individually
                self.process_task(
                    sub_task, NullResolver(node_ids),
                    skip=skipped and not self.task_filter(sub_task_id)
                )

    def expand_dependencies(self, node_id, dependencies, task_resolver):
        """Expands task dependencies on same node.

        :param node_id: the ID of target node
        :param dependencies: the list of dependencies on same node
        :param task_resolver: the task name resolver
        """
        if not dependencies:
            return

        # need to search for dependencies on the node and in sync points
        node_ids = [node_id, None]
        for name in dependencies:
            relations = self.resolve_relation(
                name, node_ids, task_resolver, []
            )
            for rel in relations:
                yield rel

    def expand_cross_dependencies(
            self, task_id, node_id, dependencies, task_resolver):
        """Expands task dependencies on same node.

        :param task_id: the ID of the task for which the dependency is resolved
        :param node_id: the ID of target node
        :param dependencies: the list of cross-node dependencies
        :param task_resolver: the task name resolver
        """
        if not dependencies:
            return

        for dep in dependencies:
            roles = dep.get('role', consts.TASK_ROLES.all)

            if roles == consts.TASK_ROLES.self:
                node_ids = [node_id]
                excludes = []
            else:
                node_ids = self.role_resolver.resolve(
                    roles, dep.get('policy', consts.NODE_RESOLVE_POLICY.all)
                )
                excludes = [(node_id, task_id)]

            relations = self.resolve_relation(
                dep['name'], node_ids, task_resolver, excludes
            )
            for rel in relations:
                yield rel

    def resolve_relation(self, name, node_ids, task_resolver, excludes):
        """Resolves the task relation.

        :param name: the name of task
        :param node_ids: the IDs of the nodes to search
        :param task_resolver: the task name resolver
        :param excludes: the nodes to exclude
        """
        match_policy = NameMatchingPolicy.create(name)
        for node_id in node_ids:
            applied_tasks = set()
            for task_name in self.tasks_connections[node_id]:
                if (node_id, task_name) in excludes:
                    continue
                if task_name == name:
                    # the simple case when the name of the current task
                    # is an exact match for the name being searched
                    yield task_name, node_id
                    continue

                # first get the original task name, which matters
                # when the current task is part of a chain
                original_task = self.task_processor.get_origin(task_name)
                if original_task in applied_tasks or \
                        not match_policy.match(original_task):
                    continue

                applied_tasks.add(original_task)
                if original_task is not task_name:
                    task_name = task_resolver(original_task)

                yield task_name, node_id

    @classmethod
    def need_update_task(cls, tasks, task):
        """Checks that task shall overwrite existed one or should be added.

        :param tasks: the current node tasks
        :param task: the astute task object
        :return: True if the task is not present or must be overwritten,
                 False otherwise
        """
        existed_task = tasks.get(task['id'])
        if existed_task is None:
            return True

        if existed_task['type'] == task['type']:
            return False

        return task['type'] != consts.ORCHESTRATOR_TASK_TYPES.skipped

    @classmethod
    def make_task_filter(cls, task_ids):
        """Makes task filter according to specified ids.

        :param task_ids: the selected ids of tasks
        :return: a function that checks a task id
        """
        if not task_ids:
            return lambda _: True

        if not isinstance(task_ids, set):
            task_ids = set(task_ids)

        return lambda task_id: task_id in task_ids