Esempio n. 1
0
 def get_dpdk_queues_count(cls, instance):
     """Return the DPDK queue count for *instance*.

     The count is bounded both by the interface's max_queues metadata
     and by the number of CPUs available for the PMD threads.
     """
     pinned_cpus = utils.get_in(
         instance.node.attributes, 'cpu_pinning', 'dpdk', 'value') or 0
     queues_limit = utils.get_in(instance.meta, 'max_queues') or 0
     # One of the pinned CPUs is reserved for ovs_core_mask, so only
     # the remainder can serve ovs_pmd_core_mask.
     if pinned_cpus > 0:
         pmd_cpus = pinned_cpus - 1
     else:
         pmd_cpus = 0
     return min(queues_limit, pmd_cpus)
Esempio n. 2
0
 def get_dpdk_queues_count(cls, instance):
     """Compute how many DPDK queues this interface may use.

     Bounded by the node's max_queues metadata and by the CPUs left
     over for PMD threads after reserving one for ovs_core_mask.
     """
     cpu_count = utils.get_in(
         instance.node.attributes, 'cpu_pinning', 'dpdk', 'value') or 0
     max_queues = utils.get_in(instance.meta, 'max_queues') or 0
     # ovs_pmd_core_mask gets the pinned CPUs minus the single CPU
     # reserved for ovs_core_mask.
     pmd_cores = (cpu_count - 1) if cpu_count > 0 else 0
     return min(max_queues, pmd_cores)
Esempio n. 3
0
    def process_cluster_attributes(cls, cluster, attributes):
        """Generate Cluster-Plugins relation based on attributes.

        Iterates through plugins attributes, creates
        or deletes Cluster <-> Plugins relation if plugin
        is enabled or disabled.

        :param cluster: A cluster instance
        :type cluster: nailgun.db.sqlalchemy.models.cluster.Cluster
        :param attributes: Cluster attributes
        :type attributes: dict
        :raises errors.InvalidData: when an enabled plugin ships legacy
            (pre-2.0.0) tasks while propagate_task_deploy is unset on an
            LCM-capable release
        """
        # Local import to avoid a circular import at module load time.
        from nailgun.objects import Release

        plugins = {}

        # Detach plugins data so `attributes` keeps only non-plugin
        # sections; iterate over a snapshot since we pop while walking.
        for k in list(attributes):
            if cls.is_plugin_data(attributes[k]):
                plugins[k] = attributes.pop(k)['metadata']

        # Prefer the incoming value; if it is not being changed, fall
        # back to the value currently stored on the cluster.
        propagate_task_deploy = get_in(
            attributes, 'common', 'propagate_task_deploy', 'value')
        if propagate_task_deploy is not None:
            legacy_tasks_are_ignored = not propagate_task_deploy
        else:
            legacy_tasks_are_ignored = not get_in(
                cluster.attributes.editable,
                'common', 'propagate_task_deploy', 'value')

        for container in six.itervalues(plugins):
            default = container.get('default', False)
            for attrs in container.get('versions', []):
                version_metadata = attrs.pop('metadata')
                plugin_id = version_metadata['plugin_id']
                plugin = Plugin.get_by_uid(plugin_id)
                if not plugin:
                    logger.warning(
                        'Plugin with id "%s" is not found, skip it', plugin_id)
                    continue
                # A version is enabled only when its container is enabled
                # and this version is the chosen one.
                enabled = container['enabled'] \
                    and plugin_id == container['chosen_id']
                # Reuse the plugin fetched above instead of re-querying it
                # by the same id (the previous code issued a redundant
                # Plugin.get_by_uid(plugin.id) lookup here).
                if (enabled and
                        Release.is_lcm_supported(cluster.release) and
                        legacy_tasks_are_ignored and
                        cls.contains_legacy_tasks(wrap_plugin(plugin))):
                    raise errors.InvalidData(
                        'Cannot enable plugin with legacy tasks unless '
                        'propagate_task_deploy attribute is set. '
                        'Ensure tasks.yaml is empty and all tasks '
                        'has version >= 2.0.0.')
                ClusterPlugin.set_attributes(
                    cluster.id, plugin.id, enabled=enabled,
                    attrs=attrs if enabled or default else None
                )
Esempio n. 4
0
    def process_cluster_attributes(cls, cluster, attributes):
        """Generate Cluster-Plugins relation based on attributes.

        Iterates through plugins attributes, creates
        or deletes Cluster <-> Plugins relation if plugin
        is enabled or disabled.

        :param cluster: A cluster instance
        :type cluster: nailgun.db.sqlalchemy.models.cluster.Cluster
        :param attributes: Cluster attributes
        :type attributes: dict
        :raises errors.InvalidData: when an enabled plugin ships legacy
            (pre-2.0.0) tasks while propagate_task_deploy is unset on an
            LCM-capable release
        """
        # Local import to avoid a circular import at module load time.
        from nailgun.objects import Release

        plugins = {}

        # Detach plugins data so `attributes` keeps only non-plugin
        # sections; iterate over a snapshot since we pop while walking.
        for k in list(attributes):
            if cls.is_plugin_data(attributes[k]):
                plugins[k] = attributes.pop(k)['metadata']

        # Prefer the incoming value; if it is not being changed, fall
        # back to the value currently stored on the cluster.
        propagate_task_deploy = get_in(attributes, 'common',
                                       'propagate_task_deploy', 'value')
        if propagate_task_deploy is not None:
            legacy_tasks_are_ignored = not propagate_task_deploy
        else:
            legacy_tasks_are_ignored = not get_in(
                cluster.attributes.editable, 'common', 'propagate_task_deploy',
                'value')

        for container in six.itervalues(plugins):
            default = container.get('default', False)
            for attrs in container.get('versions', []):
                version_metadata = attrs.pop('metadata')
                plugin_id = version_metadata['plugin_id']
                plugin = Plugin.get_by_uid(plugin_id)
                if not plugin:
                    logger.warning('Plugin with id "%s" is not found, skip it',
                                   plugin_id)
                    continue
                # A version is enabled only when its container is enabled
                # and this version is the chosen one.
                enabled = container['enabled'] \
                    and plugin_id == container['chosen_id']
                # Reuse the plugin fetched above instead of re-querying it
                # by the same id (the previous code issued a redundant
                # Plugin.get_by_uid(plugin.id) lookup here).
                if (enabled and Release.is_lcm_supported(cluster.release)
                        and legacy_tasks_are_ignored
                        and cls.contains_legacy_tasks(wrap_plugin(plugin))):
                    raise errors.InvalidData(
                        'Cannot enable plugin with legacy tasks unless '
                        'propagate_task_deploy attribute is set. '
                        'Ensure tasks.yaml is empty and all tasks '
                        'has version >= 2.0.0.')
                ClusterPlugin.set_attributes(
                    cluster.id,
                    plugin.id,
                    enabled=enabled,
                    attrs=attrs if enabled or default else None)
Esempio n. 5
0
    def _validate_hugepages(cls, node, attrs):
        """Validate the node's hugepages configuration.

        Checks that every requested page size is supported by the node,
        that dpdk is given enough hugepages memory when enabled (or
        exactly zero when disabled), and that the requested pages can be
        distributed over the node's NUMA topology.
        """
        hugepages_on = objects.NodeAttributes.is_hugepages_enabled(
            node, attributes=attrs)
        if not hugepages_on:
            return

        requested_sizes = set(
            objects.NodeAttributes.total_hugepages(node, attributes=attrs))
        supported_sizes = {
            str(size)
            for size in node.meta['numa_topology']['supported_hugepages']}

        unsupported = requested_sizes - supported_sizes
        if unsupported:
            raise errors.InvalidData(
                "Node {0} doesn't support {1} Huge Page(s), supported Huge"
                " Page(s): {2}.".format(
                    node.id, ", ".join(unsupported),
                    ", ".join(supported_sizes)))

        dpdk_pages = utils.get_in(attrs, 'hugepages', 'dpdk', 'value')
        if objects.Node.dpdk_enabled(node):
            if dpdk_pages < consts.MIN_DPDK_HUGEPAGES_MEMORY:
                raise errors.InvalidData(
                    "Node {0} does not have enough hugepages for dpdk. "
                    "Need to allocate at least {1} MB.".format(
                        node.id, consts.MIN_DPDK_HUGEPAGES_MEMORY))
        elif dpdk_pages != 0:
            raise errors.InvalidData("Hugepages for dpdk should be equal to 0 "
                                     "if dpdk is disabled.")

        # Surface NUMA distribution failures as validation errors.
        try:
            objects.NodeAttributes.distribute_hugepages(node, attrs)
        except ValueError as exc:
            raise errors.InvalidData(exc.args[0])
Esempio n. 6
0
    def inject_node_status_transition(self, kwargs):
        """Attach per-node status transition data to *kwargs*.

        Maps the task status onto its transition selector; if the
        message metadata carries a transition for that selector, a
        'nodes' entry (one dict per uid in the tasks graph) is added.
        """
        transition_selectors = {
            consts.TASK_STATUSES.ready: 'successful',
            consts.TASK_STATUSES.error: 'failed',
            consts.TASK_STATUSES.stopped: 'stopped',
        }
        selector = transition_selectors.get(kwargs['status'])
        if selector is None:
            # Other statuses carry no node transition information.
            return

        transition = get_in(
            self.data['args'], 'tasks_metadata',
            'node_statuses_transitions', selector)
        if transition:
            kwargs['nodes'] = [
                dict(uid=uid, **transition)
                for uid in self.data['args']['tasks_graph']
            ]
Esempio n. 7
0
    def _validate_hugepages(cls, node, attrs):
        """Check that the hugepages settings of *node* are consistent.

        Rejects page sizes the hardware does not support, enforces the
        dpdk hugepages minimum (or exactly zero when dpdk is off), and
        verifies the pages can be spread across the NUMA topology.
        """
        if not objects.NodeAttributes.is_hugepages_enabled(
                node, attributes=attrs):
            return

        configured_sizes = set(
            objects.NodeAttributes.total_hugepages(node, attributes=attrs))
        node_supported = set()
        for page in node.meta['numa_topology']['supported_hugepages']:
            node_supported.add(str(page))

        missing = configured_sizes - node_supported
        if missing:
            raise errors.InvalidData(
                "Node {0} doesn't support {1} Huge Page(s), supported Huge"
                " Page(s): {2}.".format(
                    node.id,
                    ", ".join(missing),
                    ", ".join(node_supported)))

        dpdk_value = utils.get_in(attrs, 'hugepages', 'dpdk', 'value')
        if objects.Node.dpdk_enabled(node):
            required = consts.MIN_DPDK_HUGEPAGES_MEMORY
            if dpdk_value < required:
                raise errors.InvalidData(
                    "Node {0} does not have enough hugepages for dpdk. "
                    "Need to allocate at least {1} MB.".format(
                        node.id, required))
        elif dpdk_value != 0:
            raise errors.InvalidData(
                "Hugepages for dpdk should be equal to 0 "
                "if dpdk is disabled.")

        # Distribution failures (e.g. not enough memory on a NUMA cell)
        # are reported back as validation errors.
        try:
            objects.NodeAttributes.distribute_hugepages(node, attrs)
        except ValueError as exc:
            raise errors.InvalidData(exc.args[0])
Esempio n. 8
0
    def inject_node_status_transition(self, kwargs):
        """Add node status transition info to *kwargs* when available.

        The task status picks a transition selector; when the message
        metadata defines a transition for it, every uid in the tasks
        graph receives that transition via a 'nodes' list in kwargs.
        """
        for task_status, selector in (
                (consts.TASK_STATUSES.ready, 'successful'),
                (consts.TASK_STATUSES.error, 'failed'),
                (consts.TASK_STATUSES.stopped, 'stopped')):
            if kwargs['status'] == task_status:
                break
        else:
            # No transition data applies to other statuses.
            return

        node_transition = get_in(
            self.data['args'],
            'tasks_metadata', 'node_statuses_transitions', selector
        )
        if node_transition:
            kwargs['nodes'] = [
                dict(uid=uid, **node_transition)
                for uid in self.data['args']['tasks_graph']
            ]
Esempio n. 9
0
 def get_old_data(self, node_id, task_id):
     """Return the previous data for *node_id* within *task_id*.

     Combines the task's common section with the node-specific part;
     an empty dict is returned when there is no node-specific data.
     """
     per_node = utils.get_in(self.old, task_id, 'nodes', node_id)
     if per_node:
         return UnionDict(self.old[task_id]['common'], per_node)
     return {}
Esempio n. 10
0
def _get_node_attributes(graph, kind):
    """Return the node_attributes of *kind* from *graph*.

    Falls back to the module-level defaults when the graph does not
    define them explicitly.
    """
    attrs = get_in(graph, kind, 'node_attributes')
    if attrs is not None:
        return attrs
    return _DEFAULT_NODE_ATTRIBUTES[kind]
Esempio n. 11
0
def _get_node_attributes(graph, kind):
    """Fetch node_attributes for *kind*, defaulting when unset.

    An explicit value from *graph* wins; otherwise the entry from
    _DEFAULT_NODE_ATTRIBUTES is used.
    """
    explicit = get_in(graph, kind, 'node_attributes')
    return _DEFAULT_NODE_ATTRIBUTES[kind] if explicit is None else explicit
Esempio n. 12
0
 def get_old_data(self, node_id, task_id):
     """Return previously stored data for one node of one task.

     Merges the task-wide common data with the node-specific entry,
     or yields an empty dict when the node has no stored data.
     """
     node_specific = utils.get_in(self.old, task_id, 'nodes', node_id)
     if not node_specific:
         return {}
     common = self.old[task_id]['common']
     return UnionDict(common, node_specific)