示例#1
0
    def get_host_config(self, host):
        """Generate neutron host overrides for controller/worker nodes.

        Builds the SR-IOV physical device mappings from the host's
        interfaces and, on controllers, adds the BGP router id when the
        corresponding network service parameter is configured.
        """
        personalities = utils.get_personalities(host)
        if (constants.CONTROLLER not in personalities and
                constants.WORKER not in personalities):
            return {}

        device_mappings = []
        for iface in self.context['interfaces'].values():
            if iface['ifclass'] not in [constants.INTERFACE_CLASS_PCI_SRIOV]:
                continue
            port = interface.get_interface_port(self.context, iface)
            for dnet in interface.get_interface_datanets(self.context, iface):
                device_mappings.append(
                    "%s:%s" % (dnet['name'], port['name']))
                LOG.debug("get_host_config device_mappings=%s" %
                          device_mappings)

        config = {
            'neutron::agents::ml2::sriov::physical_device_mappings':
                device_mappings,
        }

        if host.personality != constants.CONTROLLER:
            return config

        service_parameters = self._get_service_parameter_configs(
            constants.SERVICE_TYPE_NETWORK)
        if service_parameters is None:
            return config

        # check if neutron bgp speaker is configured; the router id
        # parameter name depends on which controller this host is
        if host.hostname == constants.CONTROLLER_0_HOSTNAME:
            router_id_name = constants.SERVICE_PARAM_NAME_BGP_ROUTER_ID_C0
        else:
            router_id_name = constants.SERVICE_PARAM_NAME_BGP_ROUTER_ID_C1
        bgp_router_id = self._service_parameter_lookup_one(
            service_parameters,
            constants.SERVICE_PARAM_SECTION_NETWORK_BGP,
            router_id_name,
            None)

        if bgp_router_id is not None:
            config.update({
                'openstack::neutron::params::bgp_router_id':
                bgp_router_id})

        return config
示例#2
0
 def get_host_config(self, host):
     """Return the VIM database directory override for controller hosts."""
     if constants.CONTROLLER not in utils.get_personalities(host):
         return {}
     return {
         'nfv::vim::database_dir':
             "/opt/platform/nfv/vim/%s" % host.software_load,
     }
示例#3
0
    def _get_per_host_overrides(self):
        """Build the per-host nova override list for provisioned workers."""
        overrides = []
        for host in self.dbapi.ihost_get_list():
            if host.invprovision not in [constants.PROVISIONED,
                                         constants.PROVISIONING]:
                continue
            if constants.WORKER not in utils.get_personalities(host):
                continue

            default_config = {}
            vnc_config = {}
            libvirt_config = {}
            pci_config = {}
            # each updater fills its target dict(s) in place
            self._update_host_cpu_maps(host, default_config)
            self._update_host_storage(host, default_config, libvirt_config)
            self._update_host_addresses(host, default_config,
                                        vnc_config, libvirt_config)
            self._update_host_pci_whitelist(host, pci_config)
            overrides.append({
                'name': str(host.hostname),
                'conf': {
                    'nova': {
                        'DEFAULT': default_config,
                        'vnc': vnc_config,
                        'libvirt': libvirt_config,
                        'pci': pci_config if pci_config else None,
                    }
                }
            })
        return overrides
示例#4
0
    def _get_per_host_overrides(self):
        """Build per-host neutron agent overrides for openstack workers.

        A host qualifies when it is provisioned/provisioning or being
        (force-)unlocked, has the worker personality, and carries the
        openstack compute label.
        """
        host_list = []
        for host in self.dbapi.ihost_get_list():
            host_labels = self.dbapi.label_get_by_host(host.id)
            eligible = (
                host.invprovision in [constants.PROVISIONED,
                                      constants.PROVISIONING] or
                host.ihost_action in [constants.UNLOCK_ACTION,
                                      constants.FORCE_UNLOCK_ACTION])
            if not eligible:
                continue
            if (constants.WORKER not in utils.get_personalities(host) or
                    not utils.has_openstack_compute(host_labels)):
                continue

            host_neutron = {
                'name': str(host.hostname),
                'conf': {
                    'plugins': {
                        'openvswitch_agent':
                            self._get_dynamic_ovs_agent_config(host),
                        'sriov_agent':
                            self._get_dynamic_sriov_agent_config(host),
                    }
                }
            }
            # if ovs runs on host, auto bridge add is covered by sysinv
            if (utils.get_vswitch_type(self.dbapi) ==
                    constants.VSWITCH_TYPE_NONE):
                host_neutron['conf'].update(
                    {'auto_bridge_add': self._get_host_bridges(host)})
            host_list.append(host_neutron)

        return host_list
示例#5
0
    def get_host_config(self, host):
        """Return the smapi bind address override for controller hosts."""
        if constants.CONTROLLER not in utils.get_personalities(host):
            return {}
        return {
            'platform::smapi::params::bind_ip': host.mgmt_ip,
        }
示例#6
0
 def get_host_config(self, host):
     """Return vswitch overrides for workers running OVS-DPDK."""
     if (constants.WORKER not in utils.get_personalities(host)
             or self._vswitch_type() != constants.VSWITCH_TYPE_OVS_DPDK):
         return {}
     config = {}
     # merge each category of vswitch settings into a single dict
     for getter in (self._get_cpu_config,
                    self._get_memory_config,
                    self._get_port_config,
                    self._get_virtual_config,
                    self._get_lldp_config):
         config.update(getter(host))
     return config
示例#7
0
    def get_host_config(self, host):
        """Return cinder device/size overrides for controller hosts.

        Returns an empty dict when the host is not a controller or when
        no cinder device is configured for it.
        """
        if constants.CONTROLLER not in utils.get_personalities(host):
            return {}

        cinder_device, cinder_size_gib = utils._get_cinder_device_info(
            self.dbapi, host.id)
        if not cinder_device:
            return {}
        return {
            'openstack::cinder::params::cinder_device': cinder_device,
            'openstack::cinder::params::cinder_size': cinder_size_gib
        }
示例#8
0
    def _get_kvm_timer_advance_config(self, host):
        """Return KVM timer advance settings for a host.

        Timer advance is enabled (with a vcpu pin set) only for worker
        hosts carrying the openstack compute label.
        """
        enabled = False
        pin_set = None

        if constants.WORKER in utils.get_personalities(host):
            labels = self.dbapi.label_get_by_host(host.id)
            if utils.has_openstack_compute(labels):
                enabled = True
                pin_set = self._get_vcpu_pin_set(host)

        return {
            'platform::compute::kvm_timer_advance::enabled':
            enabled,
            'platform::compute::kvm_timer_advance::vcpu_pin_set': pin_set,
        }
示例#9
0
    def _get_per_host_overrides(self):
        """Build per-host nova overrides for openstack compute workers.

        A host qualifies when it is provisioned/provisioning or being
        (force-)unlocked, has the worker personality, and carries the
        openstack compute label.
        """
        host_list = []
        for host in self.dbapi.ihost_get_list():
            host_labels = self.dbapi.label_get_by_host(host.id)
            eligible = (
                host.invprovision in [constants.PROVISIONED,
                                      constants.PROVISIONING]
                or host.ihost_action in [constants.UNLOCK_ACTION,
                                         constants.FORCE_UNLOCK_ACTION])
            if not eligible:
                continue
            if (constants.WORKER not in utils.get_personalities(host)
                    or not utils.has_openstack_compute(host_labels)):
                continue

            default_config = {}
            vnc_config = {}
            libvirt_config = {}
            pci_config = {}
            neutron_config = {}
            per_physnet_numa_config = {}
            # each updater fills its target dict(s) in place
            self._update_host_cpu_maps(host, default_config)
            self._update_host_storage(host, default_config, libvirt_config)
            self._update_host_addresses(host, default_config,
                                        vnc_config, libvirt_config)
            self._update_host_pci_whitelist(host, pci_config)
            self._update_reserved_memory(host, default_config)
            self._update_host_neutron_physnet(host, neutron_config,
                                              per_physnet_numa_config)

            nova_conf = {
                'DEFAULT': default_config,
                'vnc': vnc_config,
                'libvirt': libvirt_config,
                'pci': pci_config if pci_config else None,
                'neutron': neutron_config
            }
            # per-physnet NUMA sections sit beside the standard sections
            nova_conf.update(per_physnet_numa_config)
            host_list.append({
                'name': str(host.hostname),
                'conf': {'nova': nova_conf},
            })
        return host_list
示例#10
0
    def _get_host_k8s_cgroup_config(self, host):
        """Compute the kubernetes cpuset/nodeset cgroup overrides.

        Workers running openstack compute confine k8s to the platform
        cpus; other workers use the non-platform cpus; all remaining
        hosts use every logical cpu.
        """
        # all logical cpus and numa nodes present on the host
        all_cpus = self._get_host_cpu_list(host, threads=True)
        all_cpuset = set(c.cpu for c in all_cpus)
        all_nodeset = set(c.numa_node for c in all_cpus)

        # platform-reserved logical cpus and their numa nodes
        plat_cpus = self._get_host_cpu_list(
            host, function=constants.PLATFORM_FUNCTION, threads=True)
        plat_cpuset = set(c.cpu for c in plat_cpus)
        plat_nodeset = set(c.numa_node for c in plat_cpus)

        # everything that is not platform-reserved
        nonplat_cpuset = all_cpuset - plat_cpuset
        nonplat_nodeset = set(c.numa_node for c in all_cpus
                              if c.cpu not in plat_cpuset)

        if constants.WORKER not in utils.get_personalities(host):
            k8s_cpuset = utils.format_range_set(all_cpuset)
            k8s_nodeset = utils.format_range_set(all_nodeset)
        elif self.is_openstack_compute(host):
            k8s_cpuset = utils.format_range_set(plat_cpuset)
            k8s_nodeset = utils.format_range_set(plat_nodeset)
        else:
            k8s_cpuset = utils.format_range_set(nonplat_cpuset)
            k8s_nodeset = utils.format_range_set(nonplat_nodeset)

        LOG.debug('host:%s, k8s_cpuset:%s, k8s_nodeset:%s', host.hostname,
                  k8s_cpuset, k8s_nodeset)

        return {
            'platform::kubernetes::params::k8s_cpuset':
            "\"%s\"" % k8s_cpuset,
            'platform::kubernetes::params::k8s_nodeset':
            "\"%s\"" % k8s_nodeset,
        }
示例#11
0
    def _get_host_pcidp_config(self, host):
        """Return SR-IOV device plugin resources for labelled workers.

        The host opts in by carrying the sriovdp label (matched as a
        "key=value" string); non-workers get an empty dict.
        """
        if constants.WORKER not in utils.get_personalities(host):
            return {}

        labels = self.dbapi.label_get_by_host(host.uuid)
        sriovdp_worker = any(
            constants.SRIOVDP_LABEL ==
            str(label.label_key) + '=' + str(label.label_value)
            for label in labels)

        if not sriovdp_worker:
            return {}
        return {
            'platform::kubernetes::worker::pci::pcidp_resources':
            self._get_pcidp_resources(host),
        }
示例#12
0
    def _get_per_host_overrides(self):
        """Build per-host neutron agent overrides for provisioned workers."""
        overrides = []
        for host in self.dbapi.ihost_get_list():
            if host.invprovision not in [constants.PROVISIONED,
                                         constants.PROVISIONING]:
                continue
            if constants.WORKER not in utils.get_personalities(host):
                continue

            overrides.append({
                'name': str(host.hostname),
                'conf': {
                    'plugins': {
                        'openvswitch_agent':
                            self._get_dynamic_ovs_agent_config(host),
                        'sriov_agent':
                            self._get_dynamic_sriov_agent_config(host),
                    }
                }
            })

        return overrides
示例#13
0
    def _get_host_cpu_config(self, host):
        """Assemble worker CPU layout overrides.

        Produces the worker/platform cpu lists, per-numa reserved core
        strings, and the kernel boot ``cpu_options`` (isolcpus,
        rcu_nocbs, kthread_cpus, irqaffinity, plus nohz_full on
        low-latency hosts). Returns an empty dict for non-worker hosts
        or when no CPU inventory is available yet.
        """
        config = {}
        if constants.WORKER in utils.get_personalities(host):
            host_cpus = self._get_host_cpu_list(host, threads=True)
            if not host_cpus:
                return config

            # Define the full range of CPUs for the compute host
            max_cpu = max(host_cpus, key=operator.attrgetter('cpu'))
            worker_cpu_list = "\"0-%d\"" % max_cpu.cpu

            platform_cpus_no_threads = self._get_platform_cpu_list(host)
            vswitch_cpus_no_threads = self._get_vswitch_cpu_list(host)

            # comma-separated platform cpu ids, quoted for hiera
            platform_cpu_list_with_quotes = \
                "\"%s\"" % ','.join([str(c.cpu) for c in platform_cpus_no_threads])

            platform_numa_cpus = utils.get_numa_index_list(
                platform_cpus_no_threads)
            vswitch_numa_cpus = utils.get_numa_index_list(
                vswitch_cpus_no_threads)

            # build a list of platform reserved cpus per numa node
            platform_cores = []
            for node, cpus in platform_numa_cpus.items():
                cpu_list = ','.join([str(c.cpu) for c in cpus])
                platform_node = "\"node%d:%s\"" % (node, cpu_list)
                platform_cores.append(platform_node)

            # build a list of vswitch reserved cpu counts per numa node
            vswitch_cores = []
            for node, cpus in vswitch_numa_cpus.items():
                cpu_count = len(cpus)
                vswitch_node = "\"node%d:%d\"" % (node, cpu_count)
                vswitch_cores.append(vswitch_node)

            reserved_platform_cores = "(%s)" % ' '.join(platform_cores)
            reserved_vswitch_cores = "(%s)" % ' '.join(vswitch_cores)

            # logical cpus in ascending id order
            host_cpus = sorted(host_cpus, key=lambda c: c.cpu)
            n_cpus = len(host_cpus)
            host_cpu_list = [c.cpu for c in host_cpus]

            platform_cpus = self._get_host_cpu_list(
                host, function=constants.PLATFORM_FUNCTION, threads=True)
            platform_cpus = sorted(platform_cpus, key=lambda c: c.cpu)
            platform_cpu_list = \
                "%s" % ','.join([str(c.cpu) for c in platform_cpus])

            vswitch_cpus = self._get_host_cpu_list(host,
                                                   constants.VSWITCH_FUNCTION,
                                                   threads=True)
            vswitch_cpus = sorted(vswitch_cpus, key=lambda c: c.cpu)
            vswitch_cpu_list = \
                "%s" % ','.join([str(c.cpu) for c in vswitch_cpus])

            # rcu_nocbs = all cores - platform cores
            rcu_nocbs = copy.deepcopy(host_cpu_list)
            for i in [int(s) for s in platform_cpu_list.split(',')]:
                rcu_nocbs.remove(i)

            # change the CPU list to ranges
            # (groupby on cpu-minus-index collapses consecutive runs)
            rcu_nocbs_ranges = ""
            for key, group in itertools.groupby(enumerate(rcu_nocbs),
                                                lambda xy: xy[1] - xy[0]):
                group = list(group)
                rcu_nocbs_ranges += "%s-%s," % (group[0][1], group[-1][1])
            rcu_nocbs_ranges = rcu_nocbs_ranges.rstrip(',')

            # non-vswitch CPUs = all cores - vswitch cores
            # NOTE(review): this aliases host_cpu_list (no copy), so the
            # removals below also mutate host_cpu_list; harmless here
            # because host_cpu_list is not used again afterwards.
            non_vswitch_cpus = host_cpu_list
            for i in [c.cpu for c in vswitch_cpus]:
                non_vswitch_cpus.remove(i)

            # change the CPU list to ranges
            non_vswitch_cpus_ranges = ""
            for key, group in itertools.groupby(enumerate(non_vswitch_cpus),
                                                lambda xy: xy[1] - xy[0]):
                group = list(group)
                non_vswitch_cpus_ranges += "\"%s-%s\"," % (group[0][1],
                                                           group[-1][1])

            cpu_options = ""
            if constants.LOWLATENCY in host.subfunctions:
                vswitch_cpu_list_with_quotes = \
                    "\"%s\"" % ','.join([str(c.cpu) for c in vswitch_cpus])
                # low-latency: vswitch cpus get low-wakeup PM QoS, the
                # rest are high-wakeup; tickless (nohz_full) covers the
                # non-platform (rcu_nocbs) cpu ranges
                config.update({
                    'platform::compute::pmqos::low_wakeup_cpus':
                    vswitch_cpu_list_with_quotes,
                    'platform::compute::pmqos::hight_wakeup_cpus':
                    non_vswitch_cpus_ranges.rstrip(',')
                })
                vswitch_cpu_list = rcu_nocbs_ranges
                cpu_options += "nohz_full=%s " % vswitch_cpu_list
            # isolcpus isolates the vswitch cpus (or the rcu_nocbs ranges
            # on low-latency hosts); kthreads and IRQs are pinned to
            # platform cpus
            cpu_options += "isolcpus=%s rcu_nocbs=%s kthread_cpus=%s " \
                "irqaffinity=%s" % (vswitch_cpu_list,
                                    rcu_nocbs_ranges,
                                    platform_cpu_list,
                                    platform_cpu_list)
            config.update({
                'platform::compute::params::worker_cpu_list':
                worker_cpu_list,
                'platform::compute::params::platform_cpu_list':
                platform_cpu_list_with_quotes,
                'platform::compute::params::reserved_vswitch_cores':
                reserved_vswitch_cores,
                'platform::compute::params::reserved_platform_cores':
                reserved_platform_cores,
                'platform::compute::grub::params::n_cpus':
                n_cpus,
                'platform::compute::grub::params::cpu_options':
                cpu_options,
            })
        return config
示例#14
0
 def _get_datanetworks(self, host):
     """Return all data networks for worker hosts, else an empty dict."""
     if constants.WORKER not in utils.get_personalities(host):
         return {}
     return self.dbapi.datanetworks_get_all()
示例#15
0
    def _get_host_cpu_config(self, host):
        """Assemble worker CPU layout overrides using range-set strings.

        Produces the worker/platform cpu range sets, per-numa reserved
        core strings, and the kernel boot ``cpu_options`` built from a
        key->range map (nohz_full on low-latency hosts, isolcpus over
        vswitch+isolated cpus, rcu_nocbs over non-platform cpus,
        kthread_cpus on platform cpus, irqaffinity on application cpus
        when present, otherwise platform cpus). Returns an empty dict
        for non-worker hosts or when no CPU inventory exists yet.
        """
        config = {}
        if constants.WORKER in utils.get_personalities(host):
            host_cpus = self._get_host_cpu_list(host, threads=True)
            if not host_cpus:
                return config

            platform_cpus_no_threads = self._get_platform_cpu_list(host)
            vswitch_cpus_no_threads = self._get_vswitch_cpu_list(host)

            platform_numa_cpus = utils.get_numa_index_list(
                platform_cpus_no_threads)
            vswitch_numa_cpus = utils.get_numa_index_list(
                vswitch_cpus_no_threads)

            # build a list of platform reserved cpus per numa node
            platform_cores = []
            for node, cpus in platform_numa_cpus.items():
                cpu_list = ','.join([str(c.cpu) for c in cpus])
                platform_node = "\"node%d:%s\"" % (node, cpu_list)
                platform_cores.append(platform_node)

            # build a list of vswitch reserved cpu counts per numa node
            vswitch_cores = []
            for node, cpus in vswitch_numa_cpus.items():
                cpu_count = len(cpus)
                vswitch_node = "\"node%d:%d\"" % (node, cpu_count)
                vswitch_cores.append(vswitch_node)

            reserved_platform_cores = "(%s)" % ' '.join(platform_cores)
            reserved_vswitch_cores = "(%s)" % ' '.join(vswitch_cores)

            # all logical cpus
            host_cpus = self._get_host_cpu_list(host, threads=True)
            host_cpuset = set([c.cpu for c in host_cpus])
            host_ranges = utils.format_range_set(host_cpuset)
            n_cpus = len(host_cpuset)

            # platform logical cpus
            platform_cpus = self._get_host_cpu_list(
                host, function=constants.PLATFORM_FUNCTION, threads=True)
            platform_cpuset = set([c.cpu for c in platform_cpus])
            platform_ranges = utils.format_range_set(platform_cpuset)

            # vswitch logical cpus
            vswitch_cpus = self._get_host_cpu_list(host,
                                                   constants.VSWITCH_FUNCTION,
                                                   threads=True)
            vswitch_cpuset = set([c.cpu for c in vswitch_cpus])

            # non-platform logical cpus
            rcu_nocbs_cpuset = host_cpuset - platform_cpuset
            rcu_nocbs_ranges = utils.format_range_set(rcu_nocbs_cpuset)

            # isolated logical cpus
            app_isolated_cpus = self._get_host_cpu_list(
                host, constants.ISOLATED_FUNCTION, threads=True)
            app_isolated_cpuset = set([c.cpu for c in app_isolated_cpus])

            # application cpus
            app_cpus = self._get_host_cpu_list(host,
                                               constants.APPLICATION_FUNCTION,
                                               threads=True)
            app_cpuset = set([c.cpu for c in app_cpus])
            app_ranges = utils.format_range_set(app_cpuset)

            cpu_options = ""
            cpu_ranges = {}
            if constants.LOWLATENCY in host.subfunctions:
                # set PM QoS latency that achieves C1 state for all cpus
                config.update({
                    'platform::compute::pmqos::low_wakeup_cpus':
                    "\"%s\"" % host_ranges,
                    'platform::compute::pmqos::hight_wakeup_cpus':
                    "\"%s\"" % "",
                })
                cpu_ranges.update({"nohz_full": rcu_nocbs_ranges})

            # isolate both vswitch cpus and application-isolated cpus
            isolcpus_ranges = utils.format_range_set(
                vswitch_cpuset.union(app_isolated_cpuset))

            cpu_ranges.update({
                "isolcpus": isolcpus_ranges,
                "rcu_nocbs": rcu_nocbs_ranges,
                "kthread_cpus": platform_ranges
            })

            # Put IRQs on application cores if they are configured.
            # Note that PCI IRQs for platform interfaces are reaffined to
            # platform cores at runtime.
            if app_cpuset:
                cpu_ranges.update({"irqaffinity": app_ranges})
            else:
                cpu_ranges.update({"irqaffinity": platform_ranges})

            # only emit options whose range string is non-empty
            for key, value in cpu_ranges.items():
                if str(value).strip() != "":
                    cpu_options += "%s=%s " % (key, value)

            config.update({
                'platform::compute::params::worker_cpu_list':
                "\"%s\"" % host_ranges,
                'platform::compute::params::platform_cpu_list':
                "\"%s\"" % platform_ranges,
                'platform::compute::params::reserved_vswitch_cores':
                reserved_vswitch_cores,
                'platform::compute::params::reserved_platform_cores':
                reserved_platform_cores,
                'platform::compute::grub::params::n_cpus':
                n_cpus,
                'platform::compute::grub::params::cpu_options':
                cpu_options,
            })
        return config
示例#16
0
    def _get_host_k8s_cgroup_config(self, host):
        """Compute kubernetes cgroup/kubelet overrides for a host.

        Derives the k8s cpuset/nodeset, the platform and all-reserved
        cpu range sets passed to kubelet, the platform reserved memory
        total, and any cpu/topology manager policies set via host
        labels.
        """
        config = {}

        # determine set of all logical cpus and nodes
        host_cpus = self._get_host_cpu_list(host, threads=True)
        host_cpuset = set([c.cpu for c in host_cpus])
        host_nodeset = set([c.numa_node for c in host_cpus])

        # determine set of platform logical cpus and nodes
        platform_cpus = self._get_host_cpu_list(
            host, function=constants.PLATFORM_FUNCTION, threads=True)
        platform_cpuset = set([c.cpu for c in platform_cpus])
        platform_nodeset = set([c.numa_node for c in platform_cpus])

        vswitch_cpus = self._get_host_cpu_list(
            host, function=constants.VSWITCH_FUNCTION, threads=True)
        vswitch_cpuset = set([c.cpu for c in vswitch_cpus])

        # determine set of isolcpus logical cpus and nodes
        isol_cpus = self._get_host_cpu_list(
            host, function=constants.ISOLATED_FUNCTION, threads=True)
        isol_cpuset = set([c.cpu for c in isol_cpus])

        # determine reserved sets of logical cpus in a string range set format
        # to pass as options to kubelet
        k8s_platform_cpuset = utils.format_range_set(platform_cpuset)
        k8s_all_reserved_cpuset = utils.format_range_set(platform_cpuset
                                                         | vswitch_cpuset
                                                         | isol_cpuset)

        # determine platform reserved memory
        # (sum of per-numa platform_reserved_mib values, skipping nodes
        # where the value is not set)
        k8s_reserved_mem = 0
        host_memory = self.dbapi.imemory_get_by_ihost(host.id)
        numa_memory = utils.get_numa_index_list(host_memory)
        for node, memory in numa_memory.items():
            reserved_mib = memory[0].platform_reserved_mib
            if reserved_mib is not None:
                k8s_reserved_mem += reserved_mib

        # determine set of nonplatform logical cpus
        # TODO(jgauld): Commented out for now, using host_cpuset instead.
        # nonplatform_cpuset = host_cpuset - platform_cpuset

        if constants.WORKER in utils.get_personalities(host):
            if self.is_openstack_compute(host):
                k8s_cpuset = utils.format_range_set(platform_cpuset)
                k8s_nodeset = utils.format_range_set(platform_nodeset)
            else:
                # kubelet cpumanager is configured with static policy.
                # The resulting DefaultCPUSet excludes reserved cpus
                # based on topology, and that also happens to correspond
                # to the platform_cpuset. kubepods are allowed to
                # span all host numa nodes.
                # TODO(jgauld): Temporary workaround until we have a version
                # of kubelet that excludes reserved cpus from DefaultCPUSet.
                # The intent is to base k8s_cpuset on nonplatform_cpuset.
                # Commented out for now, using host_cpuset instead.
                # k8s_cpuset = utils.format_range_set(nonplatform_cpuset)
                k8s_cpuset = utils.format_range_set(host_cpuset)
                k8s_nodeset = utils.format_range_set(host_nodeset)
        else:
            k8s_cpuset = utils.format_range_set(host_cpuset)
            k8s_nodeset = utils.format_range_set(host_nodeset)

        LOG.debug('host:%s, k8s_cpuset:%s, k8s_nodeset:%s', host.hostname,
                  k8s_cpuset, k8s_nodeset)

        # determine cpu/topology mgr policies from host labels
        labels = self.dbapi.label_get_by_host(host.uuid)
        for label in labels:
            if label.label_key == constants.KUBE_TOPOLOGY_MANAGER_LABEL:
                config.update({
                    'platform::kubernetes::params::k8s_topology_mgr_policy':
                    label.label_value
                })
            elif label.label_key == constants.KUBE_CPU_MANAGER_LABEL:
                config.update({
                    'platform::kubernetes::params::k8s_cpu_mgr_policy':
                    label.label_value
                })

        config.update({
            'platform::kubernetes::params::k8s_cpuset':
            "\"%s\"" % k8s_cpuset,
            'platform::kubernetes::params::k8s_nodeset':
            "\"%s\"" % k8s_nodeset,
            'platform::kubernetes::params::k8s_platform_cpuset':
            "\"%s\"" % k8s_platform_cpuset,
            'platform::kubernetes::params::k8s_all_reserved_cpuset':
            "\"%s\"" % k8s_all_reserved_cpuset,
            'platform::kubernetes::params::k8s_reserved_mem':
            k8s_reserved_mem,
        })

        return config
示例#17
0
    def _get_host_memory_config(self, host):
        """Assemble per-numa memory and hugepage overrides for a worker.

        For each numa node, builds the platform reserved memory string,
        the vswitch hugepage reservation, and the 2M/1G/4K hugepage
        totals (vm pages plus vswitch pages of the matching size). When
        the requested vswitch page count differs from what is currently
        allocated, also emits the grub 1G hugepage boot argument and,
        if 2M vswitch pages are in use, the 2M hugepage mount dir.
        Returns an empty dict for non-worker hosts.
        """
        config = {}
        if constants.WORKER in utils.get_personalities(host):
            host_memory = self.dbapi.imemory_get_by_ihost(host.id)
            memory_numa_list = utils.get_numa_index_list(host_memory)

            platform_cpus_no_threads = self._get_platform_cpu_list(host)
            platform_core_count = len(platform_cpus_no_threads)

            platform_nodes = []
            vswitch_nodes = []

            hugepages_2Ms = []
            hugepages_1Gs = []
            vswitch_2M_pages = []
            vswitch_1G_pages = []
            vm_4K_pages = []
            vm_2M_pages = []
            vm_1G_pages = []

            vs_pages_updated = False

            for node, memory_list in memory_numa_list.items():

                # only the first memory entry per numa node is used
                memory = memory_list[0]
                vswitch_2M_page = 0
                vswitch_1G_page = 0

                platform_size = memory.platform_reserved_mib
                platform_node = "\"node%d:%dMB:%d\"" % (
                    node, platform_size, platform_core_count)
                platform_nodes.append(platform_node)

                # prefer the requested (pending) vswitch page count,
                # falling back to the currently allocated count
                vswitch_size = memory.vswitch_hugepages_size_mib
                vswitch_pages = memory.vswitch_hugepages_reqd \
                    if memory.vswitch_hugepages_reqd is not None \
                    else memory.vswitch_hugepages_nr

                if vswitch_pages == 0:
                    vswitch_pages = memory.vswitch_hugepages_nr

                vswitch_node = "\"node%d:%dkB:%d\"" % (
                        node, vswitch_size * 1024, vswitch_pages)
                vswitch_nodes.append(vswitch_node)

                # pending VM hugepage counts take precedence over current
                vm_hugepages_nr_2M = memory.vm_hugepages_nr_2M_pending \
                    if memory.vm_hugepages_nr_2M_pending is not None \
                    else memory.vm_hugepages_nr_2M
                vm_hugepages_nr_1G = memory.vm_hugepages_nr_1G_pending \
                    if memory.vm_hugepages_nr_1G_pending is not None \
                    else memory.vm_hugepages_nr_1G
                vm_hugepages_nr_4K = memory.vm_hugepages_nr_4K \
                    if memory.vm_hugepages_nr_4K is not None else 0

                # totals include vswitch pages of the matching size
                total_hugepages_2M = vm_hugepages_nr_2M
                total_hugepages_1G = vm_hugepages_nr_1G

                if memory.vswitch_hugepages_size_mib == constants.MIB_2M:
                    total_hugepages_2M += vswitch_pages
                    vswitch_2M_page += vswitch_pages
                elif memory.vswitch_hugepages_size_mib == constants.MIB_1G:
                    total_hugepages_1G += vswitch_pages
                    vswitch_1G_page += vswitch_pages

                vswitch_2M_pages.append(vswitch_2M_page)
                vswitch_1G_pages.append(vswitch_1G_page)

                hugepages_2M = "\"node%d:%dkB:%d\"" % (
                    node, constants.MIB_2M * 1024, total_hugepages_2M)
                hugepages_1G = "\"node%d:%dkB:%d\"" % (
                    node, constants.MIB_1G * 1024, total_hugepages_1G)
                hugepages_2Ms.append(hugepages_2M)
                hugepages_1Gs.append(hugepages_1G)

                vm_4K_pages.append(vm_hugepages_nr_4K)
                vm_2M_pages.append(vm_hugepages_nr_2M)
                vm_1G_pages.append(vm_hugepages_nr_1G)

                # flag when the requested count differs from the current
                # allocation, so grub parameters get regenerated below
                if (memory.vswitch_hugepages_reqd and
                        vswitch_pages != memory.vswitch_hugepages_nr):
                    vs_pages_updated = True

            platform_reserved_memory = "(%s)" % ' '.join(platform_nodes)
            vswitch_reserved_memory = "(%s)" % ' '.join(vswitch_nodes)

            nr_hugepages_2Ms = "(%s)" % ' '.join(hugepages_2Ms)
            nr_hugepages_1Gs = "(%s)" % ' '.join(hugepages_1Gs)

            vswitch_2M = "\"%s\"" % ','.join([str(i) for i in vswitch_2M_pages])
            vswitch_1G = "\"%s\"" % ','.join([str(i) for i in vswitch_1G_pages])
            vm_4K = "\"%s\"" % ','.join([str(i) for i in vm_4K_pages])
            vm_2M = "\"%s\"" % ','.join([str(i) for i in vm_2M_pages])
            vm_1G = "\"%s\"" % ','.join([str(i) for i in vm_1G_pages])

            config.update({
                'platform::compute::params::worker_base_reserved':
                    platform_reserved_memory,
                'platform::compute::params::compute_vswitch_reserved':
                    vswitch_reserved_memory,
                'platform::compute::hugepage::params::nr_hugepages_2M':
                    nr_hugepages_2Ms,
                'platform::compute::hugepage::params::nr_hugepages_1G':
                    nr_hugepages_1Gs,
                'platform::compute::hugepage::params::vswitch_2M_pages':
                    vswitch_2M,
                'platform::compute::hugepage::params::vswitch_1G_pages':
                    vswitch_1G,
                'platform::compute::hugepage::params::vm_4K_pages':
                    vm_4K,
                'platform::compute::hugepage::params::vm_2M_pages':
                    vm_2M,
                'platform::compute::hugepage::params::vm_1G_pages':
                    vm_1G,
            })
            if vs_pages_updated:
                grub_hugepages_1G = "hugepagesz=1G hugepages=%d" % (
                    sum(vswitch_1G_pages) + sum(vm_1G_pages))
                config.update({
                    'platform::compute::grub::params::g_hugepages':
                    grub_hugepages_1G,
                })
                if sum(vswitch_2M_pages) > 0:
                    config.update({
                        'platform::vswitch::params::hugepage_dir': '/mnt/huge-2048kB'
                    })

        return config
示例#18
0
    def _get_host_cpu_config(self, host):
        """Generate puppet hiera CPU configuration for a worker host.

        Builds per-NUMA-node platform/vswitch reserved CPU lists and the
        kernel boot (grub) cpu isolation options from the host's CPU
        function assignments.

        :param host: host object with personality/subfunction attributes
        :returns: dict of puppet hiera parameters; empty for hosts
                  without the worker personality or without cpu inventory
        """
        config = {}
        if constants.WORKER not in utils.get_personalities(host):
            return config

        # All logical cpus (hyperthread siblings included).  Fetched once
        # and reused below; the original code re-queried this list a
        # second time for no reason.
        host_cpus = self._get_host_cpu_list(host, threads=True)
        if not host_cpus:
            # CPU inventory not yet reported for this host.
            return config

        platform_cpus_no_threads = self._get_platform_cpu_list(host)
        vswitch_cpus_no_threads = self._get_vswitch_cpu_list(host)

        platform_numa_cpus = utils.get_numa_index_list(platform_cpus_no_threads)
        vswitch_numa_cpus = utils.get_numa_index_list(vswitch_cpus_no_threads)

        # Build a list of platform reserved cpus per numa node,
        # e.g. "node0:0,1".
        platform_cores = []
        for node, cpus in platform_numa_cpus.items():
            cpu_list = ','.join([str(c.cpu) for c in cpus])
            platform_node = "\"node%d:%s\"" % (node, cpu_list)
            platform_cores.append(platform_node)

        # Build a list of vswitch reserved cpu *counts* per numa node,
        # e.g. "node0:2".
        vswitch_cores = []
        for node, cpus in vswitch_numa_cpus.items():
            vswitch_node = "\"node%d:%d\"" % (node, len(cpus))
            vswitch_cores.append(vswitch_node)

        reserved_platform_cores = "(%s)" % ' '.join(platform_cores)
        reserved_vswitch_cores = "(%s)" % ' '.join(vswitch_cores)

        # All logical cpus as a range string (reuses host_cpus from above).
        host_cpuset = set([c.cpu for c in host_cpus])
        host_ranges = utils.format_range_set(host_cpuset)
        n_cpus = len(host_cpuset)

        # Platform logical cpus.
        platform_cpus = self._get_host_cpu_list(
            host, function=constants.PLATFORM_FUNCTION, threads=True)
        platform_cpuset = set([c.cpu for c in platform_cpus])
        platform_ranges = utils.format_range_set(platform_cpuset)

        # vswitch logical cpus.
        vswitch_cpus = self._get_host_cpu_list(
            host, constants.VSWITCH_FUNCTION, threads=True)
        vswitch_cpuset = set([c.cpu for c in vswitch_cpus])
        vswitch_ranges = utils.format_range_set(vswitch_cpuset)

        # Non-platform logical cpus (candidates for rcu_nocbs).
        rcu_nocbs_cpuset = host_cpuset - platform_cpuset
        rcu_nocbs_ranges = utils.format_range_set(rcu_nocbs_cpuset)

        # Non-vswitch logical cpus.
        non_vswitch_cpuset = host_cpuset - vswitch_cpuset
        non_vswitch_ranges = utils.format_range_set(non_vswitch_cpuset)

        cpu_options = ""
        if constants.LOWLATENCY in host.subfunctions:
            config.update({
                # NOTE(review): 'hight_wakeup_cpus' is a typo, but it is
                # the hiera key the puppet manifests consume — do not
                # rename here without changing the manifests in lockstep.
                'platform::compute::pmqos::low_wakeup_cpus':
                    "\"%s\"" % vswitch_ranges,
                'platform::compute::pmqos::hight_wakeup_cpus':
                    "\"%s\"" % non_vswitch_ranges,
            })
            # On low-latency hosts, isolate/nohz_full all non-platform
            # cpus rather than just the vswitch cpus.
            vswitch_ranges = rcu_nocbs_ranges
            cpu_options += "nohz_full=%s " % vswitch_ranges

        cpu_options += "isolcpus=%s rcu_nocbs=%s kthread_cpus=%s " \
            "irqaffinity=%s" % (vswitch_ranges,
                                rcu_nocbs_ranges,
                                platform_ranges,
                                platform_ranges)
        config.update({
            'platform::compute::params::worker_cpu_list':
                "\"%s\"" % host_ranges,
            'platform::compute::params::platform_cpu_list':
                "\"%s\"" % platform_ranges,
            'platform::compute::params::reserved_vswitch_cores':
                reserved_vswitch_cores,
            'platform::compute::params::reserved_platform_cores':
                reserved_platform_cores,
            'platform::compute::grub::params::n_cpus': n_cpus,
            'platform::compute::grub::params::cpu_options': cpu_options,
        })
        return config
示例#19
0
    def _get_host_k8s_cgroup_config(self, host):
        """Return kubernetes cgroup hiera parameters for this host.

        Computes the cpuset/nodeset kubepods may span and the cpu count
        and memory (MiB) reserved for the platform.
        """
        # Full set of logical cpus and the numa nodes they belong to.
        all_cpus = self._get_host_cpu_list(host, threads=True)
        all_cpuset = set(c.cpu for c in all_cpus)
        all_nodeset = set(c.numa_node for c in all_cpus)

        # Platform-assigned logical cpus and their numa nodes.
        plat_cpus = self._get_host_cpu_list(
            host, function=constants.PLATFORM_FUNCTION, threads=True)
        plat_cpuset = set(c.cpu for c in plat_cpus)
        plat_nodeset = set(c.numa_node for c in plat_cpus)

        # Number of logical cpus reserved for platform use.
        k8s_reserved_cpus = len(plat_cpuset)

        # Total platform reserved memory: sum the first memory entry of
        # each numa node, skipping nodes with no reservation recorded.
        numa_memory = utils.get_numa_index_list(
            self.dbapi.imemory_get_by_ihost(host.id))
        k8s_reserved_mem = sum(
            mems[0].platform_reserved_mib
            for mems in numa_memory.values()
            if mems[0].platform_reserved_mib is not None)

        if (constants.WORKER in utils.get_personalities(host) and
                self.is_openstack_compute(host)):
            # Openstack compute workers pin kubepods to the platform
            # cpus/nodes only.
            k8s_cpuset = utils.format_range_set(plat_cpuset)
            k8s_nodeset = utils.format_range_set(plat_nodeset)
        else:
            # Other workers (kubelet cpumanager static policy already
            # excludes reserved cpus) and non-worker hosts span the full
            # host cpu/node set.
            # TODO(jgauld): base k8s_cpuset on the non-platform cpus once
            # kubelet excludes reserved cpus from its DefaultCPUSet.
            k8s_cpuset = utils.format_range_set(all_cpuset)
            k8s_nodeset = utils.format_range_set(all_nodeset)

        LOG.debug('host:%s, k8s_cpuset:%s, k8s_nodeset:%s', host.hostname,
                  k8s_cpuset, k8s_nodeset)

        return {
            'platform::kubernetes::params::k8s_cpuset':
            "\"%s\"" % k8s_cpuset,
            'platform::kubernetes::params::k8s_nodeset':
            "\"%s\"" % k8s_nodeset,
            'platform::kubernetes::params::k8s_reserved_cpus':
            k8s_reserved_cpus,
            'platform::kubernetes::params::k8s_reserved_mem':
            k8s_reserved_mem,
        }