def _get_per_host_overrides(self):
    host_list = []
    hosts = self.dbapi.ihost_get_list()
    for host in hosts:
        host_labels = self.dbapi.label_get_by_host(host.id)
        if (host.invprovision in [constants.PROVISIONED,
                                  constants.PROVISIONING] or
                host.ihost_action in [constants.UNLOCK_ACTION,
                                      constants.FORCE_UNLOCK_ACTION]):
            if (constants.WORKER in utils.get_personalities(host) and
                    utils.has_openstack_compute(host_labels)):
                hostname = str(host.hostname)
                host_neutron = {
                    'name': hostname,
                    'conf': {
                        'plugins': {
                            'openvswitch_agent':
                                self._get_dynamic_ovs_agent_config(host),
                            'sriov_agent':
                                self._get_dynamic_sriov_agent_config(host),
                        }
                    }
                }
                # if ovs runs on host, auto bridge add is covered by sysinv
                if utils.get_vswitch_type(self.dbapi) == \
                        constants.VSWITCH_TYPE_NONE:
                    host_neutron['conf'].update({
                        'auto_bridge_add': self._get_host_bridges(host)})
                host_list.append(host_neutron)

    return host_list
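# Illustrative sketch only (hostname and all values are placeholders, not from
# the source): the shape of one host_list entry produced above for a worker
# with the openstack compute label and vswitch_type 'none', so that
# 'auto_bridge_add' is included. The real agent sections are filled in by
# _get_dynamic_ovs_agent_config() and _get_dynamic_sriov_agent_config().
example_host_neutron = {
    'name': 'worker-0',
    'conf': {
        'plugins': {
            'openvswitch_agent': {},  # per-host OVS agent config
            'sriov_agent': {},        # per-host SR-IOV agent config
        },
        'auto_bridge_add': {},        # from _get_host_bridges()
    }
}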
def _get_kvm_timer_advance_config(self, host):
    kvm_timer_advance_enabled = False
    vcpu_pin_set = None

    if constants.WORKER in utils.get_personalities(host):
        host_labels = self.dbapi.label_get_by_host(host.id)
        if utils.has_openstack_compute(host_labels):
            kvm_timer_advance_enabled = True
            vcpu_pin_set = self._get_vcpu_pin_set(host)

    return {
        'platform::compute::kvm_timer_advance::enabled':
            kvm_timer_advance_enabled,
        'platform::compute::kvm_timer_advance::vcpu_pin_set':
            vcpu_pin_set,
    }
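# Illustrative sketch only: the hieradata returned above for a worker that
# carries the openstack compute label. The vcpu_pin_set value is a placeholder;
# its exact format is whatever _get_vcpu_pin_set(host) produces. For any other
# host the method returns enabled=False and vcpu_pin_set=None.
example_kvm_timer_advance = {
    'platform::compute::kvm_timer_advance::enabled': True,
    'platform::compute::kvm_timer_advance::vcpu_pin_set': '4-23',  # placeholder
}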
def _get_per_host_overrides(self):
    host_list = []
    hosts = self.dbapi.ihost_get_list()
    for host in hosts:
        host_labels = self.dbapi.label_get_by_host(host.id)
        if (host.invprovision in [constants.PROVISIONED,
                                  constants.PROVISIONING] or
                host.ihost_action in [constants.UNLOCK_ACTION,
                                      constants.FORCE_UNLOCK_ACTION]):
            if (constants.WORKER in utils.get_personalities(host) and
                    utils.has_openstack_compute(host_labels)):
                hostname = str(host.hostname)
                default_config = {}
                vnc_config = {}
                libvirt_config = {}
                pci_config = {}
                neutron_config = {}
                per_physnet_numa_config = {}
                self._update_host_cpu_maps(host, default_config)
                self._update_host_storage(host, default_config, libvirt_config)
                self._update_host_addresses(host, default_config, vnc_config,
                                            libvirt_config)
                self._update_host_pci_whitelist(host, pci_config)
                self._update_reserved_memory(host, default_config)
                self._update_host_neutron_physnet(host, neutron_config,
                                                  per_physnet_numa_config)
                host_nova = {
                    'name': hostname,
                    'conf': {
                        'nova': {
                            'DEFAULT': default_config,
                            'vnc': vnc_config,
                            'libvirt': libvirt_config,
                            'pci': pci_config if pci_config else None,
                            'neutron': neutron_config
                        }
                    }
                }
                host_nova['conf']['nova'].update(per_physnet_numa_config)
                host_list.append(host_nova)

    return host_list
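# Illustrative sketch only (hostname and values are placeholders): the shape of
# one host_list entry built above. The individual sections are populated by the
# _update_host_* helpers, and per_physnet_numa_config entries are merged in as
# siblings of the sections shown under 'nova'.
example_host_nova = {
    'name': 'worker-1',
    'conf': {
        'nova': {
            'DEFAULT': {},   # cpu maps, storage, addresses, reserved memory
            'vnc': {},       # console access addresses
            'libvirt': {},   # storage / live-migration settings
            'pci': None,     # None when the PCI whitelist is empty
            'neutron': {},   # physnet settings
        }
    }
}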
def check_core_allocations(host, cpu_counts, cpu_lists=None):
    """Check that minimum and maximum core values are respected."""

    if cpu_lists:
        # Verify no overlaps in cpulists for different functions. Not all
        # functions are guaranteed to be present as keys in cpu_lists.
        cpulist = []
        for function in CORE_FUNCTIONS:
            functionlist = cpu_lists.get(function, [])
            if set(cpulist).intersection(functionlist):
                raise wsme.exc.ClientSideError(
                    "Some CPUs are specified for more than one function.")
            cpulist.extend(functionlist)

    # NOTE: contrary to the variable names, these are actually logical CPUs
    # rather than cores, so if hyperthreading is enabled they're SMT siblings.
    total_platform_cores = 0
    total_vswitch_cores = 0
    total_shared_cores = 0
    total_isolated_cores = 0
    for s in range(0, len(host.nodes)):
        available_cores = len(host.cpu_lists[s])
        platform_cores = cpu_counts[s][constants.PLATFORM_FUNCTION]
        vswitch_cores = cpu_counts[s][constants.VSWITCH_FUNCTION]
        shared_cores = cpu_counts[s][constants.SHARED_FUNCTION]
        isolated_cores = cpu_counts[s][constants.ISOLATED_FUNCTION]
        requested_cores = \
            platform_cores + vswitch_cores + shared_cores + isolated_cores
        if requested_cores > available_cores:
            raise wsme.exc.ClientSideError(
                "More total logical cores requested than present on Processor "
                "%s (%s cores)." % (s, available_cores))
        total_platform_cores += platform_cores
        total_vswitch_cores += vswitch_cores
        total_shared_cores += shared_cores
        total_isolated_cores += isolated_cores

    # Add any cpus specified via ranges to the totals.
    # Note: Can't specify by both count and range for the same function.
    if cpu_lists:
        total_platform_cores += len(
            cpu_lists.get(constants.PLATFORM_FUNCTION, []))
        total_vswitch_cores += len(
            cpu_lists.get(constants.VSWITCH_FUNCTION, []))
        total_shared_cores += len(
            cpu_lists.get(constants.SHARED_FUNCTION, []))
        total_isolated_cores += len(
            cpu_lists.get(constants.ISOLATED_FUNCTION, []))

    # Validate Platform cores (actually logical CPUs)
    if ((constants.CONTROLLER in host.subfunctions) and
            (constants.WORKER in host.subfunctions)):
        if total_platform_cores < 2:
            raise wsme.exc.ClientSideError(
                "%s must have at least two cores." %
                constants.PLATFORM_FUNCTION)
    elif total_platform_cores == 0:
        raise wsme.exc.ClientSideError(
            "%s must have at least one core." %
            constants.PLATFORM_FUNCTION)

    # Validate shared cores (actually logical CPUs)
    for s in range(0, len(host.nodes)):
        shared_cores = cpu_counts[s][constants.SHARED_FUNCTION]
        if host.hyperthreading:
            shared_cores /= 2
        if shared_cores > 1:
            raise wsme.exc.ClientSideError(
                '%s cores are limited to 1 per processor.'
                % constants.SHARED_FUNCTION)

    # Validate vswitch cores (actually logical CPUs)
    if total_vswitch_cores != 0:
        vswitch_type = cutils.get_vswitch_type(pecan.request.dbapi)
        if constants.VSWITCH_TYPE_NONE == vswitch_type:
            raise wsme.exc.ClientSideError(
                ('vSwitch cpus can only be used with a vswitch_type '
                 'specified.'))

    vswitch_physical_cores = total_vswitch_cores
    if host.hyperthreading:
        vswitch_physical_cores /= 2
    if vswitch_physical_cores > VSWITCH_MAX_CORES:
        raise wsme.exc.ClientSideError(
            "The %s function can only be assigned up to %s cores." %
            (constants.VSWITCH_FUNCTION.lower(), VSWITCH_MAX_CORES))

    # Validate Isolated cores (actually logical CPUs)
    # - Prevent isolated core assignment if vswitch or shared cores are
    #   allocated.
    if total_isolated_cores > 0:
        labels = pecan.request.dbapi.label_get_by_host(host.id)
        if not cutils.has_openstack_compute(labels):
            if total_vswitch_cores != 0 or total_shared_cores != 0:
                raise wsme.exc.ClientSideError(
                    "%s cores can only be configured with %s and %s core "
                    "types." % (constants.ISOLATED_FUNCTION,
                                constants.PLATFORM_FUNCTION,
                                constants.APPLICATION_FUNCTION))

    reserved_for_applications = len(host.cpus) - total_platform_cores - \
        total_vswitch_cores - total_isolated_cores
    if reserved_for_applications <= 0:
        raise wsme.exc.ClientSideError(
            "There must be at least one unused core for %s." %
            constants.APPLICATION_FUNCTION)
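# Illustrative sketch only, assuming cpu_counts is indexed by processor (numa
# node) number and that the sysinv constants module is imported as above; all
# counts and CPU ids are placeholders. Shows the argument shapes that
# check_core_allocations() iterates over. Note the function's own rule: a
# given function may be specified by count or by range, not both.
example_cpu_counts = {
    0: {constants.PLATFORM_FUNCTION: 2,
        constants.VSWITCH_FUNCTION: 2,
        constants.SHARED_FUNCTION: 0,
        constants.ISOLATED_FUNCTION: 0},
    1: {constants.PLATFORM_FUNCTION: 0,
        constants.VSWITCH_FUNCTION: 0,
        constants.SHARED_FUNCTION: 0,
        constants.ISOLATED_FUNCTION: 0},
}
example_cpu_lists = {
    # explicit logical CPU ids for one function, counted into the totals
    constants.ISOLATED_FUNCTION: [10, 11, 12, 13],
}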
def _get_host_cpu_config(self, host):

    config = {}
    if constants.WORKER in utils.get_personalities(host):
        host_cpus = self._get_host_cpu_list(host, threads=True)
        if not host_cpus:
            return config

        platform_cpus_no_threads = self._get_platform_cpu_list(host)
        vswitch_cpus_no_threads = self._get_vswitch_cpu_list(host)

        platform_numa_cpus = utils.get_numa_index_list(
            platform_cpus_no_threads)
        vswitch_numa_cpus = utils.get_numa_index_list(
            vswitch_cpus_no_threads)

        # build a list of platform reserved cpus per numa node
        platform_cores = []
        for node, cpus in platform_numa_cpus.items():
            cpu_list = ','.join([str(c.cpu) for c in cpus])
            platform_node = "\"node%d:%s\"" % (node, cpu_list)
            platform_cores.append(platform_node)

        # build a list of vswitch reserved cpu counts per numa node
        vswitch_cores = []
        for node, cpus in vswitch_numa_cpus.items():
            cpu_count = len(cpus)
            vswitch_node = "\"node%d:%d\"" % (node, cpu_count)
            vswitch_cores.append(vswitch_node)

        reserved_platform_cores = "(%s)" % ' '.join(platform_cores)
        reserved_vswitch_cores = "(%s)" % ' '.join(vswitch_cores)

        # all logical cpus
        host_cpus = self._get_host_cpu_list(host, threads=True)
        host_cpuset = set([c.cpu for c in host_cpus])
        host_ranges = utils.format_range_set(host_cpuset)
        n_cpus = len(host_cpuset)

        # platform logical cpus
        platform_cpus = self._get_host_cpu_list(
            host, function=constants.PLATFORM_FUNCTION, threads=True)
        platform_cpuset = set([c.cpu for c in platform_cpus])
        platform_ranges = utils.format_range_set(platform_cpuset)

        # vswitch logical cpus
        vswitch_cpus = self._get_host_cpu_list(
            host, constants.VSWITCH_FUNCTION, threads=True)
        vswitch_cpuset = set([c.cpu for c in vswitch_cpus])
        vswitch_ranges = utils.format_range_set(vswitch_cpuset)

        # non-platform logical cpus
        rcu_nocbs_cpuset = host_cpuset - platform_cpuset
        rcu_nocbs_ranges = utils.format_range_set(rcu_nocbs_cpuset)

        # non-vswitch logical cpus
        non_vswitch_cpuset = host_cpuset - vswitch_cpuset
        non_vswitch_ranges = utils.format_range_set(non_vswitch_cpuset)

        cpu_options = ""
        cpu_ranges = {}
        isolcpus_ranges = vswitch_ranges

        if constants.LOWLATENCY in host.subfunctions:
            # NOTE: 'hight_wakeup_cpus' is spelled this way on purpose; the
            # key must match the corresponding puppet class parameter name.
            config.update({
                'platform::compute::pmqos::low_wakeup_cpus':
                    "\"%s\"" % vswitch_ranges,
                'platform::compute::pmqos::hight_wakeup_cpus':
                    "\"%s\"" % non_vswitch_ranges,
            })
            cpu_ranges.update({"nohz_full": rcu_nocbs_ranges})

        host_labels = self.dbapi.label_get_by_host(host.id)
        if utils.has_openstack_compute(host_labels):
            isolcpus_ranges = rcu_nocbs_ranges

        cpu_ranges.update({
            "isolcpus": isolcpus_ranges,
            "rcu_nocbs": rcu_nocbs_ranges,
            "kthread_cpus": platform_ranges,
            "irqaffinity": platform_ranges
        })

        for key, value in cpu_ranges.items():
            if str(value).strip() != "":
                cpu_options += "%s=%s " % (key, value)

        config.update({
            'platform::compute::params::worker_cpu_list':
                "\"%s\"" % host_ranges,
            'platform::compute::params::platform_cpu_list':
                "\"%s\"" % platform_ranges,
            'platform::compute::params::reserved_vswitch_cores':
                reserved_vswitch_cores,
            'platform::compute::params::reserved_platform_cores':
                reserved_platform_cores,
            'platform::compute::grub::params::n_cpus': n_cpus,
            'platform::compute::grub::params::cpu_options': cpu_options,
        })
    return config
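# Illustrative sketch only (assumes insertion-ordered dicts, i.e. Python 3.7+):
# what the cpu_options loop above assembles for a hypothetical 24-CPU standard
# worker with platform CPUs 0-1 and vswitch CPUs 2-3, no lowlatency
# subfunction and no openstack-compute label. The trailing space comes from
# the "%s=%s " format.
example_cpu_ranges = {
    "isolcpus": "2-3",      # vswitch_ranges (host is not an openstack compute)
    "rcu_nocbs": "2-23",    # host_cpuset - platform_cpuset
    "kthread_cpus": "0-1",  # platform_ranges
    "irqaffinity": "0-1",   # platform_ranges
}
example_cpu_options = ""
for key, value in example_cpu_ranges.items():
    if str(value).strip() != "":
        example_cpu_options += "%s=%s " % (key, value)
# example_cpu_options ==
#     "isolcpus=2-3 rcu_nocbs=2-23 kthread_cpus=0-1 irqaffinity=0-1 "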