def get_capabilities(self):
    """Returns the host capabilities information

    Returns an instance of config.LibvirtConfigCaps representing
    the capabilities of the host, with sockets/cores/threads taken
    from ``lscpu`` output when available (PF9 change).

    Note: The result is cached in the member attribute _caps.

    :returns: a config.LibvirtConfigCaps object
    """
    if not self._caps:
        xmlstr = self.get_connection().getCapabilities()
        LOG.info(_LI("Libvirt host capabilities %s"), xmlstr)
        self._caps = vconfig.LibvirtConfigCaps()
        self._caps.parse_str(xmlstr)
        if hasattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'):
            try:
                features = self.get_connection().baselineCPU(
                    [self._caps.host.cpu.to_xml()],
                    libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES)
                # FIXME(wangpan): the return value of baselineCPU should be
                #                 None or xml string, but libvirt has a bug
                #                 of it from 1.1.2 which is fixed in 1.2.0,
                #                 this -1 checking should be removed later.
                if features and features != -1:
                    cpu = vconfig.LibvirtConfigCPU()
                    cpu.parse_str(features)
                    self._caps.host.cpu.features = cpu.features
            except libvirt.libvirtError as ex:
                error_code = ex.get_error_code()
                if error_code == libvirt.VIR_ERR_NO_SUPPORT:
                    # NOTE: LOG.warn() is a deprecated alias of
                    # LOG.warning(); use the canonical name.
                    LOG.warning(
                        _LW("URI %(uri)s does not support full set"
                            " of host capabilities: %(error)s"),
                        {'uri': self._uri, 'error': ex})
                else:
                    raise
        # PF9: Get sockets, cores and threads from lscpu instead of
        # libvirt
        try:
            (stdout, stderr) = processutils.execute("lscpu")
            lscpu_cpu_info = Host._parse_cpu_info_pf9(stdout)
            # Only trust lscpu when it reported all three topology keys.
            if {'cores', 'sockets', 'threads'} == set(lscpu_cpu_info):
                LOG.info("Using cpu information from lscpu")
                self._caps.host.cpu.cores = lscpu_cpu_info["cores"]
                self._caps.host.cpu.sockets = lscpu_cpu_info["sockets"]
                self._caps.host.cpu.threads = lscpu_cpu_info["threads"]
            else:
                LOG.info("Using cpu information from libvirt")
        except (OSError, processutils.ProcessExecutionError):
            # Looks like we can't use lscpu on this hypervisor. We continue
            # to use libvirt provided cores, socket and thread counts.
            LOG.info("Using cpu information from libvirt")
        # PF9 end
    return self._caps
def test_config_simple(self):
    """A CPU config with only a model set renders just <model>."""
    cpu = config.LibvirtConfigCPU()
    cpu.model = "Penryn"
    expected = """
        <cpu>
          <model>Penryn</model>
        </cpu>
    """
    self.assertXmlEqual(cpu.to_xml(), expected)
def detail(self, req):
    """Return detailed information about all compute nodes (hypervisors).

    When a ``server_id`` query parameter is supplied, the node list is
    additionally filtered to hosts that look like valid migration targets
    for that instance: CPU-compatible, same hypervisor type, equal-or-newer
    hypervisor version, compute service up and enabled, and not the
    instance's current host.
    """
    context = req.environ['nova.context']
    authorize(context)
    compute_nodes = self.host_api.compute_node_get_all(context)
    # begin:<wangzh21>:<Bugzilla - bug 75256>:<a>:<2016-11-17>
    server_id = req.GET.get('server_id', None)
    if server_id:
        compute_nodes_after_filter = []
        instance = common.get_instance(self.compute_api, context, server_id)
        src_compute_info = objects.ComputeNode.\
            get_first_node_by_host_for_old_compat(context, instance.host)
        if not instance.vcpu_model or not instance.vcpu_model.model:
            # No stored vCPU model on the instance: rebuild a CPU config
            # from the source node's cpu_info JSON blob instead.
            source_cpu_info = src_compute_info['cpu_info']
            info = jsonutils.loads(source_cpu_info)
            LOG.info(_LI('Instance launched has CPU info: %s'),
                     source_cpu_info)
            cpu = vconfig.LibvirtConfigCPU()
            cpu.arch = info['arch']
            cpu.model = info['model']
            cpu.vendor = info['vendor']
            cpu.sockets = info['topology']['sockets']
            cpu.cores = info['topology']['cores']
            cpu.threads = info['topology']['threads']
            for f in info['features']:
                cpu.add_feature(vconfig.LibvirtConfigCPUFeature(f))
        else:
            cpu = self._vcpu_model_to_cpu_config(instance.vcpu_model)
        cpu_xml = cpu.to_xml()
        for compute_node in compute_nodes:
            dst_cpu_info = jsonutils.loads(compute_node['cpu_info'])
            # NOTE(review): _compareCPU presumably wraps libvirt's
            # compareCPU(); ret > 0 is treated as "compatible" below --
            # confirm the helper's return-value semantics.
            ret = self._compareCPU(cpu_xml, dst_cpu_info)
            service = self.host_api.service_get_by_compute_host(
                context, compute_node.host)
            state_is_up = self.servicegroup_api.service_is_up(service)
            status_is_disabled = service.disabled
            # Skip the source host itself and any host whose compute
            # service is down or administratively disabled.
            if compute_node['host'] == instance.host or not state_is_up or \
                    status_is_disabled:
                continue
            # Keep only CPU-compatible nodes of the same hypervisor type
            # with an equal-or-newer hypervisor version.
            if ret > 0 and src_compute_info['hypervisor_type'] == \
                    compute_node['hypervisor_type'] and \
                    src_compute_info['hypervisor_version'] <= \
                    compute_node['hypervisor_version']:
                compute_nodes_after_filter.append(compute_node)
        compute_nodes = compute_nodes_after_filter
    # end:<wangzh21>:<Bugzilla - bug 75256>:<a>:<2016-11-17>
    req.cache_db_compute_nodes(compute_nodes)
    return dict(hypervisors=[
        self._view_hypervisor(
            hyp,
            self.host_api.service_get_by_compute_host(context, hyp.host),
            True)
        for hyp in compute_nodes])
def test_config_topology(self):
    """sockets/cores/threads render as a <topology> element."""
    cpu = config.LibvirtConfigCPU()
    cpu.model = "Penryn"
    cpu.sockets = 4
    cpu.cores = 4
    cpu.threads = 2
    self.assertXmlEqual(cpu.to_xml(), """
        <cpu>
          <model>Penryn</model>
          <topology sockets="4" cores="4" threads="2"/>
        </cpu>
    """)
def test_config_topology(self):
    """A vendor-only CPU with topology renders <vendor> and <topology>."""
    cpu = config.LibvirtConfigCPU()
    cpu.vendor = "AMD"
    cpu.sockets = 2
    cpu.cores = 4
    cpu.threads = 2
    rendered = cpu.to_xml()
    expected = """
        <cpu>
          <vendor>AMD</vendor>
          <topology cores="4" threads="2" sockets="2"/>
        </cpu>"""
    self.assertXmlEqual(rendered, expected)
def get_capabilities(self):
    """Returns the host capabilities information

    Returns an instance of config.LibvirtConfigCaps representing
    the capabilities of the host.

    Note: The result is cached in the member attribute _caps.

    :returns: a config.LibvirtConfigCaps object
    """
    if not self._caps:
        xmlstr = self.get_connection().getCapabilities()
        LOG.info(_LI("Libvirt host capabilities %s"), xmlstr)
        self._caps = vconfig.LibvirtConfigCaps()
        self._caps.parse_str(xmlstr)
        # NOTE(mriedem): Don't attempt to get baseline CPU features
        # if libvirt can't determine the host cpu model.
        if (hasattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES')
                and self._caps.host.cpu.model is not None):
            try:
                features = self.get_connection().baselineCPU(
                    [self._caps.host.cpu.to_xml()],
                    libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES)
                # FIXME(wangpan): the return value of baselineCPU should be
                #                 None or xml string, but libvirt has a bug
                #                 of it from 1.1.2 which is fixed in 1.2.0,
                #                 this -1 checking should be removed later.
                if features and features != -1:
                    cpu = vconfig.LibvirtConfigCPU()
                    cpu.parse_str(features)
                    self._caps.host.cpu.features = cpu.features
            except libvirt.libvirtError as ex:
                error_code = ex.get_error_code()
                if error_code == libvirt.VIR_ERR_NO_SUPPORT:
                    # NOTE: LOG.warn() is a deprecated alias of
                    # LOG.warning(); use the canonical name.
                    LOG.warning(
                        _LW("URI %(uri)s does not support full set"
                            " of host capabilities: "
                            "%(error)s"),
                        {'uri': self._uri, 'error': ex})
                else:
                    raise
    return self._caps
def test_config_complex(self):
    """arch/model/vendor plus named features all render together."""
    cpu = config.LibvirtConfigCPU()
    cpu.model = "Penryn"
    cpu.vendor = "Intel"
    cpu.arch = "x86_64"
    for feature_name in ("mtrr", "apic"):
        cpu.add_feature(config.LibvirtConfigCPUFeature(feature_name))
    self.assertXmlEqual(cpu.to_xml(), """
        <cpu>
          <arch>x86_64</arch>
          <model>Penryn</model>
          <vendor>Intel</vendor>
          <feature name="mtrr"/>
          <feature name="apic"/>
        </cpu>
    """)
def get_capabilities(self):
    """Returns the host capabilities information

    Returns an instance of config.LibvirtConfigCaps representing
    the capabilities of the host.

    Note: The result is cached in the member attribute _caps.

    :returns: a config.LibvirtConfigCaps object
    """
    if self._caps:
        return self._caps

    caps_xml = self.get_connection().getCapabilities()
    LOG.info("Libvirt host capabilities %s", caps_xml)
    self._caps = vconfig.LibvirtConfigCaps()
    self._caps.parse_str(caps_xml)

    # NOTE(mriedem): Don't attempt to get baseline CPU features
    # if libvirt can't determine the host cpu model.
    can_baseline = (
        hasattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES')
        and self._caps.host.cpu.model is not None)
    if can_baseline:
        try:
            cpu_xml = self._caps.host.cpu.to_xml()
            if six.PY3 and isinstance(cpu_xml, six.binary_type):
                # On py3 to_xml() may hand back bytes; baselineCPU
                # expects text.
                cpu_xml = cpu_xml.decode('utf-8')
            expanded = self.get_connection().baselineCPU(
                [cpu_xml],
                libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES)
            if expanded:
                parsed = vconfig.LibvirtConfigCPU()
                parsed.parse_str(expanded)
                self._caps.host.cpu.features = parsed.features
        except libvirt.libvirtError as ex:
            if ex.get_error_code() == libvirt.VIR_ERR_NO_SUPPORT:
                LOG.warning(
                    "URI %(uri)s does not support full set"
                    " of host capabilities: %(error)s",
                    {'uri': self._uri, 'error': ex})
            else:
                raise
    return self._caps
def test_config_cpu(self):
    """A fully-populated CPU config renders arch, model, vendor and
    one <feature> element per added feature name."""
    cpu = config.LibvirtConfigCPU()
    cpu.vendor = "AMD"
    cpu.model = "Quad-Core AMD Opteron(tm) Processor 2350"
    cpu.arch = "x86_64"
    for feature in ("svm", "extapic", "constant_tsc"):
        cpu.add_feature(feature)
    rendered = cpu.to_xml()
    self.assertXmlEqual(rendered, """
        <cpu>
          <arch>x86_64</arch>
          <model>Quad-Core AMD Opteron(tm) Processor 2350</model>
          <vendor>AMD</vendor>
          <feature name="svm"/>
          <feature name="extapic"/>
          <feature name="constant_tsc"/>
        </cpu>""")
def get_capabilities(self):
    """Returns the host capabilities information

    Returns an instance of config.LibvirtConfigCaps representing
    the capabilities of the host, with per-NUMA-cell memory adjusted
    to exclude vswitch hugepages and 4K-page overheads (WRS change).

    Note: The result is cached in the member attribute _caps.

    :returns: a config.LibvirtConfigCaps object
    """
    if not self._caps:
        xmlstr = self.get_connection().getCapabilities()
        LOG.info("Libvirt host capabilities %s", xmlstr)
        self._caps = vconfig.LibvirtConfigCaps()
        self._caps.parse_str(xmlstr)
        # NOTE(mriedem): Don't attempt to get baseline CPU features
        # if libvirt can't determine the host cpu model.
        if (hasattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES')
                and self._caps.host.cpu.model is not None):
            try:
                xml_str = self._caps.host.cpu.to_xml()
                if six.PY3 and isinstance(xml_str, six.binary_type):
                    # On py3 to_xml() may return bytes; baselineCPU
                    # expects text.
                    xml_str = xml_str.decode('utf-8')
                features = self.get_connection().baselineCPU(
                    [xml_str],
                    libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES)
                if features:
                    cpu = vconfig.LibvirtConfigCPU()
                    cpu.parse_str(features)
                    self._caps.host.cpu.features = cpu.features
            except libvirt.libvirtError as ex:
                error_code = ex.get_error_code()
                if error_code == libvirt.VIR_ERR_NO_SUPPORT:
                    LOG.warning("URI %(uri)s does not support full set"
                                " of host capabilities: %(error)s",
                                {'uri': self._uri, 'error': ex})
                else:
                    raise
        # WRS - Simplify memory accounting by engineering out of maximum
        # VM available memory instead of total physical memory.
        # After this adjustment, cell.mempages[] only represents VM memory
        # and corresponds exactly match compute-huge.
        # The following adjustment also removes vswitch hugepages overheads
        # and 4K pages overheads from accounting.
        # Important: cell.memory obtained from getCapabilities() is KiB.
        # The NumaCell.memory field is in MiB as converted by routine
        # virt/libvirt/driver.py: _get_host_numa_topology(). This is
        # confusing since both use same variable name.
        topology = self._caps.host.topology
        if topology is None or not topology.cells:
            # Nothing to adjust on hosts without a NUMA topology.
            return self._caps
        # NOTE(review): each *_nodes value is presumably a per-NUMA-node
        # page count indexed by cell.id -- confirm against
        # _get_configured_pages.
        vm_4K_nodes = self._get_configured_pages(
            csv=CONF.compute_vm_4K_pages)
        vs_2M_nodes = self._get_configured_pages(
            csv=CONF.compute_vswitch_2M_pages)
        vs_1G_nodes = self._get_configured_pages(
            csv=CONF.compute_vswitch_1G_pages)
        for cell in topology.cells:
            msg = []
            # Rebuild cell.memory (KiB) from the adjusted page counts.
            cell.memory = 0
            for k, pages in enumerate(cell.mempages):
                # NOTE: pg aliases pages (same element); mutations on pg
                # update the cached capabilities in place.
                pg = cell.mempages[k]
                if pages.size == MEMPAGE_SZ_4K and vm_4K_nodes:
                    # 4K pages: keep only the configured VM amount; the
                    # remainder is platform overhead.
                    vm_4K = vm_4K_nodes[cell.id]
                    oh_4K = pg.total - vm_4K
                    pg.total -= oh_4K
                    cell.memory += (pg.total * pg.size)
                    MiB = oh_4K * MEMPAGE_SZ_4K // units.Ki
                    msg.append('%d MiB 4K overhead' % (MiB))
                if pages.size == MEMPAGE_SZ_2M and vs_2M_nodes:
                    # 2M hugepages: subtract the vswitch reservation.
                    vs_2M = vs_2M_nodes[cell.id]
                    pg.total -= vs_2M
                    cell.memory += (pg.total * pg.size)
                    MiB = vs_2M * MEMPAGE_SZ_2M // units.Ki
                    msg.append('%d MiB 2M vswitch' % (MiB))
                if pages.size == MEMPAGE_SZ_1G and vs_1G_nodes:
                    # 1G hugepages: subtract the vswitch reservation.
                    vs_1G = vs_1G_nodes[cell.id]
                    pg.total -= vs_1G
                    cell.memory += (pg.total * pg.size)
                    MiB = vs_1G * MEMPAGE_SZ_1G // units.Ki
                    msg.append('%d MiB 1G vswitch' % (MiB))
            LOG.info("cell:%(id)s exclude: %(msg)s",
                     {'id': cell.id, 'msg': '; '.join(msg)})
    return self._caps
def _update_numa_xml(xml_doc, driver_interface, instance):
    """Rewrite the NUMA-related elements of a guest's libvirt XML for
    live migration to a destination host.

    Replaces the <vcpu>, <cputune>, <numatune>, <memoryBacking> and <cpu>
    elements of *xml_doc* based on the NUMA topology and allowed cpus
    claimed on the destination (from instance.migration_context), while
    keeping the guest's existing cpu topology (sockets/cores/threads).

    :param xml_doc: etree element for the guest XML, modified in place
    :param driver_interface: object exposing the libvirt driver's guest
        config builders (get_guest_numa_config etc.)
    :param instance: the instance being migrated, with migration_context
    :returns: the (possibly modified) xml_doc
    """
    image_meta = objects.ImageMeta.from_instance(instance)
    # WRS: Use numa topology and allowed cpus from destination host.
    allowed_cpus = instance.migration_context.new_allowed_cpus
    # TODO(sahid/cfriesen): If the destination has more numa nodes than the
    # source then this could cause problems due to the loop over
    # topology.cells where "topology" is for the source host.
    numa_config = driver_interface.get_guest_numa_config(
        instance.migration_context.new_numa_topology,
        instance.flavor, allowed_cpus, image_meta)
    # NOTE(review): numa_config is presumably a namedtuple; index 1 looks
    # like the cell config, also reachable by name -- confirm against
    # get_guest_numa_config's return type.
    if numa_config[1] is None:
        # Quit early if the instance does not provide numa topology.
        return xml_doc
    # NOTE(review): membacking/cpu are built from instance.numa_topology
    # (source) while numa_config used the destination topology -- confirm
    # this asymmetry is intentional.
    membacking = driver_interface.get_guest_memory_backing_config(
        instance.numa_topology, numa_config.numatune, instance.flavor)
    cpu = driver_interface.get_guest_cpu_config(
        instance.flavor, image_meta, numa_config.numaconfig,
        instance.numa_topology)
    # We need to treat cpuset slightly differently - it's just an
    # attribute on the vcpu element, which we expect is always there
    vcpu = xml_doc.find('./vcpu')
    new_vcpu = etree.Element("vcpu")
    new_vcpu.text = six.text_type(instance.flavor.vcpus)
    if numa_config.cpuset:
        new_vcpu.set("cpuset",
                     hardware.format_cpu_spec(numa_config.cpuset))
    if vcpu is not None:
        xml_doc.remove(vcpu)
    xml_doc.append(new_vcpu)
    # WRS: It's possible that a guest cpu changes while migrating to a host
    # that has a different physical host cpu topology (for e.g. HT to
    # non-HT). Guest cpu topology can't change during live migration. For
    # this reason, we always keep the guest cpu topology that is currently
    # used in the running instance. The topology stays the same but the
    # cpu_tune still gets recalculated based on the InstanceNUMATopology
    # that was claimed on the destination.
    old_cpu_xml = xml_doc.find('./cpu')
    old_cpu = vconfig.LibvirtConfigCPU()
    old_cpu.parse_dom(old_cpu_xml)
    # Carry the running guest's topology over into the freshly built
    # cpu config.
    cpu.sockets = old_cpu.sockets
    cpu.cores = old_cpu.cores
    cpu.threads = old_cpu.threads

    def replace_from_config(tag, config):
        """Replace a top level node with xml generated by config
        """
        elem = xml_doc.find('./' + tag)
        if elem is not None:
            xml_doc.remove(elem)
        if config:
            new_elem = config.format_dom()
            xml_doc.append(new_elem)

    # Swap out each NUMA-related top-level element for its recalculated
    # counterpart; a falsy config simply removes the element.
    replace = [('cputune', numa_config.cputune),
               ('numatune', numa_config.numatune),
               ('memoryBacking', membacking),
               ('cpu', cpu)]
    for tag, config in replace:
        replace_from_config(tag, config)
    return xml_doc