def compatible_cpu_models():
    """
    Compare qemu's CPU models to models the host is capable of emulating.

    Returns:
        A list of strings indicating compatible CPU models prefixed
        with 'model_'.

    Example:
        ['model_Haswell-noTSX', 'model_Nehalem', 'model_Conroe',
         'model_coreduo', 'model_core2duo', 'model_Penryn',
         'model_IvyBridge', 'model_Westmere', 'model_n270',
         'model_SandyBridge']
    """
    conn = libvirtconnection.get()
    arch = cpuarch.real()
    # POWER hosts are queried in host-model mode, everything else custom.
    if cpuarch.is_ppc(arch):
        mode = _CpuMode.HOST_MODEL
    else:
        mode = _CpuMode.CUSTOM

    reported = domain_cpu_models(conn, arch, mode)
    usable = set(name for (name, state) in six.iteritems(reported)
                 if state == 'yes')

    # Current QEMU doesn't report POWER compatibility modes, so we
    # must add them ourselves.
    if cpuarch.is_ppc(arch) and 'POWER9' in usable:
        usable.add('POWER8')

    return ["model_" + name for name in usable]
def uuid():
    """
    Return the host UUID string, or None when it cannot be determined.

    Lookup order: the node ID file P_VDSM_NODE_ID if present; otherwise
    DMI data on x86, or /proc/device-tree/system-id on POWER.
    """
    host_UUID = None

    try:
        if os.path.exists(P_VDSM_NODE_ID):
            with open(P_VDSM_NODE_ID) as f:
                host_UUID = f.readline().replace("\n", "")
        else:
            arch = cpuarch.real()
            if cpuarch.is_x86(arch):
                try:
                    hw_info = dmidecodeUtil.getHardwareInfoStructure()
                    host_UUID = hw_info['systemUUID'].lower()
                except KeyError:
                    logging.warning('Could not find host UUID.')
            elif cpuarch.is_ppc(arch):
                # eg. output IBM,03061C14A
                try:
                    with open('/proc/device-tree/system-id') as f:
                        systemId = f.readline()
                    host_UUID = systemId.rstrip('\0').replace(',', '')
                except IOError:
                    logging.warning('Could not find host UUID.')
    # UUID retrieval is best-effort, but a bare `except:` also swallowed
    # SystemExit/KeyboardInterrupt; Exception is the broadest safe catch.
    except Exception:
        logging.error("Error retrieving host UUID", exc_info=True)

    return host_UUID
def _parse_domain_cpu(dom, conf, arch):
    """Populate conf with CPU topology, pinning and NUMA data from dom."""
    topology = dom.find('./cpu/topology')
    if topology is not None:
        attrs = topology.attrib
        conf['smpCoresPerSocket'] = attrs['cores']
        conf['smpThreadsPerCore'] = attrs['threads']
        total = (int(attrs['sockets']) * int(attrs['cores']) *
                 int(attrs['threads']))
        conf['maxVCpus'] = str(total)

    if dom.find('./cputune') is not None:
        pinning = dict((pin.attrib['vcpu'], pin.attrib['cpuset'])
                       for pin in dom.findall('./cputune/vcpupin'))
        # Only record pinning when at least one vcpupin element exists.
        if pinning:
            conf['cpuPinning'] = pinning

    if dom.find('./cpu/numa') is not None:
        nodes = []
        for idx, cell in enumerate(dom.findall('./cpu/numa/cell')):
            nodes.append({
                'nodeIndex': idx,
                'cpus': ','.join(_expand_list(cell.attrib['cpus'])),
                # libvirt reports KiB; conf stores MiB.
                'memory': str(int(cell.attrib['memory']) // 1024),
            })
        conf['guestNumaNodes'] = nodes

    # Architecture-specific <cpu> attributes.
    if cpuarch.is_x86(arch):
        _parse_domain_cpu_x86(dom, conf)
    elif cpuarch.is_ppc(arch):
        _parse_domain_cpu_ppc(dom, conf)
def nested_virtualization():
    """Detect whether a loaded KVM module has nested virtualization on."""
    arch = cpuarch.real()
    if cpuarch.is_ppc(arch):
        return NestedVirtualization(False, None)

    if cpuarch.is_s390(arch):
        modules = ("kvm", )
    else:
        modules = ("kvm_intel", "kvm_amd")

    for module in modules:
        param_path = "/sys/module/%s/parameters/nested" % module
        try:
            with open(param_path) as f:
                enabled = f.readline().strip()
        except IOError as e:
            if e.errno != errno.ENOENT:
                logging.exception('Error checking %s nested virtualization',
                                  module)
            else:
                # Missing file just means the module is not loaded.
                logging.debug('%s nested virtualization not detected',
                              module)
        else:
            if enabled in ("Y", "1"):
                return NestedVirtualization(True, module)

    logging.debug('Could not determine status of nested '
                  'virtualization')
    return NestedVirtualization(False, None)
def is_supported():
    """Report whether the host IOMMU setup supports device assignment."""
    try:
        has_groups = len(os.listdir('/sys/kernel/iommu_groups')) > 0
        # POWER only needs IOMMU groups; x86 additionally needs DMAR.
        if cpuarch.is_ppc(cpuarch.real()):
            return has_groups
        has_dmar = len(os.listdir('/sys/class/iommu')) > 0
        return has_groups and has_dmar
    except OSError:
        return False
def _is_hugetlbfs_1g_mounted(mtab_path='/etc/mtab'):
    """Check that a 1G hugetlbfs mount exists when the CPU requires one."""
    # POWER and CPUs without the pdpe1gb flag don't need the mount.
    if cpuarch.is_ppc(cpuarch.real()) or 'pdpe1gb' not in cpuinfo.flags():
        return True

    with open(mtab_path, 'r') as mtab:
        return any('/dev/hugepages1G' in entry for entry in mtab)
def getHardwareInfo(*args, **kwargs):
    """Return the hardware info structure for the host architecture."""
    arch = cpuarch.real()
    if cpuarch.is_x86(arch):
        from vdsm.dmidecodeUtil import getHardwareInfoStructure
    elif cpuarch.is_ppc(arch):
        from vdsm.ppc64HardwareInfo import getHardwareInfoStructure
    else:
        # not implemented over other architecture
        return {}
    return getHardwareInfoStructure()
class Tap(Interface):
    """Test helper wrapping a kernel TAP device created via /dev/net/tun."""

    # TUNSETIFF interface-request flag bits (see linux/if_tun.h).
    _IFF_TAP = 0x0002
    _IFF_NO_PI = 0x1000

    # The TUNSETIFF ioctl request number is architecture-dependent
    # (the _IOW encoding differs on powerpc); other arches are skipped.
    arch = cpuarch.real()
    if arch in (cpuarch.X86_64, cpuarch.S390X):
        _TUNSETIFF = 0x400454CA
    elif cpuarch.is_ppc(arch):
        _TUNSETIFF = 0x800454CA
    else:
        pytest.skip("Unsupported Architecture %s" % arch)

    # Listener subprocess handle, set by start_listener().
    _device_listener = None

    def create(self):
        # Open the clone device unbuffered, then bind a TAP interface
        # with the requested name via TUNSETIFF.
        self._clone_device = open('/dev/net/tun', 'r+b', buffering=0)
        ifr = struct.pack(b'16sH', self.dev_name.encode(),
                          self._IFF_TAP | self._IFF_NO_PI)
        fcntl.ioctl(self._clone_device, self._TUNSETIFF, ifr)
        self.set_managed()
        self.up()

    def remove(self):
        # Bring the link down before releasing the clone-device fd.
        self.down()
        self._clone_device.close()

    def start_listener(self, icmp):
        # Spawn a child process that listens on the TAP fd for `icmp`.
        self._device_listener = Process(
            target=_listen_on_device,
            args=(self._clone_device.fileno(), icmp))
        self._device_listener.start()

    def is_listener_alive(self):
        if self._device_listener:
            return self._device_listener.is_alive()
        else:
            return False

    def stop_listener(self):
        # SIGKILL rather than terminate(): the listener may block in read().
        if self._device_listener:
            os.kill(self._device_listener.pid, signal.SIGKILL)
            self._device_listener.join()

    def write_to_device(self, icmp):
        os.write(self._clone_device.fileno(), icmp)
class Tap(Interface):
    """Legacy test helper wrapping a kernel TAP device (/dev/net/tun)."""

    # TUNSETIFF interface-request flag bits (see linux/if_tun.h).
    _IFF_TAP = 0x0002
    _IFF_NO_PI = 0x1000

    # The TUNSETIFF ioctl request number is architecture-dependent
    # (the _IOW encoding differs on powerpc); other arches are skipped.
    arch = cpuarch.real()
    if arch in (cpuarch.X86_64, cpuarch.S390X):
        _TUNSETIFF = 0x400454ca
    elif cpuarch.is_ppc(arch):
        _TUNSETIFF = 0x800454ca
    else:
        raise SkipTest("Unsupported Architecture %s" % arch)

    # Listener subprocess handle, set by startListener().
    _deviceListener = None

    def addDevice(self):
        # Open the clone device unbuffered, then bind a TAP interface
        # with the requested name via TUNSETIFF.
        self._cloneDevice = open('/dev/net/tun', 'r+b', buffering=0)
        ifr = struct.pack(b'16sH', self.devName.encode(),
                          self._IFF_TAP | self._IFF_NO_PI)
        fcntl.ioctl(self._cloneDevice, self._TUNSETIFF, ifr)
        self.up()

    def delDevice(self):
        # Bring the link down before releasing the clone-device fd.
        self._down()
        self._cloneDevice.close()

    def startListener(self, icmp):
        # Spawn a child process that listens on the TAP fd for `icmp`.
        self._deviceListener = Process(
            target=_listenOnDevice,
            args=(self._cloneDevice.fileno(), icmp))
        self._deviceListener.start()

    def isListenerAlive(self):
        if self._deviceListener:
            return self._deviceListener.is_alive()
        else:
            return False

    def stopListener(self):
        # SIGKILL rather than terminate(): the listener may block in read().
        if self._deviceListener:
            os.kill(self._deviceListener.pid, signal.SIGKILL)
            self._deviceListener.join()

    def writeToDevice(self, icmp):
        os.write(self._cloneDevice.fileno(), icmp)
def nested_virtualization():
    """
    Detect whether a loaded KVM module has nested virtualization enabled.

    Returns a NestedVirtualization tuple of (enabled, module_name);
    module_name is None when no module reports nesting.
    """
    if cpuarch.is_ppc(cpuarch.real()):
        return NestedVirtualization(False, None)

    if cpuarch.is_s390(cpuarch.real()):
        kvm_modules = ("kvm",)
    else:
        kvm_modules = ("kvm_intel", "kvm_amd")

    for kvm_module in kvm_modules:
        kvm_module_path = "/sys/module/%s/parameters/nested" % kvm_module
        try:
            with open(kvm_module_path) as f:
                if f.readline().strip() in ("Y", "1"):
                    return NestedVirtualization(True, kvm_module)
        except IOError:
            # Lazy %-args instead of eager '%' formatting: the message is
            # only rendered when debug logging is actually enabled.
            logging.debug('%s nested virtualization not detected',
                          kvm_module, exc_info=True)

    logging.debug('Could not determine status of nested '
                  'virtualization')
    return NestedVirtualization(False, None)
def _fake_caps_arch(caps, arch):
    '''
    Mutate 'caps' to act as an architecture set by fake_kvm_architecture
    configuration option.

    Arguments:

    caps        The host capabilities as returned by hooking.read_json.
    arch        The architecture to fake (a cpuarch identifier).

    Raises cpuarch.UnsupportedArchitecture for unknown architectures.
    '''
    # (removed a no-op `arch = arch` self-assignment)
    caps['kvmEnabled'] = True

    if cpuarch.is_x86(arch):
        caps['emulatedMachines'] = _X86_64_MACHINES
        caps['cpuModel'] = 'Intel(Fake) CPU'

        flag_list = ['vmx', 'sse2', 'nx']

        # Only pull in real host flags when the host itself is x86_64.
        if cpuarch.real() == cpuarch.X86_64:
            flag_list += cpuinfo.flags()

        flags = set(flag_list)

        caps['cpuFlags'] = ','.join(flags) + ',model_486,model_pentium,' \
            'model_pentium2,model_pentium3,model_pentiumpro,' \
            'model_qemu32,model_coreduo,model_core2duo,model_n270,' \
            'model_Conroe,model_Penryn,model_Nehalem,model_Opteron_G1'
    elif cpuarch.is_ppc(arch):
        caps['emulatedMachines'] = _PPC64LE_MACHINES
        caps['cpuModel'] = 'POWER 8(fake)'
        caps['cpuFlags'] = 'powernv,model_POWER8'
    elif cpuarch.is_arm(arch):
        caps['emulatedMachines'] = _AARCH64_MACHINES
        caps['cpuModel'] = 'AARCH64 (fake)'
        caps['cpuFlags'] = ''
    else:
        raise cpuarch.UnsupportedArchitecture(arch)
def compatible(model, vendor):
    """Check via libvirt whether the host can run the given CPU model."""
    if not vendor:
        return False

    # POWER CPUs are special case because we run them using versioned
    # compat mode (aka host-model). Libvirt's compareCPU call uses the
    # selected mode - we have to be sure to tell it to compare CPU
    # capabilities based on the compat features, not the CPU itself.
    if cpuarch.is_ppc(cpuarch.real()):
        mode_xml = " mode='host-model'"
        model = model.lower()
    else:
        mode_xml = ''

    xml = ('<cpu match="minimum"%s><model>%s</model>'
           '<vendor>%s</vendor></cpu>' % (mode_xml, model, vendor))

    try:
        result = c.compareCPU(xml, 0)
    except libvirt.libvirtError as e:
        # hack around libvirt BZ#795836
        if e.get_error_code() == libvirt.VIR_ERR_OPERATION_INVALID:
            return False
        raise

    return result in (libvirt.VIR_CPU_COMPARE_SUPERSET,
                      libvirt.VIR_CPU_COMPARE_IDENTICAL)
def uuid():
    """
    Return the host UUID string, or None when it cannot be determined.

    Lookup order: the node ID file P_VDSM_NODE_ID if present; otherwise
    `dmidecode -s system-uuid` on x86, or /proc/device-tree/system-id
    on POWER.
    """
    host_UUID = None

    try:
        if os.path.exists(P_VDSM_NODE_ID):
            with open(P_VDSM_NODE_ID) as f:
                host_UUID = f.readline().replace("\n", "")
        else:
            arch = cpuarch.real()
            if cpuarch.is_x86(arch):
                ret, out, err = execCmd([constants.EXT_DMIDECODE,
                                         "-s",
                                         "system-uuid"],
                                        raw=True,
                                        sudo=True)
                # dmidecode prefixes informational lines with '#'.
                out = '\n'.join(line for line in out.splitlines()
                                if not line.startswith('#'))

                if ret == 0 and 'Not' not in out:
                    # Avoid error string - 'Not Settable' or 'Not Present'
                    host_UUID = out.strip()
                else:
                    logging.warning('Could not find host UUID.')
            elif cpuarch.is_ppc(arch):
                # eg. output IBM,03061C14A
                try:
                    with open('/proc/device-tree/system-id') as f:
                        systemId = f.readline()
                    host_UUID = systemId.rstrip('\0').replace(',', '')
                except IOError:
                    logging.warning('Could not find host UUID.')
    # UUID retrieval is best-effort, but a bare `except:` also swallowed
    # SystemExit/KeyboardInterrupt; Exception is the broadest safe catch.
    except Exception:
        logging.error("Error retrieving host UUID", exc_info=True)

    return host_UUID
def _caps_arch_element(capfile, arch):
    """Return the <arch> element for `arch` from a libvirt CPU map file."""
    with open(capfile) as xml:
        cpu_map = ET.fromstring(xml.read())

    # In libvirt CPU map XML, both x86_64 and x86 are
    # the same architecture, so in order to find all
    # the CPU models for this architecture, 'x86'
    # must be used
    if cpuarch.is_x86(arch):
        arch = 'x86'
    elif cpuarch.is_ppc(arch):
        arch = 'ppc64'

    # Keep scanning so the LAST matching element wins, as before.
    found = None
    for element in cpu_map.findall('arch'):
        if element.get('name') == arch:
            found = element
    return found
def _cpuinfo():
    '''
    Parse cpuinfo-like file, keeping the values in module's runtime
    variables.

    Reads the file at module-level _PATH and returns a CpuInfo built
    from the parsed fields.

    NOTE(review): the original docstring described an optional `source`
    argument, but the function takes no parameters — the path is fixed
    by _PATH.
    '''
    fields = {}

    # Pre-seed keys that a given architecture's cpuinfo never reports,
    # so CpuInfo(**fields) always receives a complete set of arguments.
    if cpuarch.is_ppc(cpuarch.real()):
        fields['flags'] = ['powernv']
    if cpuarch.is_x86(cpuarch.real()):
        fields['platform'] = 'unavailable'
        fields['machine'] = 'unavailable'
        fields['ppcmodel'] = 'unavailable'
    if cpuarch.is_arm(cpuarch.real()):
        fields['platform'] = 'unavailable'
        fields['machine'] = 'unavailable'
        fields['ppcmodel'] = 'unavailable'
    if cpuarch.is_s390(cpuarch.real()):
        fields['platform'] = 'unavailable'
        fields['machine'] = 'unavailable'
        fields['ppcmodel'] = 'unavailable'

    with open(_PATH) as info:
        for line in info:
            if not line.strip():
                continue

            # Each cpuinfo line is "key : value"; keys differ per arch.
            key, value = [part.strip() for part in line.split(':', 1)]

            if key == 'flags':  # x86_64
                fields['flags'] = value.split()
            elif key == 'Features':  # aarch64
                fields['flags'] = value.split()
            elif key == 'features':  # s390
                fields['flags'] = value.split()
            elif key == 'cpu MHz':  # x86_64
                fields['frequency'] = value
            elif key == 'BogoMIPS':  # aarch64
                fields['frequency'] = value
            elif key == 'clock':  # ppc64, ppc64le
                # Strip the trailing unit suffix from e.g. "3425.000000MHz".
                fields['frequency'] = value[:-3]
            elif key == 'cpu MHz dynamic':  # s390
                # s390 reports both static and dynamic frequencies with
                # dynamic <= stat (nominal), so dynamic matches the
                # x86_64 frequency semantics.
                fields['frequency'] = value
            elif key == 'model name':  # x86_64
                fields['model'] = value
            elif key == 'CPU part':  # aarch64
                fields['model'] = value
            elif re.match(r'processor \d+', key):  # s390
                # The machine type is embedded in the per-processor line.
                match = re.search(r'\bmachine\s*=\s*(\w+)', value)
                if match:
                    fields['model'] = match.group(1)
            elif key == 'model':  # ppc64le
                fields['ppcmodel'] = value
            elif key == 'cpu':  # ppc64, ppc64le
                fields['model'] = value
            elif key == 'platform':  # ppc64, ppc64le
                fields['platform'] = value
            elif key == 'machine':  # ppc64, ppc64le
                fields['machine'] = value

            # CpuInfo takes exactly six fields; stop once all are known.
            # NOTE(review): this relies on no key being parsed twice
            # before all six are filled — confirm when modifying.
            if len(fields) == 6:
                break

    # Older s390 machine versions don't report frequency.
    if 'frequency' not in fields:
        fields['frequency'] = 'unavailable'

    return CpuInfo(**fields)
def appendCpu(self, hugepages_shared=False):
    """
    Add guest CPU definition.

    <cpu match="exact">
        <model>qemu64</model>
        <topology sockets="S" cores="C" threads="T"/>
        <feature policy="require" name="sse2"/>
        <feature policy="disable" name="svm"/>
    </cpu>

    For POWER8, there is no point in trying to use baseline CPU for flags
    since there are only HW features. There are 2 ways of creating a
    valid POWER8 element that we support:

        <cpu>
            <model>POWER{X}</model>
        </cpu>

    This translates to -cpu POWER{X} (where {X} is version of the
    processor - 7 and 8), which tells qemu to emulate the CPU in POWER8
    family that it's capable of emulating - in case of hardware
    virtualization, that will be the host cpu (so an equivalent
    of -cpu host). Using this option does not limit migration between
    POWER8 machines - it is still possible to migrate from e.g. POWER8 to
    POWER8e. The second option is not supported and serves only for
    reference:

        <cpu mode="host-model">
            <model>power{X}</model>
        </cpu>

    where {X} is the binary compatibility version of POWER that we
    require (6, 7, 8). This translates to qemu's -cpu host,compat=power{X}.

    Using the second option also does not limit migration between POWER8
    machines - it is still possible to migrate from e.g. POWER8 to
    POWER8e.
    """
    cpu = vmxml.Element('cpu')

    if cpuarch.is_x86(self.arch):
        cpu.setAttrs(match='exact')

        # cpuType is "model[,+feat][,-feat]..."; the first token is the
        # model or a pass-through/host-model selector.
        features = self.conf.get('cpuType', 'qemu64').split(',')
        model = features[0]

        if model == 'hostPassthrough':
            cpu.setAttrs(mode='host-passthrough')
        elif model == 'hostModel':
            cpu.setAttrs(mode='host-model')
        else:
            cpu.appendChildWithArgs('model', text=model)

            # This hack is for backward compatibility as the libvirt
            # does not allow 'qemu64' guest on intel hardware
            if model == 'qemu64' and '+svm' not in features:
                features += ['-svm']

            for feature in features[1:]:
                # convert Linux name of feature to libvirt
                # (Linux spells it sse4_x, libvirt sse4.x)
                if feature[1:6] == 'sse4_':
                    feature = feature[0] + 'sse4.' + feature[6:]

                featureAttrs = {'name': feature[1:]}
                # Leading '+'/'-' selects the libvirt feature policy.
                if feature[0] == '+':
                    featureAttrs['policy'] = 'require'
                elif feature[0] == '-':
                    featureAttrs['policy'] = 'disable'
                cpu.appendChildWithArgs('feature', **featureAttrs)
    elif cpuarch.is_ppc(self.arch):
        features = self.conf.get('cpuType', 'POWER8').split(',')
        model = features[0]
        cpu.appendChildWithArgs('model', text=model)

    if ('smpCoresPerSocket' in self.conf or
            'smpThreadsPerCore' in self.conf):
        maxVCpus = int(self._getMaxVCpus())
        cores = int(self.conf.get('smpCoresPerSocket', '1'))
        threads = int(self.conf.get('smpThreadsPerCore', '1'))
        # Sockets are derived so that sockets*cores*threads == maxVCpus.
        cpu.appendChildWithArgs('topology',
                                sockets=str(maxVCpus // cores // threads),
                                cores=str(cores), threads=str(threads))

    # CPU-pinning support
    # see http://www.ovirt.org/wiki/Features/Design/cpu-pinning
    if 'cpuPinning' in self.conf:
        cputune = vmxml.Element('cputune')
        cpuPinning = self.conf.get('cpuPinning')
        # Sorted for a deterministic element order in the generated XML.
        for cpuPin in sorted(cpuPinning.keys()):
            cputune.appendChildWithArgs('vcpupin', vcpu=cpuPin,
                                        cpuset=cpuPinning[cpuPin])
        self.dom.appendChild(cputune)

    # Guest numa topology support
    # see http://www.ovirt.org/Features/NUMA_and_Virtual_NUMA
    if 'guestNumaNodes' in self.conf:
        numa = vmxml.Element('numa')
        guestNumaNodes = sorted(self.conf.get('guestNumaNodes'),
                                key=itemgetter('nodeIndex'))
        for vmCell in guestNumaNodes:
            # conf stores MiB; libvirt expects KiB.
            nodeMem = int(vmCell['memory']) * 1024
            numa_args = {'cpus': vmCell['cpus'],
                         'memory': str(nodeMem)}
            if hugepages_shared:
                numa_args.update({'memAccess': 'shared'})
            numa.appendChildWithArgs('cell', **numa_args)
        cpu.appendChild(numa)

    self.dom.appendChild(cpu)