def appendOs(self, use_serial_console=False):
    """
    Add the <os> element to the domain:

        <os>
            <type arch="x86_64" machine="pc">hvm</type>
            <boot dev="cdrom"/>
            <kernel>/tmp/vmlinuz-2.6.18</kernel>
            <initrd>/tmp/initrd-2.6.18.img</initrd>
            <cmdline>ARGs 1</cmdline>
            <smbios mode="sysinfo"/>
        </os>

    If 'use_serial_console' is true and the guest is x86, also emit:

        <os>
            ...
            <bios useserial="yes"/>
        </os>
    """
    os_element = vmxml.Element('os')
    self.dom.appendChild(os_element)

    # Fall back to the per-architecture default when no machine type
    # was requested in the configuration.
    machine_type = self.conf.get('emulatedMachine',
                                 _DEFAULT_MACHINES[self.arch])
    os_element.appendChildWithArgs('type', text='hvm', arch=self.arch,
                                   machine=machine_type)

    # Translate the qemu-style one-letter boot order into libvirt
    # <boot dev="..."/> elements; an unknown letter raises KeyError.
    boot_dev_map = {'a': 'fd', 'c': 'hd', 'd': 'cdrom', 'n': 'network'}
    for boot_char in self.conf.get('boot', ''):
        os_element.appendChildWithArgs('boot', dev=boot_dev_map[boot_char])

    # Optional direct-kernel-boot elements, emitted only when configured.
    for conf_key, tag in (('initrd', 'initrd'),
                          ('kernel', 'kernel'),
                          ('kernelArgs', 'cmdline')):
        value = self.conf.get(conf_key)
        if value:
            os_element.appendChildWithArgs(tag, text=value)

    if cpuarch.is_x86(self.arch):
        os_element.appendChildWithArgs('smbios', mode='sysinfo')

    if conv.tobool(self.conf.get('bootMenuEnable', False)):
        os_element.appendChildWithArgs('bootmenu', enable='yes',
                                       timeout=str(_BOOT_MENU_TIMEOUT))

    if use_serial_console and cpuarch.is_x86(self.arch):
        os_element.appendChildWithArgs('bios', useserial='yes')
def appendClock(self):
    """
    Add the <clock> element to the domain:

        <clock offset="variable" adjustment="-3600">
            <timer name="rtc" tickpolicy="catchup"/>
        </clock>

    When hyperv is enabled (x86 only):

        <clock offset="variable" adjustment="-3600">
            <timer name="hypervclock" present="yes"/>
            <timer name="rtc" tickpolicy="catchup"/>
            <timer name="hpet" present="no"/>
        </clock>
    """
    # Collect the timer attribute sets first, preserving the order in
    # which they must appear in the XML.
    timer_attr_sets = []
    if conv.tobool(self.conf.get('hypervEnable', 'false')):
        timer_attr_sets.append({'name': 'hypervclock', 'present': 'yes'})
    timer_attr_sets.append({'name': 'rtc', 'tickpolicy': 'catchup'})
    timer_attr_sets.append({'name': 'pit', 'tickpolicy': 'delay'})
    if cpuarch.is_x86(self.arch):
        timer_attr_sets.append({'name': 'hpet', 'present': 'no'})

    clock = vmxml.Element('clock', offset='variable',
                          adjustment=str(self.conf.get('timeOffset', 0)))
    for attrs in timer_attr_sets:
        clock.appendChildWithArgs('timer', **attrs)

    self.dom.appendChild(clock)
def _parse_domain_cpu(dom, conf, arch):
    """
    Extract CPU topology, pinning and guest NUMA data from the domain
    XML 'dom' into the 'conf' dictionary, then delegate architecture
    specific parsing to the x86/ppc helpers.
    """
    topology = dom.find('./cpu/topology')
    if topology is not None:
        attrs = topology.attrib
        conf['smpCoresPerSocket'] = attrs['cores']
        conf['smpThreadsPerCore'] = attrs['threads']
        total_vcpus = (int(attrs['sockets']) * int(attrs['cores']) *
                       int(attrs['threads']))
        conf['maxVCpus'] = str(total_vcpus)

    if dom.find('./cputune') is not None:
        pinning = dict(
            (pin.attrib['vcpu'], pin.attrib['cpuset'])
            for pin in dom.findall('./cputune/vcpupin'))
        # Only record pinning when at least one vcpupin element exists.
        if pinning:
            conf['cpuPinning'] = pinning

    if dom.find('./cpu/numa') is not None:
        conf['guestNumaNodes'] = [
            {
                'nodeIndex': index,
                'cpus': ','.join(_expand_list(cell.attrib['cpus'])),
                # libvirt reports KiB; conf stores MiB.
                'memory': str(int(cell.attrib['memory']) // 1024),
            }
            for index, cell in enumerate(dom.findall('./cpu/numa/cell'))
        ]

    if cpuarch.is_x86(arch):
        _parse_domain_cpu_x86(dom, conf)
    elif cpuarch.is_ppc(arch):
        _parse_domain_cpu_ppc(dom, conf)
def uuid():
    """
    Return this host's unique identifier as a string, or None when it
    cannot be determined.

    The identifier is taken from P_VDSM_NODE_ID when that file exists;
    otherwise it is derived from DMI data on x86 or from
    /proc/device-tree/system-id on ppc.
    """
    host_UUID = None

    try:
        if os.path.exists(P_VDSM_NODE_ID):
            with open(P_VDSM_NODE_ID) as f:
                host_UUID = f.readline().replace("\n", "")
        else:
            arch = cpuarch.real()
            if cpuarch.is_x86(arch):
                try:
                    hw_info = dmidecodeUtil.getHardwareInfoStructure()
                    host_UUID = hw_info['systemUUID'].lower()
                except KeyError:
                    logging.warning('Could not find host UUID.')
            elif cpuarch.is_ppc(arch):
                # eg. output IBM,03061C14A
                try:
                    with open('/proc/device-tree/system-id') as f:
                        systemId = f.readline()
                    host_UUID = systemId.rstrip('\0').replace(',', '')
                except IOError:
                    logging.warning('Could not find host UUID.')
    # Narrowed from a bare 'except:', which would also swallow
    # SystemExit and KeyboardInterrupt.
    except Exception:
        logging.error("Error retrieving host UUID", exc_info=True)

    return host_UUID
def getHardwareInfo(*args, **kwargs):
    """
    Return the hardware information structure for the current host
    architecture, or an empty dict on architectures with no
    implementation.
    """
    arch = cpuarch.real()
    if cpuarch.is_x86(arch):
        from vdsm.dmidecodeUtil import getHardwareInfoStructure
    elif cpuarch.is_ppc(arch):
        from vdsm.ppc64HardwareInfo import getHardwareInfoStructure
    else:
        # Hardware info retrieval is not implemented for other
        # architectures.
        return {}
    return getHardwareInfoStructure()
def replace_placeholders(dom, arch, serial=None):
    """
    Replace the placeholders, if any, in the domain XML.
    This is the entry point orchestration function.
    See the documentation of the specific functions
    for the supported placeholders.
    """
    # Placeholder substitution is only performed for x86 guests.
    if not cpuarch.is_x86(arch):
        return

    osd = osinfo.version()
    os_version = osd.get('version', '') + '-' + osd.get('release', '')
    if serial is None:
        serial_number = host.uuid()
    else:
        serial_number = serial
    libvirtxml.update_sysinfo(dom, osd['name'], os_version, serial_number)
def appendInput(self):
    """
    Add an input device element to the domain devices:

        <input bus="ps2" type="mouse"/>
    """
    if conv.tobool(self.conf.get('tabletEnable')):
        input_type, input_bus = 'tablet', 'usb'
    elif cpuarch.is_x86(self.arch):
        # PS/2 mouse is only meaningful on x86 guests.
        input_type, input_bus = 'mouse', 'ps2'
    else:
        input_type, input_bus = 'mouse', 'usb'

    self._devices.appendChildWithArgs('input', type=input_type,
                                      bus=input_bus)
def make_minimal_domain(dom):
    """
    Enhance a Domain object with the non-device elements that need no
    extra parameters - only the trivial CPU architecture check.

    Args:
        dom (libvirtxml.Domain): domain object to enhance. Use a
            freshly-built domain whose append* methods have not yet
            been called.

    Returns:
        The same 'dom' object, for call chaining.

    Example:
        dom = make_minimal_domain(Domain(conf, log, arch))
    """
    dom.appendMetadata()
    dom.appendClock()
    # The <features> element is only emitted for x86 guests.
    if cpuarch.is_x86(dom.arch):
        dom.appendFeatures()
    return dom
def _fake_caps_arch(caps, arch):
    '''
    Mutate 'caps' to act as an architecture set by fake_kvm_architecture
    configuration option.

    Arguments:

    caps  The host capabilities as returned by hooking.read_json.
    arch  Target architecture identifier (one of the cpuarch values).

    Raises:

    cpuarch.UnsupportedArchitecture when 'arch' is not x86, ppc or arm.
    '''
    # NOTE: the original body contained a no-op 'arch = arch'
    # self-assignment; it has been removed.
    caps['kvmEnabled'] = True

    if cpuarch.is_x86(arch):
        caps['emulatedMachines'] = _X86_64_MACHINES
        caps['cpuModel'] = 'Intel(Fake) CPU'

        flag_list = ['vmx', 'sse2', 'nx']

        # On a real x86_64 host also advertise the actual CPU flags so
        # capability checks stay meaningful.
        if cpuarch.real() == cpuarch.X86_64:
            flag_list += cpuinfo.flags()

        flags = set(flag_list)

        caps['cpuFlags'] = ','.join(flags) + ',model_486,model_pentium,' \
            'model_pentium2,model_pentium3,model_pentiumpro,' \
            'model_qemu32,model_coreduo,model_core2duo,model_n270,' \
            'model_Conroe,model_Penryn,model_Nehalem,model_Opteron_G1'
    elif cpuarch.is_ppc(arch):
        caps['emulatedMachines'] = _PPC64LE_MACHINES
        caps['cpuModel'] = 'POWER 8(fake)'
        caps['cpuFlags'] = 'powernv,model_POWER8'
    elif cpuarch.is_arm(arch):
        caps['emulatedMachines'] = _AARCH64_MACHINES
        caps['cpuModel'] = 'AARCH64 (fake)'
        caps['cpuFlags'] = ''
    else:
        raise cpuarch.UnsupportedArchitecture(arch)
def _caps_arch_element(capfile, arch):
    """
    Return the <arch> element matching 'arch' from the libvirt CPU map
    XML stored in 'capfile', or None when no such element exists.
    """
    with open(capfile) as xml:
        cpu_map = ET.fromstring(xml.read())

    # In libvirt CPU map XML, both x86_64 and x86 are the same
    # architecture, so in order to find all the CPU models for this
    # architecture, 'x86' must be used. Using elif here avoids
    # re-testing the already-rewritten 'arch' value (the original code
    # called is_ppc() on the mapped name).
    if cpuarch.is_x86(arch):
        arch = 'x86'
    elif cpuarch.is_ppc(arch):
        arch = 'ppc64'

    # Keep the last matching element, as the original loop did; the
    # redundant emptiness guard was dropped (iterating an empty list
    # is a no-op).
    arch_element = None
    for element in cpu_map.findall('arch'):
        if element.get('name') == arch:
            arch_element = element

    return arch_element
def uuid():
    """
    Return this host's unique identifier as a string, or None when it
    cannot be determined.

    The identifier is taken from P_VDSM_NODE_ID when that file exists;
    otherwise it is obtained via 'dmidecode -s system-uuid' on x86 or
    from /proc/device-tree/system-id on ppc.
    """
    host_UUID = None

    try:
        if os.path.exists(P_VDSM_NODE_ID):
            with open(P_VDSM_NODE_ID) as f:
                host_UUID = f.readline().replace("\n", "")
        else:
            arch = cpuarch.real()
            if cpuarch.is_x86(arch):
                ret, out, err = execCmd([constants.EXT_DMIDECODE,
                                         "-s",
                                         "system-uuid"],
                                        raw=True,
                                        sudo=True)
                # Strip dmidecode comment lines before inspecting the
                # output.
                out = '\n'.join(line for line in out.splitlines()
                                if not line.startswith('#'))

                if ret == 0 and 'Not' not in out:
                    # Avoid error string - 'Not Settable' or
                    # 'Not Present'
                    host_UUID = out.strip()
                else:
                    logging.warning('Could not find host UUID.')
            elif cpuarch.is_ppc(arch):
                # eg. output IBM,03061C14A
                try:
                    with open('/proc/device-tree/system-id') as f:
                        systemId = f.readline()
                    host_UUID = systemId.rstrip('\0').replace(',', '')
                except IOError:
                    logging.warning('Could not find host UUID.')
    # Narrowed from a bare 'except:', which would also swallow
    # SystemExit and KeyboardInterrupt.
    except Exception:
        logging.error("Error retrieving host UUID", exc_info=True)

    return host_UUID
def appendCpu(self, hugepages_shared=False):
    """
    Add guest CPU definition.

    <cpu match="exact">
        <model>qemu64</model>
        <topology sockets="S" cores="C" threads="T"/>
        <feature policy="require" name="sse2"/>
        <feature policy="disable" name="svm"/>
    </cpu>

    For POWER8, there is no point in trying to use baseline CPU for
    flags since there are only HW features. There are 2 ways of
    creating a valid POWER8 element that we support:

        <cpu>
            <model>POWER{X}</model>
        </cpu>

    This translates to -cpu POWER{X} (where {X} is version of the
    processor - 7 and 8), which tells qemu to emulate the CPU in
    POWER8 family that it's capable of emulating - in case of hardware
    virtualization, that will be the host cpu (so an equivalent of
    -cpu host). Using this option does not limit migration between
    POWER8 machines - it is still possible to migrate from e.g. POWER8
    to POWER8e. The second option is not supported and serves only for
    reference:

        <cpu mode="host-model">
            <model>power{X}</model>
        </cpu>

    where {X} is the binary compatibility version of POWER that we
    require (6, 7, 8). This translates to qemu's
    -cpu host,compat=power{X}.

    Using the second option also does not limit migration between
    POWER8 machines - it is still possible to migrate from e.g. POWER8
    to POWER8e.
    """
    cpu = vmxml.Element('cpu')

    if cpuarch.is_x86(self.arch):
        cpu.setAttrs(match='exact')

        # 'cpuType' is a comma-separated list: the model name followed
        # by '+'/'-'-prefixed feature flags.
        features = self.conf.get('cpuType', 'qemu64').split(',')
        model = features[0]

        if model == 'hostPassthrough':
            cpu.setAttrs(mode='host-passthrough')
        elif model == 'hostModel':
            cpu.setAttrs(mode='host-model')
        else:
            cpu.appendChildWithArgs('model', text=model)

            # This hack is for backward compatibility as the libvirt
            # does not allow 'qemu64' guest on intel hardware
            if model == 'qemu64' and '+svm' not in features:
                features += ['-svm']

            for feature in features[1:]:
                # convert Linux name of feature to libvirt
                # (e.g. '+sse4_1' -> '+sse4.1'); slice [1:6] skips the
                # leading '+'/'-' sign.
                if feature[1:6] == 'sse4_':
                    feature = feature[0] + 'sse4.' + feature[6:]

                # feature[0] is the policy sign, feature[1:] the name.
                featureAttrs = {'name': feature[1:]}
                if feature[0] == '+':
                    featureAttrs['policy'] = 'require'
                elif feature[0] == '-':
                    featureAttrs['policy'] = 'disable'
                cpu.appendChildWithArgs('feature', **featureAttrs)
    elif cpuarch.is_ppc(self.arch):
        features = self.conf.get('cpuType', 'POWER8').split(',')
        model = features[0]
        cpu.appendChildWithArgs('model', text=model)

    # Derive the socket count from the maximum vCPU count and the
    # configured cores/threads per socket.
    if ('smpCoresPerSocket' in self.conf or
            'smpThreadsPerCore' in self.conf):
        maxVCpus = int(self._getMaxVCpus())
        cores = int(self.conf.get('smpCoresPerSocket', '1'))
        threads = int(self.conf.get('smpThreadsPerCore', '1'))
        cpu.appendChildWithArgs('topology',
                                sockets=str(maxVCpus // cores // threads),
                                cores=str(cores), threads=str(threads))

    # CPU-pinning support
    # see http://www.ovirt.org/wiki/Features/Design/cpu-pinning
    if 'cpuPinning' in self.conf:
        cputune = vmxml.Element('cputune')
        cpuPinning = self.conf.get('cpuPinning')
        # Sorted for a deterministic element order in the XML.
        for cpuPin in sorted(cpuPinning.keys()):
            cputune.appendChildWithArgs('vcpupin', vcpu=cpuPin,
                                        cpuset=cpuPinning[cpuPin])
        self.dom.appendChild(cputune)

    # Guest numa topology support
    # see http://www.ovirt.org/Features/NUMA_and_Virtual_NUMA
    if 'guestNumaNodes' in self.conf:
        numa = vmxml.Element('numa')
        guestNumaNodes = sorted(
            self.conf.get('guestNumaNodes'), key=itemgetter('nodeIndex'))
        for vmCell in guestNumaNodes:
            # Cell memory is configured in MiB; libvirt expects KiB.
            nodeMem = int(vmCell['memory']) * 1024
            numa_args = {'cpus': vmCell['cpus'],
                         'memory': str(nodeMem)}
            if hugepages_shared:
                numa_args.update({'memAccess': 'shared'})
            numa.appendChildWithArgs('cell', **numa_args)
        cpu.appendChild(numa)

    self.dom.appendChild(cpu)
def _cpuinfo():
    '''
    Parse the cpuinfo-like file at module-level _PATH (normally
    /proc/cpuinfo) and return the collected values.

    Returns:

    CpuInfo built from the fields: flags, frequency, model, ppcmodel,
    platform and machine. Fields that the current architecture does
    not report are preset to 'unavailable'.

    (The previous docstring documented a 'source' argument that the
    function never accepted; the file path is always _PATH.)
    '''
    fields = {}
    # Hoisted: the original called cpuarch.real() once per branch.
    arch = cpuarch.real()

    if cpuarch.is_ppc(arch):
        # ppc64/ppc64le expose no 'flags' line in /proc/cpuinfo.
        fields['flags'] = ['powernv']

    # These three fields are only reported on ppc64/ppc64le; the
    # original duplicated this block for each of the three arches.
    if cpuarch.is_x86(arch) or cpuarch.is_arm(arch) or cpuarch.is_s390(arch):
        fields['platform'] = 'unavailable'
        fields['machine'] = 'unavailable'
        fields['ppcmodel'] = 'unavailable'

    with open(_PATH) as info:
        for line in info:
            if not line.strip():
                continue

            key, value = [part.strip() for part in line.split(':', 1)]

            if key == 'flags':  # x86_64
                fields['flags'] = value.split()
            elif key == 'Features':  # aarch64
                fields['flags'] = value.split()
            elif key == 'features':  # s390
                fields['flags'] = value.split()
            elif key == 'cpu MHz':  # x86_64
                fields['frequency'] = value
            elif key == 'BogoMIPS':  # aarch64
                fields['frequency'] = value
            elif key == 'clock':  # ppc64, ppc64le
                # Strip the trailing 'MHz' unit suffix.
                fields['frequency'] = value[:-3]
            elif key == 'cpu MHz dynamic':  # s390
                # s390 reports both static and dynamic frequencies with
                # dynamic <= stat (nominal), so dynamic matches the
                # x86_64 frequency semantics.
                fields['frequency'] = value
            elif key == 'model name':  # x86_64
                fields['model'] = value
            elif key == 'CPU part':  # aarch64
                fields['model'] = value
            elif re.match(r'processor \d+', key):  # s390
                match = re.search(r'\bmachine\s*=\s*(\w+)', value)
                if match:
                    fields['model'] = match.group(1)
            elif key == 'model':  # ppc64le
                fields['ppcmodel'] = value
            elif key == 'cpu':  # ppc64, ppc64le
                fields['model'] = value
            elif key == 'platform':  # ppc64, ppc64le
                fields['platform'] = value
            elif key == 'machine':  # ppc64, ppc64le
                fields['machine'] = value

            # All six CpuInfo fields collected - stop before scanning
            # the remaining (identical) per-CPU stanzas.
            if len(fields) == 6:
                break

    # Older s390 machine versions don't report frequency.
    if 'frequency' not in fields:
        fields['frequency'] = 'unavailable'

    return CpuInfo(**fields)