def test_remove_child(self):
    """Removing the second <hello> child must drop exactly that element."""
    parent = vmxml.find_first(self._dom, 'topelement')
    children = list(vmxml.find_all(parent, 'hello'))
    victim = children[1]
    vmxml.remove_child(parent, victim)
    remaining = list(vmxml.find_all(parent, 'hello'))
    expected = [child for child in children if child is not victim]
    self.assertEqual(remaining, expected)
def replace_cpu_pinning(vm, dom, target_vcpupin):
    """
    Replace <vcpupin> elements in <cputune>.

    This removes all vCPU pinning added by VDSM and honors the list of
    manually pinned CPUs. It then adds and replaces the remaining
    <vcpupin> elements to match the requested vCPU pinning.

    :param vm: associated VM object
    :type vm: vdsm.virt.VM
    :param dom: DOM of the libvirt XML
    :type dom: xml.etree.ElementTree.Element
    :param target_vcpupin: Dictionary describing requested vCPU pinning --
      key: vCPU ID, value: cpuset.
    :type target_vcpupin: dict
    :return: the modified DOM (the same object passed in as ``dom``)
    """
    cputune = dom.find('cputune')
    if cputune is not None:
        # Drop all pinning added by VDSM; under the manual CPU policy keep
        # the entries for CPUs the user pinned explicitly.
        for vcpu in vmxml.find_all(cputune, 'vcpupin'):
            vcpu_id = int(vcpu.get('vcpu'))
            if (vm.cpu_policy() == CPU_POLICY_MANUAL and
                    vcpu_id in vm.manually_pinned_cpus()):
                continue
            cputune.remove(vcpu)
    # Reconfigure CPU pinning based on the call parameter
    if target_vcpupin:
        if isinstance(target_vcpupin, dict):
            target_vcpupin = _flatten_cpusets(target_vcpupin)
        else:
            # Make sure we don't modify original list
            target_vcpupin = list(target_vcpupin)
        if cputune is None:
            cputune = ET.Element('cputune')
            dom.append(cputune)
        # First modify existing elements
        for vcpupin in vmxml.find_all(cputune, 'vcpupin'):
            vcpu_id = int(vcpupin.get('vcpu'))
            if 0 <= vcpu_id < len(target_vcpupin):
                vcpupin.set('cpuset', str(target_vcpupin[vcpu_id]))
                # Mark as handled so the loop below does not duplicate it.
                target_vcpupin[vcpu_id] = None
        # Now create elements for pinning that was not there before.
        # This should happen only for pinning that was removed above. It
        # should not happen for manual CPU pinning because it would render
        # the value of manuallyPinedCPUs metadata invalid.
        for vcpu_id, cpuset in enumerate(target_vcpupin):
            if cpuset is None:
                continue
            vcpupin = ET.Element('vcpupin')
            vcpupin.set('vcpu', str(vcpu_id))
            vcpupin.set('cpuset', str(cpuset))
            cputune.append(vcpupin)
    return dom
def all_channels(self):
    """Yield (name, path) for each channel that has both attributes set."""
    if not self.devices:
        return
    for chan in vmxml.find_all(self.devices, 'channel'):
        target_name = vmxml.find_attr(chan, 'target', 'name')
        source_path = vmxml.find_attr(chan, 'source', 'path')
        if target_name and source_path:
            yield target_name, source_path
def test_find_all(self, start_tag, tag, number):
    """find_all() must yield exactly `number` elements, all tagged `tag`."""
    if start_tag is None:
        root = self._dom
    else:
        root = vmxml.find_first(self._dom, 'topelement')
    found = list(vmxml.find_all(root, tag))
    self.assertTrue(all(vmxml.tag(element) == tag for element in found))
    self.assertEqual(len(found), number)
def vnuma_count(self):
    """
    :return: Number of vNUMA cells defined in VM. Zero is returned
      when NUMA is not defined.
    """
    numa = vmxml.find_first(self._dom, 'cpu/numa', None)
    if numa is None:
        return 0
    # Count without materializing an intermediate list.
    return sum(1 for _ in vmxml.find_all(numa, 'cell'))
def from_xml_tree(cls, log, dev, meta):
    """
    Create a device instance from the interface XML element ``dev`` and
    the VDSM metadata dict ``meta``.

    Raises ``MissingNetwork`` when neither the metadata nor the XML
    source bridge specifies a network to join.
    """
    params = {
        'device': core.find_device_type(dev),
        'type': dev.tag,
        'custom': meta.get('custom', {}),
        'vmid': meta['vmid'],
        'vm_custom': {},
        'specParams': {},
    }
    core.update_device_params(params, dev)
    # Simple attributes read from optional child elements.
    params.update(core.get_xml_elem(dev, 'macAddr', 'mac', 'address'))
    params.update(core.get_xml_elem(dev, 'nicModel', 'model', 'type'))
    params.update(core.get_xml_elem(dev, 'bootOrder', 'boot', 'order'))
    if params['device'] == 'hostdev':
        params.update(_get_hostdev_params(dev))
    link = vmxml.find_first(dev, 'link', None)
    if link is not None:
        # NOTE(review): a <link> element lacking the 'state' attribute is
        # treated as link-down here -- confirm this matches libvirt
        # semantics before relying on it.
        if link.attrib.get('state', 'down') == 'down':
            params['linkActive'] = False
        else:
            params['linkActive'] = True
    vlan = vmxml.find_first(dev, 'vlan', None)
    if vlan is not None:
        params['specParams']['vlanid'] = vmxml.find_attr(vlan, 'tag', 'id')
    mtu = vmxml.find_first(dev, "mtu", None)
    if mtu is not None:
        params['mtu'] = int(vmxml.attr(mtu, 'size'))
    filterref = vmxml.find_first(dev, 'filterref', None)
    if filterref is not None:
        params['filter'] = vmxml.attr(filterref, 'filter')
        # Each <parameter> child becomes a {'name': ..., 'value': ...} dict.
        params['filterParameters'] = [{
            'name': param.attrib['name'],
            'value': param.attrib['value'],
        } for param in vmxml.find_all(filterref, 'parameter')]
    driver = vmxml.find_first(dev, 'driver', None)
    if driver is not None:
        # Only the 'queues' driver attribute is preserved, as a custom
        # property.
        params['custom'].update(
            core.parse_device_attrs(driver, ('queues', )))
    sndbuf = dev.find('./tune/sndbuf')
    if sndbuf is not None:
        params['vm_custom']['sndbuf'] = vmxml.text(sndbuf)
    bandwidth = vmxml.find_first(dev, 'bandwidth', None)
    if bandwidth is not None:
        # Copy inbound/outbound QoS attributes verbatim into specParams.
        for mode in ('inbound', 'outbound'):
            elem = vmxml.find_first(bandwidth, mode, None)
            if elem is not None:
                params['specParams'][mode] = elem.attrib.copy()
    # Metadata takes precedence over the XML <source bridge=...> value.
    net = (meta.get('network', None) or
           vmxml.find_attr(dev, 'source', 'bridge'))
    if net is None:
        raise MissingNetwork("no network to join")
    params['network'] = net
    _update_port_mirroring(params, meta)
    core.update_device_params_from_meta(params, meta)
    return cls(log, **params)
def _update_source_params(params, disk_type, source): path = None if disk_type == 'block': path = source.attrib.get('dev') elif disk_type == 'file': path = source.attrib.get('file') elif 'protocol' in source.attrib: path = source.attrib.get('name') params['protocol'] = source.attrib.get('protocol') params['hosts'] = [ host.attrib.copy() for host in vmxml.find_all(source, 'host') ] params['path'] = path
def io_tune_dom_all_to_list(dom):
    """
    This method converts all VmDiskDeviceTuneLimits structures
    in the XML to a list of dictionaries

    :param dom: XML DOM object to parse
    :return: List of VmDiskDeviceTuneLimits dictionaries
    """
    return [io_tune_dom_to_values(device)
            for device in vmxml.find_all(dom, "device")]
def all_channels(self):
    """
    Returns a tuple (name, path, state) for each channel device in
    domain XML. Name and path are always non-empty strings, state is
    non-empty string (connected/disconnected) or None if the channel
    state is unknown.
    """
    if self.devices is None:
        return
    for chan in vmxml.find_all(self.devices, 'channel'):
        name = vmxml.find_attr(chan, 'target', 'name')
        path = vmxml.find_attr(chan, 'source', 'path')
        state = vmxml.find_attr(chan, 'target', 'state')
        if name and path:
            # Normalize an empty/missing state to None.
            yield name, path, (state if state else None)
def _migration_params(self, muri):
    """
    Build the dictionary of libvirt migration parameters.

    :param muri: migration URI, used when the migration is not tunneled
    :return: dict mapping libvirt.VIR_MIGRATE_PARAM_* constants to values
    """
    params = {}
    if self._maxBandwidth:
        params[libvirt.VIR_MIGRATE_PARAM_BANDWIDTH] = self._maxBandwidth
    if self._parallel is not None:
        params[libvirt.VIR_MIGRATE_PARAM_PARALLEL_CONNECTIONS] = \
            self._parallel
    if not self.tunneled:
        params[libvirt.VIR_MIGRATE_PARAM_URI] = str(muri)
    if self._consoleAddress:
        graphics = 'spice' if self._vm.hasSpice else 'vnc'
        params[libvirt.VIR_MIGRATE_PARAM_GRAPHICS_URI] = str(
            '%s://%s' % (graphics, self._consoleAddress)
        )
    if self._encrypted:
        # Use the standard host name or IP address when checking
        # the remote certificate. Not the migration destination,
        # which may be e.g. an IP address from a migration
        # network, not present in the certificate.
        params[libvirt.VIR_MIGRATE_PARAM_TLS_DESTINATION] = \
            normalize_literal_addr(self.remoteHost)
    xml = self._vm.migratable_domain_xml()
    # REQUIRED_FOR: destination Vdsm < 4.3
    if self._legacy_payload_path is not None:
        alias, path = self._legacy_payload_path
        dom = xmlutils.fromstring(xml)
        source = dom.find(".//alias[@name='%s']/../source" % (alias,))
        source.set('file', path)
        xml = xmlutils.tostring(dom)
    # Remove & replace CPU pinning added by VDSM
    dom = xmlutils.fromstring(xml)
    cputune = dom.find('cputune')
    if cputune is not None:
        for vcpu in vmxml.find_all(cputune, 'vcpupin'):
            vcpu_id = int(vcpu.get('vcpu'))
            if (self._vm.cpu_policy() == cpumanagement.CPU_POLICY_MANUAL
                    and vcpu_id in self._vm.manually_pinned_cpus()):
                continue
            cputune.remove(vcpu)
    if self._destination_cpusets is not None:
        if cputune is None:
            # BUGFIX: the local variable `xml` (a string) shadowed the
            # `xml` package here, so `xml.etree.ElementTree.Element`
            # raised AttributeError. Import the class explicitly instead.
            from xml.etree.ElementTree import Element
            cputune = Element('cputune')
            dom.append(cputune)
        for vcpupin in self._destination_cpusets:
            cputune.append(vcpupin)
    xml = xmlutils.tostring(dom)
    self._vm.log.debug("Migrating domain XML: %s", xml)
    params[libvirt.VIR_MIGRATE_PARAM_DEST_XML] = xml
    return params
def _update_source_params(params, disk_type, source): path = None if source is None: path = '' elif disk_type == 'block': path = source.attrib.get('dev') elif disk_type == 'file': path = source.attrib.get('file', '') elif 'protocol' in source.attrib: path = source.attrib.get('name') params['protocol'] = source.attrib.get('protocol') params['hosts'] = [ host.attrib.copy() for host in vmxml.find_all(source, 'host') ] params['path'] = path
def pinned_cpus(self):
    """
    :return: A dictionary in which key is vCPU ID and value is a frozenset
      with IDs of pCPUs the vCPU is pinned to. If a vCPU is not pinned to
      any pCPU it is not listed in the dictionary. Empty dictionary is
      returned if none of the vCPUs has a pinning defined.
    """
    cputune = vmxml.find_first(self._dom, 'cputune', None)
    if cputune is None:
        return {}
    pinning = {}
    for vcpupin in vmxml.find_all(cputune, 'vcpupin'):
        vcpu = vcpupin.get('vcpu', None)
        cpuset = vcpupin.get('cpuset', None)
        if vcpu is None or cpuset is None:
            continue
        cpus = taskset.cpulist_parse(cpuset)
        # Skip entries whose cpuset parses to no CPUs at all.
        if len(cpus) > 0:
            pinning[int(vcpu)] = cpus
    return pinning
def _migration_params(self, muri):
    """
    Build the dictionary of libvirt migration parameters.

    :param muri: migration URI, used when the migration is not tunneled
    :return: dict mapping libvirt.VIR_MIGRATE_PARAM_* constants to values
    """
    params = {}
    if self._maxBandwidth:
        params[libvirt.VIR_MIGRATE_PARAM_BANDWIDTH] = self._maxBandwidth
    if self._parallel is not None:
        params[libvirt.VIR_MIGRATE_PARAM_PARALLEL_CONNECTIONS] = \
            self._parallel
    if not self.tunneled:
        params[libvirt.VIR_MIGRATE_PARAM_URI] = str(muri)
    if self._consoleAddress:
        graphics = 'spice' if self._vm.hasSpice else 'vnc'
        params[libvirt.VIR_MIGRATE_PARAM_GRAPHICS_URI] = str(
            '%s://%s' % (graphics, self._consoleAddress))
    if self._encrypted:
        # Use the standard host name or IP address when checking
        # the remote certificate. Not the migration destination,
        # which may be e.g. an IP address from a migration
        # network, not present in the certificate.
        params[libvirt.VIR_MIGRATE_PARAM_TLS_DESTINATION] = \
            normalize_literal_addr(self.remoteHost)
    xml = self._vm.migratable_domain_xml()
    # REQUIRED_FOR: destination Vdsm < 4.3
    dom = xmlutils.fromstring(xml)
    if self._legacy_payload_path is not None:
        # Rewrite the payload <source file=...> to the legacy path
        # expected by old destination hosts.
        alias, path = self._legacy_payload_path
        source = dom.find(".//alias[@name='%s']/../source" % (alias, ))
        source.set('file', path)
    # Remove & replace CPU pinning added by VDSM
    dom = cpumanagement.replace_cpu_pinning(self._vm, dom,
                                            self._destination_cpusets)
    if self._destination_numa_nodesets is not None:
        # Rewrite each <memnode> nodeset to the one requested for the
        # destination host; cells outside the requested list are left
        # untouched.
        numatune = dom.find('numatune')
        if numatune is not None:
            for memnode in vmxml.find_all(numatune, 'memnode'):
                cellid = int(memnode.get('cellid'))
                if (cellid >= 0 and
                        cellid < len(self._destination_numa_nodesets)):
                    memnode.set('nodeset',
                                self._destination_numa_nodesets[cellid])
    xml = xmlutils.tostring(dom)
    self._vm.log.debug("Migrating domain XML: %s", xml)
    params[libvirt.VIR_MIGRATE_PARAM_DEST_XML] = xml
    return params
def _update_source_params(params, disk_type, source): path = None if source is None: path = '' elif disk_type == 'block': path = source.attrib.get('dev') reservations = vmxml.find_first(source, 'reservations', None) if (reservations is not None and reservations.attrib.get('managed') == 'yes'): params['managed_reservation'] = True elif disk_type == 'file': path = source.attrib.get('file', '') elif 'protocol' in source.attrib: path = source.attrib.get('name') params['protocol'] = source.attrib.get('protocol') params['hosts'] = [ host.attrib.copy() for host in vmxml.find_all(source, 'host') ] params['path'] = path
def is_vnc_secure(vmParams, log):
    """
    This function checks if VNC is not mis-configured to offer insecure,
    free-for-all access. The engine can send the XML with empty password,
    but it's acceptable IFF qemu uses SASL as the authentication mechanism.

    is_vnc_secure returns False in such case (i.e. no password and no SASL),
    otherwise VNC connection is considered secure.
    """
    parsed = xmlutils.fromstring(vmParams['xml'])
    for device in vmxml.find_all(parsed, 'graphics'):
        if vmxml.attr(device, 'type') != 'vnc':
            continue
        # An empty or missing 'passwdValidTo' attribute is the way to
        # say "don't use password auth".
        has_password_auth = vmxml.attr(device, 'passwdValidTo') != ''
        if not has_password_auth and not utils.sasl_enabled():
            log.warning("VNC not secure: passwdValidTo empty or missing"
                        " and SASL not configured")
            return False
    return True
def from_xml_tree(cls, log, dev, meta):
    """
    Create a device instance from the interface XML element ``dev`` and
    the metadata dict ``meta``.

    Raises RuntimeError when no network to join can be determined.
    """
    params = {
        'custom': {},
        'vm_custom': {},
        'specParams': {},
    }
    params.update(core.parse_device_params(dev))
    params['device'] = params['type']
    # Simple attributes read from optional child elements.
    for key, tag, attribute in (
            ('macAddr', 'mac', 'address'),
            ('nicModel', 'model', 'type'),
            ('bootOrder', 'boot', 'order')):
        params.update(_get_xml_elem(dev, key, tag, attribute))
    filterref = vmxml.find_first(dev, 'filterref', None)
    if filterref is not None:
        params['filter'] = vmxml.attr(filterref, 'filter')
        params['filterParameters'] = {
            p.attrib['name']: p.attrib['value']
            for p in vmxml.find_all(filterref, 'parameter')
        }
    driver = vmxml.find_first(dev, 'driver', None)
    if driver is not None:
        # Only the 'queues' driver attribute is kept, as a custom property.
        params['custom'].update(
            core.parse_device_attrs(driver, ('queues', )))
    sndbuf = dev.find('./tune/sndbuf')
    if sndbuf is not None:
        params['vm_custom']['sndbuf'] = vmxml.text(sndbuf)
    bandwidth = vmxml.find_first(dev, 'bandwidth', None)
    if bandwidth is not None:
        # Copy inbound/outbound QoS attributes verbatim into specParams.
        for direction in ('inbound', 'outbound'):
            limit = vmxml.find_first(bandwidth, direction, None)
            if limit is not None:
                params['specParams'][direction] = limit.attrib.copy()
    # Metadata takes precedence over the XML <source bridge=...> value.
    network = meta.get('network', None)
    if not network:
        network = vmxml.find_attr(dev, 'source', 'bridge')
    if network is None:
        raise RuntimeError('could not detect the network to join')
    params['network'] = network
    return cls(log, **params)
def get_device_elements(self, tagName):
    """Return all device elements matching `tagName` via vmxml.find_all."""
    devices = self.devices
    return vmxml.find_all(devices, tagName)
def test_attributes(self, tag, index, result):
    """attributes() of the index-th `tag` element must equal `result`."""
    matching = list(vmxml.find_all(self._dom, tag))
    target = matching[index]
    self.assertEqual(vmxml.attributes(target), result)
def get_device_elements_with_attrs(self, tag_name, **kwargs):
    """Yield `tag_name` device elements whose attributes all match kwargs."""
    wanted = list(kwargs.items())
    for candidate in vmxml.find_all(self.devices, tag_name):
        matches = True
        for attr_name, expected in wanted:
            if vmxml.attr(candidate, attr_name) != expected:
                matches = False
                break
        if matches:
            yield candidate