def run(test, params, env):
    """
    Test the virtio bus automated assignment for passthrough devices

    1. prepare a passthrough device xml without bus defined
    2. start the guest and check if the device can be attached
    3. check if the new device is properly listed in guest xml
    """
    if not libvirt_version.version_compare(6, 3, 0):
        test.cancel('The feature of automatic assignment of virtio bus for '
                    'passthrough devices is supported since version 6.3.0')

    vm_name = params.get("main_vm", "avocado-vt-vm1")

    # Create a new passthrough device without bus assigned
    input_dev = Input(type_name="passthrough")
    input_dev.source_evdev = "/dev/input/event1"
    xml = input_dev.get_xml()
    logging.debug('Attached device xml:\n{}'.format(input_dev.xmltreefile))
    logging.debug('New Passthrough device XML is available at:{}'.format(xml))

    # Start the VM
    vm = env.get_vm(vm_name)
    vm_xml = VMXML.new_from_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    if vm.is_alive():
        vm.destroy()
    # Drop pre-existing input devices so only the attached one remains.
    vm_xml.remove_all_device_by_type('input')
    try:
        vm.start()
        vm.wait_for_login().close()
        # Attach new device and check for result
        cmd_result = virsh.attach_device(vm_name, input_dev.get_xml(),
                                         debug=True)
        if cmd_result.exit_status != 0:
            test.error(cmd_result.stderr_text)
        # Get the VM XML and check for a new device
        vm_xml = VMXML.new_from_dumpxml(vm_name)
        device_list = vm_xml.get_devices()
        for device in device_list:
            if device['device_tag'] == 'input':
                device_xml = device['xml']
                # Create a new instance of Input device and fill with input
                # device found
                input_device = Input(type_name="passthrough")
                input_device.set_xml(device_xml)
                if input_device.type_name == "passthrough":
                    with open(device_xml, 'r') as device_xml_file:
                        for line in device_xml_file:
                            logging.debug(line.rstrip())
                    # The attached device carried no bus; libvirt >= 6.3.0
                    # is expected to have assigned "virtio" automatically.
                    if input_device.input_bus != "virtio":
                        test.fail("The newly attached passthrough device has no"
                                  " added virtio as a bus by default.")
                    else:
                        logging.debug("Newly added passthrough device has a "
                                      "virtio automatically assigned as a bus.")
    finally:
        if vm.is_alive():
            virsh.destroy(vm_name)
        vm_xml_backup.sync()
def _find_comm_paths(session):
    """
    Resolve the guest- and host-side paths of the virtio channel.

    :param session: guest shell session used to inspect /sys
    :return: tuple (guest_path, host_path)
    """
    # Explicit source_path wins; otherwise use the auto-generated one.
    host_path = _get_autopath() if source_path is None else source_path

    base_path = '/sys/class/virtio-ports'
    vports = session.cmd_output('ls %s' % base_path).split()
    status = session.cmd_status('ls %s/*/name' % base_path)
    if status == 0:
        # Map every vport's advertised name onto its directory entry.
        name_port_map = {}
        for port in vports:
            name_file = os.path.join(base_path, port, 'name')
            port_name = session.cmd_output('cat %s' % name_file).strip()
            name_port_map[port_name] = port
        if expect_name not in name_port_map:
            test.fail("Expect get vport name %s, got %s" %
                      (expect_name, name_port_map))
        vport = name_port_map[expect_name]
    else:
        # No 'name' attribute exposed: fall back to the channel address
        # port number from the live domain XML.
        active_xml = VMXML.new_from_dumpxml(vm_name)
        port_number = active_xml.xmltreefile.find(
            '/devices/channel/address').get('port')
        vport = 'vport1p%s' % port_number
    guest_path = '/dev/%s' % vport
    return guest_path, host_path
def get_search_patt_qemu_line():
    """
    Check if the guest XML has the expected content.

    :return: -device pci-bridge,chassis_nr=1,id=pci.1,bus=pci.0,addr=0x3
    """
    cur_vm_xml = VMXML.new_from_dumpxml(vm_name)
    qemu_list = []
    # Case 1: no extra pci controller — the pci-root must have index 0.
    if no_pci_controller == "yes":
        _, qemu_list = get_patt_inx_ctl(cur_vm_xml, qemu_list, '0')
        return qemu_list
    # Case 2: pci-bridge controller indexes must match pci_bus_number.
    if int(pci_bus_number) > 0:
        return get_patt_non_zero_bus(cur_vm_xml, qemu_list)
    # Case 3: every controller in the gap [index_second..index] must exist.
    if index and index_second and int(index) > 0 and int(index_second) > 0:
        for idx in range(int(index_second), int(index) + 1):
            _, qemu_list = get_patt_inx_ctl(cur_vm_xml, qemu_list, str(idx))
        return qemu_list
    # Case 4: controllers must exist with every index in [1..index].
    if index and int(index) > 0 and not index_second:
        for idx in range(1, int(index) + 1):
            found, qemu_list = get_patt_inx_ctl(cur_vm_xml, qemu_list,
                                                str(idx))
            if not found:
                test.fail("Can not find %s controller "
                          "with index %s." % (model, str(idx)))
        return qemu_list
def get_controller_addr(cntlr_type=None, model=None, index=None):
    """
    Get the address of testing controller from VM XML as a string with
    format "bus:slot.function".

    :param cntlr_type: controller type
    :param model: controller model
    :param index: controller index
    :return: an address string of the specified controller
    """
    # Root controllers carry no <address> element.
    if model in ['pci-root', 'pcie-root']:
        return None

    addr_str = None
    cur_vm_xml = VMXML.new_from_dumpxml(vm_name)
    for cntlr in cur_vm_xml.devices.by_device_tag('controller'):
        # A None filter matches anything; otherwise require equality.
        type_ok = cntlr_type is None or cntlr.type == cntlr_type
        model_ok = model is None or cntlr.model == model
        index_ok = index is None or cntlr.index == index
        if not (type_ok and model_ok and index_ok):
            continue
        addr_elem = cntlr.address
        if addr_elem is None:
            test.error("Can not find 'Address' "
                       "element for the controller")
        bus = int(addr_elem.attrs.get('bus'), 0)
        slot = int(addr_elem.attrs.get('slot'), 0)
        func = int(addr_elem.attrs.get('function'), 0)
        addr_str = '%02d:%02x.%1d' % (bus, slot, func)
        logging.debug("Controller address is %s", addr_str)
        break
    return addr_str
def _find_comm_paths(session):
    """
    Resolve the guest- and host-side paths of the virtio channel.

    :param session: guest shell session used to inspect /sys
    :return: tuple (guest_path, host_path)
    """
    host_path = source_path if source_path is not None else _get_autopath()

    base_path = '/sys/class/virtio-ports'
    vports = session.cmd_output('ls %s' % base_path).split()
    status = session.cmd_status('ls %s/*/name' % base_path)
    name_port_map = {}
    if status == 0:
        # Read each vport's advertised name and index by it.
        for port in vports:
            name_file = os.path.join(base_path, port, 'name')
            port_name = session.cmd_output('cat %s' % name_file).strip()
            name_port_map[port_name] = port
        if expect_name not in name_port_map:
            raise error.TestFail("Expect get vport name %s, got %s" %
                                 (expect_name, name_port_map))
        vport = name_port_map[expect_name]
    else:
        # Fall back to the port number from the live channel address.
        active_xml = VMXML.new_from_dumpxml(vm_name)
        port_number = active_xml.xmltreefile.find(
            '/devices/channel/address').get('port')
        vport = 'vport1p%s' % port_number
    guest_path = '/dev/%s' % vport
    return guest_path, host_path
def check_dumpxml():
    """
    Check whether the added devices are shown in the guest xml
    """
    logging.info("------Checking guest dumpxml------")
    xml_after_adding_device = VMXML.new_from_dumpxml(vm_name)
    guest_xml = str(xml_after_adding_device)
    logging.debug("xml after add tpm dev is %s", xml_after_adding_device)

    # Check tpm model ("tpm-tis" is the default when none is given)
    model_patt = '<tpm model="%s">' % (tpm_model if tpm_model else "tpm-tis")
    if model_patt not in guest_xml:
        test.fail("Can not find the %s tpm device xml "
                  "in the guest xml file." % tpm_model)
    # Check backend type
    if '<backend type="%s"' % backend_type not in guest_xml:
        test.fail("Can not find the %s backend type xml for tpm dev "
                  "in the guest xml file." % backend_type)
    # Check backend version
    if backend_version:
        version_patt = "\'emulator\' version=\"%s\"" % backend_version
        if version_patt not in guest_xml:
            test.fail("Can not find the %s backend version xml for tpm dev "
                      "in the guest xml file." % backend_version)
    # Check device path (passthrough backend only)
    if backend_type == "passthrough":
        if '<device path="/dev/tpm0"' not in guest_xml:
            test.fail("Can not find the %s device path xml for tpm dev "
                      "in the guest xml file." % device_path)
    logging.info('------PASS on guest dumpxml check------')
def is_hotunplug_interface_ok():
    """
    Return True when no interface with the target MAC remains in the
    live guest XML, i.e. the hot-unplug succeeded.
    """
    xml_after_detach = VMXML.new_from_dumpxml(vm_name)
    remaining = [
        nic for nic in xml_after_detach.get_devices('interface')
        if nic.mac_address == mac
    ]
    logging.debug('iface list after detach: %s', remaining)
    return remaining == []
def check_dumpxml():
    """
    Check whether the added devices are shown in the guest xml
    """
    pattern = '<input bus="%s" type="%s">' % (bus_type, input_type)
    dumped_xml = str(VMXML.new_from_dumpxml(vm_name))
    if pattern not in dumped_xml:
        test.fail("Can not find the %s input device xml "
                  "in the guest xml file." % input_type)
def _check_xml(test):
    """
    Check defined XML against expectation.

    Builds an expected Channel element from the test parameters taken
    from the enclosing scope (channel_type, source_mode, source_path,
    source_autopath, target_type, channel) and compares it against the
    first <channel> element of the live domain XML.

    :param test: the test object, used to report failure
    """
    expected_channel = Channel(channel_type)
    # Start from whatever <source> attributes libvirt generated, if any.
    try:
        source_dict = channel.source
    except LibvirtXMLNotFoundError:
        source_dict = {}
    if channel_type == 'pty':
        # pty channels are expected to carry no <source> attributes.
        source_dict = {}
    elif channel_type == 'unix':
        # Fill in the mode/path defaults libvirt applies to unix channels.
        if source_mode is None:
            if source_path:
                source_dict['mode'] = 'connect'
            else:
                source_dict['mode'] = 'bind'
        if source_path is None:
            source_dict['path'] = _get_autopath()
        if source_autopath:
            del source_dict['autopath']
    target_dict = {}
    if target_type == 'virtio':
        # virtio targets get an auto-assigned virtio-serial address.
        expected_channel.address = {
            'bus': '0',
            'controller': '0',
            'port': '1',
            'type': 'virtio-serial',
        }
        if 'type' in channel.target:
            target_dict['type'] = channel.target['type']
        if 'name' in channel.target:
            target_dict['name'] = channel.target['name']
    elif target_type == 'guestfwd':
        if 'type' in channel.target:
            target_dict['type'] = channel.target['type']
        if 'address' in channel.target:
            target_dict['address'] = channel.target['address']
        if 'port' in channel.target:
            target_dict['port'] = channel.target['port']
    # Only set source/target on the expectation when non-empty.
    if source_dict:
        expected_channel.source = source_dict
    if target_dict:
        expected_channel.target = target_dict
    # Compare against the first <channel> of the running domain.
    current_xml = VMXML.new_from_dumpxml(vm_name)
    channel_elem = current_xml.xmltreefile.find('devices/channel')
    cur_channel = Channel.new_from_element(channel_elem)
    if not (expected_channel == cur_channel):
        test.fail("Expect generate channel:\n%s\nBut got:\n%s" %
                  (expected_channel, cur_channel))
def _check_xml():
    """
    Check defined XML against expectation.

    Legacy (autotest-era) variant: identical logic to the test.fail
    version, but reports failure through error.TestFail. Builds an
    expected Channel from the enclosing scope's parameters and compares
    it against the first <channel> element of the live domain XML.
    """
    expected_channel = Channel(channel_type)
    # Start from whatever <source> attributes libvirt generated, if any.
    try:
        source_dict = channel.source
    except LibvirtXMLNotFoundError:
        source_dict = {}
    if channel_type == 'pty':
        # pty channels are expected to carry no <source> attributes.
        source_dict = {}
    elif channel_type == 'unix':
        # Fill in the mode/path defaults libvirt applies to unix channels.
        if source_mode is None:
            if source_path:
                source_dict['mode'] = 'connect'
            else:
                source_dict['mode'] = 'bind'
        if source_path is None:
            source_dict['path'] = _get_autopath()
        if source_autopath:
            del source_dict['autopath']
    target_dict = {}
    if target_type == 'virtio':
        # virtio targets get an auto-assigned virtio-serial address.
        expected_channel.address = {
            'bus': '0',
            'controller': '0',
            'port': '1',
            'type': 'virtio-serial',
        }
        if 'type' in channel.target:
            target_dict['type'] = channel.target['type']
        if 'name' in channel.target:
            target_dict['name'] = channel.target['name']
    elif target_type == 'guestfwd':
        if 'type' in channel.target:
            target_dict['type'] = channel.target['type']
        if 'address' in channel.target:
            target_dict['address'] = channel.target['address']
        if 'port' in channel.target:
            target_dict['port'] = channel.target['port']
    # Only set source/target on the expectation when non-empty.
    if source_dict:
        expected_channel.source = source_dict
    if target_dict:
        expected_channel.target = target_dict
    # Compare against the first <channel> of the running domain.
    current_xml = VMXML.new_from_dumpxml(vm_name)
    channel_elem = current_xml.xmltreefile.find('devices/channel')
    cur_channel = Channel.new_from_element(channel_elem)
    if not (expected_channel == cur_channel):
        raise error.TestFail("Expect generate channel:\n%s\nBut got:\n%s"
                             % (expected_channel, cur_channel))
def check_dumpxml():
    """
    Check whether the added devices are shown in the guest xml
    """
    if with_packed:
        # When packed virtqueues are requested, look for the driver
        # attribute instead of the plain input element.
        pattern = "<driver packed=\"%s\"" % (driver_packed)
    else:
        pattern = "<input bus=\"%s\" type=\"%s\">" % (bus_type, input_type)
    logging.debug('Searching for %s in vm xml', pattern)
    xml_after_adding_device = VMXML.new_from_dumpxml(vm_name)
    logging.debug('xml_after_adding_device:\n%s', xml_after_adding_device)
    if pattern not in str(xml_after_adding_device):
        test.fail("Can not find the %s input device xml "
                  "in the guest xml file." % input_type)
def check_mem_test_xml(model_type, mem_type, mem_size):
    """
    Check whether the added devices and attributes are shown in the
    guest xml
    """
    type_patt = "type=\'%s\'" % model_type
    mem_patt = "%s=\'%s\'" % (mem_type, mem_size)
    # Scan line by line: a line naming the model must also carry the
    # expected memory attribute.
    for xml_line in str(VMXML.new_from_dumpxml(vm_name)).splitlines():
        if type_patt in xml_line and mem_patt not in xml_line:
            test.fail("Can not find %s video device or memory mem_size"
                      "for %s is not as settings in the xml"
                      % (model_type, mem_type))
def check_dumpxml():
    """
    Check whether the added devices are shown in the guest xml
    """
    guest_xml = str(VMXML.new_from_dumpxml(vm_name))
    # Check sound model
    if "<sound model=\"%s\">" % sound_model not in guest_xml:
        test.fail("Can not find the %s sound device xml "
                  "in the guest xml file." % sound_model)
    # Check codec type, when one was configured
    if codec_type:
        if "<codec type=\"%s\" />" % codec_type not in guest_xml:
            test.fail("Can not find the %s codec xml for sound dev "
                      "in the guest xml file." % codec_type)
def get_controller_addr(cntlr_type=None, model=None, index=None,
                        cntlr_bus=None):
    """
    Get the address of testing controller from VM XML as a string with
    format
    a. "bus:slot.function" for pci address type
    b. "cssid:ssid.devno" for ccw address type

    :param cntlr_type: controller type, e.g. pci
    :param model: controller model, e.g. pcie-root-port
    :param index: controller index, e.g. '0'
    :param cntlr_bus: controller bus type, e.g. pci, ccw
    :return: a tuple including an address string, bus, slot,
             function, multifunction
    """
    # Root controllers never carry an <address> element.
    if model in ['pci-root', 'pcie-root']:
        return (None, None, None, None, None)

    addr_str = None
    cur_vm_xml = VMXML.new_from_dumpxml(vm_name)

    for elem in cur_vm_xml.devices.by_device_tag('controller'):
        # A None filter matches any value; otherwise require equality.
        if ((cntlr_type is None or elem.type == cntlr_type) and
                (model is None or elem.model == model) and
                (index is None or elem.index == index)):
            addr_elem = elem.address
            if addr_elem is None:
                test.error("Can not find 'Address' "
                           "element for the controller")
            p4 = None
            if 'ccw' == cntlr_bus:
                # ccw addressing: cssid/ssid/devno (no multifunction).
                p1 = int(addr_elem.attrs.get('cssid'), 0)
                p2 = int(addr_elem.attrs.get('ssid'), 0)
                p3 = int(addr_elem.attrs.get('devno'), 0)
            else:
                # pci addressing: bus/slot/function plus multifunction.
                p1 = int(addr_elem.attrs.get('bus'), 0)
                p2 = int(addr_elem.attrs.get('slot'), 0)
                p3 = int(addr_elem.attrs.get('function'), 0)
                p4 = addr_elem.attrs.get('multifunction')
            # NOTE(review): the same "%02d:%02x.%1d" (decimal:hex.decimal)
            # format is used for both pci and ccw addresses — confirm this
            # rendering is intended for ccw.
            addr_str = '%02d:%02x.%1d' % (p1, p2, p3)
            logging.debug("Controller address is %s", addr_str)
            # Return on the first matching controller.
            return (addr_str, p1, p2, p3, p4)
    # No matching controller found.
    return (None, None, None, None, None)
def check_heads_test_xml(model_type, is_primary=None, **kwargs):
    """
    Check whether the added devices and attributes are shown in the
    guest xml
    """
    # Primary (or unspecified) devices default to the primary head count.
    if is_primary or is_primary is None:
        fallback_heads = default_primary_heads
    else:
        fallback_heads = default_secondary_heads
    model_heads = kwargs.get("model_heads", fallback_heads)
    type_patt = "type=\'%s\'" % model_type
    heads_patt = "heads=\'%s\'" % model_heads
    # A line naming the model must also carry the expected heads count.
    for xml_line in str(VMXML.new_from_dumpxml(vm_name)).splitlines():
        if type_patt in xml_line and heads_patt not in xml_line:
            test.fail("Can not find %s video device or heads num"
                      "is not as settings in the xml" % model_type)
def check_dumpxml(vm_name):
    """
    Check whether the added devices are shown in the guest xml

    :param vm_name: current vm name
    """
    logging.info("------Checking guest dumpxml------")
    if tpm_model:
        pattern = '<tpm model="%s">' % tpm_model
    else:
        # The default tpm model is "tpm-tis"
        pattern = '<tpm model="tpm-tis">'
    # Check tpm model
    xml_after_adding_device = VMXML.new_from_dumpxml(vm_name)
    logging.debug("xml after add tpm dev is %s", xml_after_adding_device)
    if pattern not in astring.to_text(xml_after_adding_device):
        test.fail("Can not find the %s tpm device xml "
                  "in the guest xml file." % tpm_model)
    # Check backend type
    pattern = '<backend type="%s"' % backend_type
    if pattern not in astring.to_text(xml_after_adding_device):
        test.fail("Can not find the %s backend type xml for tpm dev "
                  "in the guest xml file." % backend_type)
    # Check backend version
    if backend_version:
        # 'none' maps to '2.0' here — presumably the libvirt default
        # emulator version; confirm against the libvirt docs.
        check_ver = backend_version if backend_version != 'none' else '2.0'
        pattern = '"emulator" version="%s"' % check_ver
        if pattern not in astring.to_text(xml_after_adding_device):
            test.fail(
                "Can not find the %s backend version xml for tpm dev "
                "in the guest xml file." % check_ver)
    # Check device path (passthrough backend only)
    if backend_type == "passthrough":
        pattern = '<device path="/dev/tpm0"'
        if pattern not in astring.to_text(xml_after_adding_device):
            test.fail("Can not find the %s device path xml for tpm dev "
                      "in the guest xml file." % device_path)
    # Check encryption secret
    if prepare_secret:
        pattern = '<encryption secret="%s" />' % encryption_uuid
        if pattern not in astring.to_text(xml_after_adding_device):
            test.fail("Can not find the %s secret uuid xml for tpm dev "
                      "in the guest xml file." % encryption_uuid)
    logging.info('------PASS on guest dumpxml check------')
def check_vm_xml(existed=True, inactive=False):
    """
    Check VM xml file to validate whether serial and console elements
    exists.

    :param existed: Default is True indicate whether element exist or not
    :param inactive: indicate VM xml is from active or inactive VM.
    """
    # Fetch only the XML we actually need (the original always dumped
    # the live XML and then discarded it when inactive=True).
    if inactive:
        current_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    else:
        current_xml = VMXML.new_from_dumpxml(vm_name)
    serial_elem = current_xml.xmltreefile.find('devices/serial')
    console_elem = current_xml.xmltreefile.find('devices/console')
    if existed:
        if serial_elem is None:
            test.fail("Expect generate serial"
                      "but found none.")
        # pci-serial devices do not get an automatic console element.
        if target_type != 'pci-serial' and console_elem is None:
            test.fail("Expect generate console automatically, "
                      "but found none.")
def get_disk_bus(disk_dev=None):
    """
    Get the bus list of guest disks

    :param disk_dev: The specified disk device; when None, buses of
                     all guest disks are returned
    :return: list for disks' buses
    """
    disk_bus_list = []
    cur_vm_xml = VMXML.new_from_dumpxml(vm_name)
    disk_dev_list = cur_vm_xml.get_disk_blk(vm_name)
    # Bail out only when a specific disk was requested but is absent.
    # (The original check also rejected disk_dev=None, which made the
    # "all disks" branch below unreachable.)
    if disk_dev and disk_dev not in disk_dev_list:
        return disk_bus_list
    for disk_index in range(0, len(disk_dev_list)):
        disk_target = disk_dev if disk_dev else disk_dev_list[disk_index]
        disk_bus = cur_vm_xml.get_disk_attr(vm_name, disk_target,
                                            'address', 'bus')
        disk_bus_list.append(disk_bus)
        # A specific disk yields a single-entry list.
        if disk_dev:
            break
    return disk_bus_list
def validate_multiple_controller(test, vm_name):
    """
    Validate that the guest's USB controllers are organized in index
    groups.

    :param test: test itself
    :param vm_name: vm name
    :raises: test.fail when the usb controllers' indexes do not match
             the expected grouping
    """
    # Three USB controller groups of four, indexed 0, 1 and 2.
    expect_index_list = [
        '0', '0', '0', '0', '1', '1', '1', '1', '2', '2', '2', '2'
    ]
    vm_xml = VMXML.new_from_dumpxml(vm_name)
    controllers = vm_xml.get_devices(device_type="controller")
    # Collect indexes of USB controllers only; the original also fetched
    # the full device list into an unused local, which is dropped here.
    actual_index_list = [dev.index for dev in controllers
                         if dev.type == "usb"]
    for actual_index, expect_index in zip(actual_index_list,
                                          expect_index_list):
        if actual_index != expect_index:
            test.fail("usb controllers are not organized by index group")
def check_guest_contr():
    """
    Check the controller in guest xml

    :raise: test.fail if the controller does not meet the expectation
    """
    cur_vm_xml = VMXML.new_from_dumpxml(vm_name)
    for cntl in cur_vm_xml.devices.by_device_tag('controller'):
        if (cntl.type == 'pci' and
                cntl.model == contr_model and
                cntl.index == contr_index):
            logging.debug(cntl.target)
            cntl_hotplug = cntl.target.get('hotplug')
            logging.debug("Got controller's hotplug:%s", cntl_hotplug)
            if cntl_hotplug != hotplug_option:
                test.fail("The controller's hotplug option is {}, "
                          "but expect {}".format(cntl_hotplug,
                                                 hotplug_option))
            break
    else:
        # for/else fires when no matching controller was found. (The
        # original tested "if not cntl" after the loop, which only
        # triggered when the domain had no controllers at all, since
        # the loop variable retains the last iterated controller.)
        test.fail("The controller with index {} is not found".format(
            contr_index))
def _verify_attach_channel_device(char_type, port_id):
    """
    Test unix socket communication between host and guest through channel

    :param char_type: the type of the channel
    :param port_id: the port id of the channel
    :return: True when a channel with the given port id is present in
             the live guest XML, False otherwise
    """
    result = virsh.attach_device(vm_name, xml_file)
    if result.stderr:
        # Report the actual channel type (the original message
        # hardcoded 'pty' regardless of char_type).
        test.fail('Failed to attach %s to %s. Result:\n %s'
                  % (char_type, vm_name, result))
    current_xml = VMXML.new_from_dumpxml(vm_name)
    channel_devices = current_xml.get_devices('channel')
    found_dev = False
    for channel_device in channel_devices:
        if channel_device.address['port'] == port_id:
            found_dev = True
            break
    if not found_dev:
        logging.debug("Failed to find channel with port %s", port_id)
    return found_dev
def get_controller_addr(cntlr_type=None, model=None, index=None):
    """
    Get the address of testing controller from VM XML as a string with
    format "bus:slot.function".
    """
    cur_vm_xml = VMXML.new_from_dumpxml(vm_name)
    for ctrl in cur_vm_xml.xmltreefile.findall('/devices/controller'):
        # Skip controllers that fail any of the supplied filters.
        if cntlr_type is not None and ctrl.get('type') != cntlr_type:
            continue
        if model is not None and ctrl.get('model') != model:
            continue
        if index is not None and ctrl.get('index') != index:
            continue
        addr_elem = ctrl.find('./address')
        if addr_elem is None:
            # Matching controller without an address — keep looking.
            continue
        addr = Address.new_from_element(addr_elem).attrs
        bus = int(addr['bus'], 0)
        slot = int(addr['slot'], 0)
        func = int(addr['function'], 0)
        addr_str = '%02d:%02d.%1d' % (bus, slot, func)
        logging.debug("String for address element %s is %s",
                      addr, addr_str)
        return addr_str
def get_controller_addr(cntlr_type=None, model=None, index=None):
    """
    Get the address of testing controller from VM XML as a string with
    format "bus:slot.function".
    """
    cur_vm_xml = VMXML.new_from_dumpxml(vm_name)
    # Each filter is (attribute, wanted value); None means "any".
    wanted = (('type', cntlr_type), ('model', model), ('index', index))
    for elem in cur_vm_xml.xmltreefile.findall('/devices/controller'):
        mismatch = any(value is not None and elem.get(attr) != value
                       for attr, value in wanted)
        if mismatch:
            continue
        addr_elem = elem.find('./address')
        if addr_elem is None:
            continue
        addr = Address.new_from_element(addr_elem).attrs
        fields = tuple(int(addr[key], 0)
                       for key in ('bus', 'slot', 'function'))
        addr_str = '%02d:%02d.%1d' % fields
        logging.debug("String for address element %s is %s",
                      addr, addr_str)
        return addr_str
def run(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Label the VM and disk with proper label.
    (3).Start VM and check the context.
    (4).Destroy VM and check the context.
    """
    # NOTE(review): Python 2 era code — 'except E, e' syntax and
    # dict.values()[0] indexing below are not valid Python 3.
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_start_destroy_host_selinux",
                               "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_start_destroy_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_start_destroy_vm_sec_model", "selinux")
    sec_label = params.get("svirt_start_destroy_vm_sec_label", None)
    sec_relabel = params.get("svirt_start_destroy_vm_sec_relabel", "yes")
    sec_dict = {
        'type': sec_type,
        'model': sec_model,
        'label': sec_label,
        'relabel': sec_relabel
    }
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    # Get varialbles about image.
    img_label = params.get('svirt_start_destroy_disk_label')
    # Label the disks of VM with img_label.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        # Remember the original context so it can be restored in cleanup.
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        utils_selinux.set_context_of_file(filename=disk_path,
                                          context=img_label)
    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the context of the VM.
    vmxml.set_seclabel(sec_dict)
    vmxml.sync()
    try:
        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with seclabel can access the image with the context.
            if status_error:
                raise error.TestFail("Test successed in negative case.")
            # Check the label of VM and image when VM is running.
            vm_context = utils_selinux.get_context_of_process(vm.get_pid())
            if (sec_type == "static") and (not vm_context == sec_label):
                raise error.TestFail(
                    "Label of VM is not expected after starting.\n"
                    "Detail: vm_context=%s, sec_label=%s" %
                    (vm_context, sec_label))
            disk_context = utils_selinux.get_context_of_file(
                filename=disks.values()[0]['source'])
            if (sec_relabel == "no") and (not disk_context == img_label):
                raise error.TestFail("Label of disk is not expected after VM "
                                     "starting.\n"
                                     "Detail: disk_context=%s, img_label=%s."
                                     % (disk_context, img_label))
            if sec_relabel == "yes":
                # With relabel enabled, the disk must carry the image
                # label that libvirt generated for the running domain.
                vmxml = VMXML.new_from_dumpxml(vm_name)
                imagelabel = vmxml.get_seclabel()['imagelabel']
                if not disk_context == imagelabel:
                    raise error.TestFail(
                        "Label of disk is not relabeled by VM\n"
                        "Detal: disk_context=%s, imagelabel=%s" %
                        (disk_context, imagelabel))
            # Check the label of disk after VM being destroyed.
            vm.destroy()
            img_label_after = utils_selinux.get_context_of_file(
                filename=disks.values()[0]['source'])
            if (not img_label_after == img_label):
                raise error.TestFail(
                    "Bug: Label of disk is not restored in VM "
                    "shuting down.\n"
                    "Detail: img_label_after=%s, "
                    "img_label_before=%s.\n" %
                    (img_label_after, img_label))
        except virt_vm.VMStartError, e:
            # Starting VM failed.
            # VM with seclabel can not access the image with the context.
            if not status_error:
                raise error.TestFail("Test failed in positive case."
                                     "error: %s" % e)
    finally:
        # clean up
        for path, label in backup_labels_of_disks.items():
            utils_selinux.set_context_of_file(filename=path, context=label)
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
def run_svirt_attach_disk(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Create a image to attached to VM.
    (3).Attach disk.
    (4).Start VM and check result.
    """
    # NOTE(review): Python 2 era code — the 'except E, e' syntax below
    # is not valid Python 3.
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_attach_disk_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_attach_disk_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_attach_disk_vm_sec_model", "selinux")
    sec_label = params.get("svirt_attach_disk_vm_sec_label", None)
    sec_relabel = params.get("svirt_attach_disk_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label,
                'relabel': sec_relabel}
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    # Get varialbles about image.
    img_label = params.get('svirt_attach_disk_disk_label')
    img_name = "svirt_disk"
    # Default label for the other disks.
    # To ensure VM is able to access other disks.
    default_label = params.get('svirt_attach_disk_disk_default_label', None)
    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the default label to other disks of vm.
    disks = vm.get_disk_devices()
    for disk in disks.values():
        utils_selinux.set_context_of_file(filename=disk['source'],
                                          context=default_label)
    # Init a QemuImg instance.
    params['image_name'] = img_name
    tmp_dir = data_dir.get_tmp_dir()
    image = qemu_storage.QemuImg(params, tmp_dir, img_name)
    # Create a image.
    img_path, result = image.create(params)
    # Set the context of the image.
    utils_selinux.set_context_of_file(filename=img_path, context=img_label)
    # Set the context of the VM.
    vmxml.set_seclabel(sec_dict)
    vmxml.sync()
    # Do the attach action.
    try:
        virsh.attach_disk(vm_name, source=img_path, target="vdf",
                          extra="--persistent", ignore_status=False)
    except error.CmdError:
        raise error.TestFail("Attach disk %s to vdf on VM %s failed." %
                             (img_path, vm.name))
    # Check result.
    try:
        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with set seclabel can access the image with the
            # set context.
            if status_error:
                raise error.TestFail('Test successed in negative case.')
        except virt_vm.VMStartError, e:
            # Starting VM failed.
            # VM with set seclabel can not access the image with the
            # set context.
            if not status_error:
                raise error.TestFail("Test failed in positive case."
                                     "error: %s" % e)
    finally:
        # clean up
        try:
            virsh.detach_disk(vm_name, target="vdf", extra="--persistent",
                              ignore_status=False)
        except error.CmdError:
            raise error.TestFail("Detach disk 'vdf' from VM %s failed." %
                                 vm.name)
        image.remove()
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
def run(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Config qemu conf if need
    (3).Label the VM and disk with proper label.
    (4).Start VM and check the context.
    (5).Destroy VM and check the context.
    """
    # NOTE(review): Python 2 era code — 'except E, e' syntax and
    # dict.values()[0] indexing below are not valid Python 3.
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_start_destroy_host_selinux",
                               "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_start_destroy_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_start_destroy_vm_sec_model", "selinux")
    sec_label = params.get("svirt_start_destroy_vm_sec_label", None)
    security_driver = params.get("security_driver", None)
    security_default_confined = params.get("security_default_confined", None)
    security_require_confined = params.get("security_require_confined", None)
    no_sec_model = 'yes' == params.get("no_sec_model", 'no')
    sec_relabel = params.get("svirt_start_destroy_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'relabel': sec_relabel}
    sec_dict_list = []
    if not no_sec_model:
        if "," in sec_model:
            # Multiple security models: one seclabel dict per model.
            sec_models = sec_model.split(",")
            for model in sec_models:
                sec_dict['model'] = model
                if sec_type != "none":
                    sec_dict['label'] = sec_label
                # Copy because sec_dict is mutated on each iteration.
                sec_dict_copy = sec_dict.copy()
                sec_dict_list.append(sec_dict_copy)
        else:
            sec_dict['model'] = sec_model
            if sec_type != "none":
                sec_dict['label'] = sec_label
            sec_dict_list.append(sec_dict)
    else:
        sec_dict_list.append(sec_dict)
    logging.debug("sec_dict_list is: %s" % sec_dict_list)
    poweroff_with_destroy = ("destroy" == params.get(
        "svirt_start_destroy_vm_poweroff", "destroy"))
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    # Get varialbles about image.
    img_label = params.get('svirt_start_destroy_disk_label')
    # Backup disk Labels.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    backup_ownership_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        # NOTE(review): the fd from os.open is never closed — potential
        # fd leak; confirm whether this matters for the test harness.
        f = os.open(disk_path, 0)
        stat_re = os.fstat(f)
        backup_ownership_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                          stat_re.st_gid)
    # Backup selinux of host.
    backup_sestatus = utils_selinux.get_status()
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # Set disk label
        for disk in disks.values():
            disk_path = disk['source']
            utils_selinux.set_context_of_file(filename=disk_path,
                                              context=img_label)
            # 107:107 — presumably the qemu user/group ids; confirm.
            os.chown(disk_path, 107, 107)
        # Set selinux of host.
        utils_selinux.set_status(host_sestatus)
        # Set qemu conf
        if security_driver:
            qemu_conf.set_string('security_driver', security_driver)
        if security_default_confined:
            qemu_conf.security_default_confined = security_default_confined
        if security_require_confined:
            qemu_conf.security_require_confined = security_require_confined
        if (security_driver or security_default_confined or
                security_require_confined):
            # Restart libvirtd so the qemu.conf changes take effect.
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()
        # Set the context of the VM.
        vmxml.set_seclabel(sec_dict_list)
        vmxml.sync()
        logging.debug("the domain xml is: %s" % vmxml.xmltreefile)
        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with seclabel can access the image with the context.
            if status_error:
                raise error.TestFail("Test succeeded in negative case.")
            # Check the label of VM and image when VM is running.
            vm_context = utils_selinux.get_context_of_process(vm.get_pid())
            if (sec_type == "static") and (not vm_context == sec_label):
                raise error.TestFail("Label of VM is not expected after "
                                     "starting.\n"
                                     "Detail: vm_context=%s, sec_label=%s" %
                                     (vm_context, sec_label))
            disk_context = utils_selinux.get_context_of_file(
                filename=disks.values()[0]['source'])
            if (sec_relabel == "no") and (not disk_context == img_label):
                raise error.TestFail("Label of disk is not expected after VM "
                                     "starting.\n"
                                     "Detail: disk_context=%s, img_label=%s." %
                                     (disk_context, img_label))
            if sec_relabel == "yes" and not no_sec_model:
                # With relabel enabled, the disk must carry the image
                # label libvirt generated for the running domain.
                vmxml = VMXML.new_from_dumpxml(vm_name)
                imagelabel = vmxml.get_seclabel()[0]['imagelabel']
                if not disk_context == imagelabel:
                    raise error.TestFail("Label of disk is not relabeled by "
                                         "VM\nDetal: disk_context="
                                         "%s, imagelabel=%s" %
                                         (disk_context, imagelabel))
            # Check the label of disk after VM being destroyed.
            if poweroff_with_destroy:
                vm.destroy(gracefully=False)
            else:
                vm.wait_for_login()
                vm.shutdown()
            img_label_after = utils_selinux.get_context_of_file(
                filename=disks.values()[0]['source'])
            if (not img_label_after == img_label):
                # Bug 547546 - RFE: the security drivers must remember original
                # permissions/labels and restore them after
                # https://bugzilla.redhat.com/show_bug.cgi?id=547546
                err_msg = "Label of disk is not restored in VM shuting down.\n"
                err_msg += "Detail: img_label_after=%s, " % img_label_after
                err_msg += "img_label_before=%s.\n" % img_label
                err_msg += "More info in https://bugzilla.redhat.com/show_bug"
                err_msg += ".cgi?id=547546"
                raise error.TestFail(err_msg)
        except virt_vm.VMStartError, e:
            # Starting VM failed.
            # VM with seclabel can not access the image with the context.
            if not status_error:
                raise error.TestFail("Test failed in positive case."
                                     "error: %s" % e)
    finally:
        # clean up
        for path, label in backup_labels_of_disks.items():
            utils_selinux.set_context_of_file(filename=path, context=label)
        for path, label in backup_ownership_of_disks.items():
            label_list = label.split(":")
            os.chown(path, int(label_list[0]), int(label_list[1]))
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
        if (security_driver or security_default_confined or
                security_require_confined):
            qemu_conf.restore()
            libvirtd.restart()
def run(test, params, env):
    """
    Test the video virtual devices

    1. prepare a guest with different video devices
    2. check whether the guest can be started, and set the related params
    3. check the qemu cmd line and the params

    :param test: avocado test object (provides fail/cancel/error)
    :param params: test parameters dictionary
    :param env: test environment object holding the VM
    """

    def check_heads_test_xml(model_type, is_primary=None, **kwargs):
        """
        Check whether the added devices and attributes are shown
        in the guest xml

        Scans the dumped domain XML line by line for the video model type
        and verifies the expected heads attribute appears on the same line.
        """
        # Primary device (or single-device case) uses the primary default.
        if is_primary or is_primary is None:
            model_heads = kwargs.get("model_heads", default_primary_heads)
        else:
            model_heads = kwargs.get("model_heads", default_secondary_heads)
        pattern_model_type = "type=\'%s\'" % model_type
        pattern_heads = "heads=\'%s\'" % model_heads
        xml_after_adding_device = str(VMXML.new_from_dumpxml(vm_name))
        for line in xml_after_adding_device.splitlines():
            if pattern_model_type in line:
                if pattern_heads not in line:
                    test.fail("Can not find %s video device or heads num"
                              "is not as settings in the xml" % model_type)

    def check_mem_test_xml(model_type, mem_type, mem_size):
        """
        Check whether the added devices and attributes are shown
        in the guest xml

        Verifies that the memory attribute (e.g. ram/vram/vgamem/vram64)
        with the expected size appears on the video device's XML line.
        """
        pattern_model_type = "type=\'%s\'" % model_type
        pattern_mem = "%s=\'%s\'" % (mem_type, mem_size)
        xml_after_adding_device = str(VMXML.new_from_dumpxml(vm_name))
        for line in xml_after_adding_device.splitlines():
            if pattern_model_type in line:
                if pattern_mem not in line:
                    test.fail("Can not find %s video device or memory mem_size"
                              "for %s is not as settings in the xml"
                              % (model_type, mem_type))

    def add_video_device(video_model, domain_xml, is_primary=None,
                         status_error=False, **kwargs):
        """
        add the video device xml snippet, then sync the guest xml

        Builds a Video device from kwargs and injects it via `virsh edit`
        (vi-style substitution), syncing the domain XML on success.
        In negative cases (status_error=True) the edit is expected to fail.
        """
        video_dev = Video()
        video_dev.model_type = video_model
        if is_primary:
            video_dev.primary = "yes"
        # Remaining kwargs map 1:1 onto Video attributes (heads, mem, packed).
        for key, value in list(iteritems(kwargs)):
            setattr(video_dev, key, value)
        domain_xml.add_device(video_dev)
        try:
            # Take relevant line only from the XML (without header)
            video_xml_string = str(video_dev).split('\n')[-1]
            # Prepare a string for VI to replace with it using virsh edit utility
            replace_string = r":%s:<video>\_.\{-}</video>:" + video_xml_string + ":"
            status = libvirt.exec_virsh_edit(vm_name, [replace_string])
            if status:
                domain_xml.sync()
            else:
                # Raise exception which is handled right after in except block.
                raise Exception('Virsh edit has failed, but that is '
                                'intentional in negative cases.')
        except Exception as error:
            logging.debug(error)
            if not status_error:
                test.fail(
                    "Failed to define the guest after adding the %s video "
                    "device xml. Details: %s " % (video_model, error))
            logging.debug("This is the expected failing in negative cases.")
        else:
            if status_error:
                test.fail("xml sync should failed as it is a negative case.")
            logging.debug("Add devices succeed in positive case.")

    def check_model_test_cmd_line(model_type, is_primary=None):
        """
        Check whether the added video devices are shown in the qemu cmd line

        Reads the running qemu process cmdline from /proc and matches a
        per-arch, per-model '-device' regex against it.
        """
        # NOTE(review): the open() handle is never closed — consider 'with'.
        cmdline = open('/proc/%s/cmdline' % vm.get_pid()).read().replace(
            "\x00", " ")
        logging.debug("the cmdline is: %s" % cmdline)
        # s390x only supports virtio
        s390x_pattern = r"-device.*virtio-gpu-ccw"
        # aarch64 only supports virtio
        aarch64_pattern = r"-device.*virtio-gpu-pci"
        if is_primary or is_primary is None:
            if model_type == "vga":
                pattern = r"-device.*VGA"
            else:
                pattern = r"-device.*%s-vga" % model_type
            if guest_arch == 's390x':
                pattern = s390x_pattern
            elif guest_arch == 'aarch64':
                pattern = aarch64_pattern
            if not re.search(pattern, cmdline):
                test.fail("Can not find the primary %s video device "
                          "in qemu cmd line." % model_type)
        else:
            if model_type == "qxl":
                pattern = r"-device.*qxl,"
            elif model_type == "virtio":
                pattern = r"-device.*virtio-gpu-pci"
                if with_packed:
                    pattern = r"-device.*virtio-gpu-pci.*packed\W{1,2}(true|on)"
            if guest_arch == 's390x':
                pattern = s390x_pattern
            if not re.search(pattern, cmdline):
                test.fail("Can not find the secondary %s video device "
                          "in qemu cmd line." % model_type)

    def check_heads_test_cmd_line(model_type, is_primary=None, **kwargs):
        """
        Check whether the heads number of video devices in the qemu cmd
        line are just the same with settings.
        """
        # NOTE(review): the open() handle is never closed — consider 'with'.
        cmdline = open('/proc/%s/cmdline' % vm.get_pid()).read().replace(
            "\x00", " ")
        logging.debug("the cmdline is: %s" % cmdline)
        # s390x only supports virtio
        s390x_pattern = r"-device.*virtio-gpu-ccw.*max_outputs\W{1,2}%s"
        # aarch64 only supports virtio
        aarch64_pattern = r"-device.*virtio-gpu-pci.*max_outputs\W{1,2}%s"
        if is_primary or is_primary is None:
            model_heads = kwargs.get("model_heads", default_primary_heads)
            if model_type == "qxl" or model_type == "virtio":
                pattern = r"-device.*%s-vga.*max_outputs\W{1,2}%s" % (
                    model_type, model_heads)
            if guest_arch == 's390x':
                pattern = s390x_pattern % model_heads
            elif guest_arch == 'aarch64':
                pattern = aarch64_pattern % model_heads
            if not re.search(pattern, cmdline):
                test.fail(
                    "The heads number of the primary %s video device "
                    "in not correct." % model_type)
        else:
            model_heads = kwargs.get("model_heads", default_secondary_heads)
            if model_type == "qxl":
                pattern = r"-device\sqxl\S+max_outputs=%s" % model_heads
            elif model_type == "virtio":
                pattern = r"-device.*virtio-gpu-pci.*max_outputs\W{1,2}%s" % model_heads
            if guest_arch == 's390x':
                pattern = s390x_pattern % model_heads
            if not re.search(pattern, cmdline):
                test.fail("The heads number of the secondary %s video device "
                          "in not correct." % model_type)

    def check_mem_test_cmd_line(model_type, mem_type, mem_size):
        """
        Check whether the video memory of video devices in the qemu cmd
        line are just the same with settings.
        """
        # NOTE(review): the open() handle is never closed — consider 'with'.
        cmdline = open('/proc/%s/cmdline' % vm.get_pid()).read().replace(
            "\x00", " ")
        logging.debug("the cmdline is: %s" % cmdline)
        # ram/vram are reported in bytes-derived units on the qemu cmdline.
        if mem_type == "ram" or mem_type == "vram":
            cmd_mem_size = str(int(mem_size) * 1024)
            pattern = r"-device.*qxl-vga.*%s_size\W{1,2}%s" % (mem_type,
                                                               cmd_mem_size)
        if mem_type == "vram" and model_type == "vga":
            cmd_mem_size = str(int(mem_size) // 1024)
            pattern = r"-device.*VGA.*vgamem_mb\W{1,2}%s" % cmd_mem_size
        if mem_type == "vgamem":
            cmd_mem_size = str(int(mem_size) // 1024)
            pattern = r"-device.*qxl-vga.*vgamem_mb\W{1,2}%s" % cmd_mem_size
        if mem_type == "vram64":
            cmd_mem_size = str(int(mem_size) // 1024)
            pattern = r"-device.*qxl-vga.*vram64_size_mb\W{1,2}%s" % cmd_mem_size
        if not re.search(pattern, cmdline):
            test.fail("The %s memory size of %s video device "
                      "in not correct." % (mem_type, model_type))

    def up_round_to_power_of_two(num):
        """Round num up to the nearest power of two (qemu rounds sizes so)."""
        power = ceil(log(int(num), 2))
        return pow(2, power)

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    status_error = params.get("status_error", "no") == "yes"
    model_test = params.get("model_test", "no") == "yes"
    primary_video_model = params.get("primary_video_model")
    secondary_video_model = params.get("secondary_video_model", None)
    heads_test = params.get("heads_test", "no") == "yes"
    default_primary_heads = params.get("default_primary_heads", None)
    default_secondary_heads = params.get("default_secondary_heads", None)
    primary_heads = params.get("primary_heads", None)
    secondary_heads = params.get("secondary_heads", None)
    mem_test = params.get("mem_test", "no") == "yes"
    mem_type = params.get("mem_type", None)
    mem_size = params.get("mem_size", None)
    default_mem_size = params.get("default_mem_size", None)
    zero_size_test = params.get("zero_size_test", None) == "yes"
    non_power_of_2_test = params.get("non_power_of_2_test", None) == "yes"
    guest_arch = params.get("vm_arch_name")
    with_packed = params.get("with_packed", "no") == "yes"
    driver_packed = params.get("driver_packed", "on")

    # Keep a backup of the domain XML so it can be restored in finally.
    vm_xml = VMXML.new_from_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    if vm.is_alive():
        vm.destroy()
    if with_packed and not libvirt_version.version_compare(6, 3, 0):
        test.cancel("The virtio packed attribute is not supported in"
                    " current libvirt version.")
    try:
        vm_xml.remove_all_device_by_type('video')
        kwargs = {}
        model_type = primary_video_model
        # is_primary stays None in the single-device case; only set
        # explicitly when a secondary device will also be added.
        is_primary = None
        if secondary_video_model:
            is_primary = True
        if heads_test and not default_primary_heads:
            kwargs["model_heads"] = primary_heads
        if mem_test and not default_mem_size:
            kwargs["model_" + mem_type] = mem_size
        if model_type == "virtio" and with_packed:
            kwargs["driver_packed"] = driver_packed
        add_video_device(model_type, vm_xml, is_primary, status_error,
                         **kwargs)
        if secondary_video_model:
            kwargs = {}
            model_type = secondary_video_model
            is_primary = False
            if heads_test and not default_secondary_heads:
                kwargs["model_heads"] = secondary_heads
            if model_type == "virtio" and with_packed:
                kwargs["driver_packed"] = driver_packed
            add_video_device(model_type, vm_xml, is_primary, status_error,
                             **kwargs)
        if not status_error:
            res = virsh.start(vm_name)
            if res.exit_status:
                test.fail("failed to start vm after adding the video "
                          "device xml. details: %s " % res)
            logging.debug("vm started successfully in positive cases.")
            if model_test:
                check_model_test_cmd_line(model_type, is_primary)
            if heads_test:
                check_heads_test_xml(model_type, is_primary, **kwargs)
                check_heads_test_cmd_line(model_type, is_primary, **kwargs)
            if mem_test:
                if mem_size is None:
                    mem_size = default_mem_size
                if zero_size_test:
                    mem_size = params.get("mem_size_after_define")
                if non_power_of_2_test:
                    mem_size = up_round_to_power_of_two(mem_size)
                check_mem_test_xml(model_type, mem_type, mem_size)
                check_mem_test_cmd_line(model_type, mem_type, mem_size)
    finally:
        if vm.is_alive():
            # NOTE(review): vm.destroy() takes a 'gracefully' flag, not a
            # name; passing vm_name here looks accidental — confirm intent.
            vm.destroy(vm_name)
        vm_xml_backup.sync()
def get_graphic_passwd(libvirt_vm):
    """
    Return the password of the VM's first graphics device, if any.

    :param libvirt_vm: VM object; only its name is used for the XML dump
    :return: the ``passwd`` attribute of the first graphics device, or
             None when the VM has no graphics device or no password set
    """
    vmxml = VMXML.new_from_dumpxml(libvirt_vm.name, options="--security-info")
    graphics_devices = vmxml.get_graphics_devices()
    # Bug fix: the original called hasattr() on the device *list* itself,
    # which never exposes 'passwd', so the function always returned None.
    # Inspect the first device instead (guarding against an empty list).
    if graphics_devices and hasattr(graphics_devices[0], 'passwd'):
        return graphics_devices[0].passwd
    return None
def run(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Config qemu conf if need
    (3).Label the VM and disk with proper label.
    (4).Start VM and check the context.
    (5).Destroy VM and check the context.

    :param test: test object (unused directly; failures raised via error.*)
    :param params: test parameters dictionary
    :param env: test environment object holding the VM
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_start_destroy_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_start_destroy_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_start_destroy_vm_sec_model", "selinux")
    sec_label = params.get("svirt_start_destroy_vm_sec_label", None)
    sec_baselabel = params.get("svirt_start_destroy_vm_sec_baselabel", None)
    security_driver = params.get("security_driver", None)
    security_default_confined = params.get("security_default_confined", None)
    security_require_confined = params.get("security_require_confined", None)
    no_sec_model = 'yes' == params.get("no_sec_model", 'no')
    sec_relabel = params.get("svirt_start_destroy_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'relabel': sec_relabel}
    sec_dict_list = []

    def _set_sec_model(model):
        """
        Set sec_dict_list base on given sec model type
        """
        sec_dict_copy = sec_dict.copy()
        sec_dict_copy['model'] = model
        if sec_type != "none":
            # Dynamic labelling takes a baselabel; otherwise a fixed label.
            if sec_type == "dynamic" and sec_baselabel:
                sec_dict_copy['baselabel'] = sec_baselabel
            else:
                sec_dict_copy['label'] = sec_label
        sec_dict_list.append(sec_dict_copy)

    if not no_sec_model:
        if "," in sec_model:
            sec_models = sec_model.split(",")
            for model in sec_models:
                _set_sec_model(model)
        else:
            _set_sec_model(sec_model)
    else:
        sec_dict_list.append(sec_dict)

    logging.debug("sec_dict_list is: %s" % sec_dict_list)
    poweroff_with_destroy = ("destroy" == params.get(
        "svirt_start_destroy_vm_poweroff", "destroy"))
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    # Get varialbles about image.
    img_label = params.get('svirt_start_destroy_disk_label')
    # Backup disk Labels and ownership so they can be restored in finally.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    backup_ownership_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        f = os.open(disk_path, 0)
        stat_re = os.fstat(f)
        backup_ownership_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                          stat_re.st_gid)
        # Bug fix: the descriptor from os.open() was never closed (fd leak).
        os.close(f)
    # Backup selinux of host.
    backup_sestatus = utils_selinux.get_status()
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    def _resolve_label(label_string):
        """Split 'user:role:type[:range[,cat]]' into (type, range-or-None)."""
        labels = label_string.split(":")
        label_type = labels[2]
        if len(labels) == 4:
            label_range = labels[3]
        elif len(labels) > 4:
            label_range = "%s:%s" % (labels[3], labels[4])
        else:
            label_range = None
        return (label_type, label_range)

    def _check_label_equal(label1, label2):
        """Compare labels component-wise, ignoring components beyond label1's."""
        label1s = label1.split(":")
        label2s = label2.split(":")
        for i in range(len(label1s)):
            if label1s[i] != label2s[i]:
                return False
        return True

    try:
        # Set disk label
        (img_label_type, img_label_range) = _resolve_label(img_label)
        for disk in disks.values():
            disk_path = disk['source']
            dir_path = "%s(/.*)?" % os.path.dirname(disk_path)
            # Using semanage set context persistently
            utils_selinux.set_defcon(context_type=img_label_type,
                                     pathregex=dir_path,
                                     context_range=img_label_range)
            o_r = utils_selinux.verify_defcon(pathname=disk_path,
                                              readonly=False,
                                              forcedesc=True)
            orig_label_type = backup_labels_of_disks[disk_path].split(":")[2]
            if o_r and (orig_label_type != img_label_type):
                raise error.TestFail("change disk label(%s) failed"
                                     % img_label_type)
            # 107:107 is the conventional qemu user/group id pair.
            os.chown(disk_path, 107, 107)
        # Set selinux of host.
        utils_selinux.set_status(host_sestatus)
        # Set qemu conf
        if security_driver:
            qemu_conf.set_string('security_driver', security_driver)
        if security_default_confined:
            qemu_conf.security_default_confined = security_default_confined
        if security_require_confined:
            qemu_conf.security_require_confined = security_require_confined
        if (security_driver or security_default_confined or
                security_require_confined):
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()
        # Set the context of the VM.
        vmxml.set_seclabel(sec_dict_list)
        vmxml.sync()
        logging.debug("the domain xml is: %s" % vmxml.xmltreefile)
        # restart libvirtd
        libvirtd.restart()
        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with seclabel can access the image with the context.
            if status_error:
                raise error.TestFail("Test succeeded in negative case.")
            # Check the label of VM and image when VM is running.
            vm_context = utils_selinux.get_context_of_process(vm.get_pid())
            if (sec_type == "static") and (not vm_context == sec_label):
                raise error.TestFail("Label of VM is not expected after "
                                     "starting.\n"
                                     "Detail: vm_context=%s, sec_label=%s"
                                     % (vm_context, sec_label))
            # Bug fix: dict.values() is not subscriptable on Python 3;
            # materialize it before indexing (also fine on Python 2).
            disk_context = utils_selinux.get_context_of_file(
                filename=list(disks.values())[0]['source'])
            if (sec_relabel == "no") and (not disk_context == img_label):
                raise error.TestFail("Label of disk is not expected after VM "
                                     "starting.\n"
                                     "Detail: disk_context=%s, img_label=%s."
                                     % (disk_context, img_label))
            if sec_relabel == "yes" and not no_sec_model:
                vmxml = VMXML.new_from_dumpxml(vm_name)
                imagelabel = vmxml.get_seclabel()[0]['imagelabel']
                # the disk context is 'system_u:object_r:svirt_image_t:s0',
                # when VM started, the MLS/MCS Range will be added
                # automatically. imagelabel turns to be
                # 'system_u:object_r:svirt_image_t:s0:cxx,cxxx'
                # but we shouldn't check the MCS range.
                if not _check_label_equal(disk_context, imagelabel):
                    raise error.TestFail("Label of disk is not relabeled by "
                                         "VM\nDetal: disk_context="
                                         "%s, imagelabel=%s"
                                         % (disk_context, imagelabel))
            # Check the label of disk after VM being destroyed.
            if poweroff_with_destroy:
                vm.destroy(gracefully=False)
            else:
                vm.wait_for_login()
                vm.shutdown()
            img_label_after = utils_selinux.get_context_of_file(
                filename=list(disks.values())[0]['source'])
            if (not img_label_after == img_label):
                # Bug 547546 - RFE: the security drivers must remember
                # original permissions/labels and restore them after
                # https://bugzilla.redhat.com/show_bug.cgi?id=547546
                err_msg = "Label of disk is not restored in VM shuting down.\n"
                err_msg += "Detail: img_label_after=%s, " % img_label_after
                err_msg += "img_label_before=%s.\n" % img_label
                err_msg += "More info in https://bugzilla.redhat.com/show_bug"
                err_msg += ".cgi?id=547546"
                raise error.TestFail(err_msg)
        # Bug fix: 'except X, e' is Python-2-only syntax (SyntaxError on
        # Python 3); 'as' works on both.
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with seclabel can not access the image with the context.
            if not status_error:
                raise error.TestFail("Test failed in positive case."
                                     "error: %s" % e)
    finally:
        # clean up
        for path, label in backup_labels_of_disks.items():
            # Using semanage set context persistently
            dir_path = "%s(/.*)?" % os.path.dirname(path)
            (img_label_type, img_label_range) = _resolve_label(label)
            utils_selinux.set_defcon(context_type=img_label_type,
                                     pathregex=dir_path,
                                     context_range=img_label_range)
            utils_selinux.verify_defcon(pathname=path,
                                        readonly=False,
                                        forcedesc=True)
        for path, label in backup_ownership_of_disks.items():
            label_list = label.split(":")
            os.chown(path, int(label_list[0]), int(label_list[1]))
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
        if (security_driver or security_default_confined or
                security_require_confined):
            qemu_conf.restore()
            libvirtd.restart()
def run(test, params, env):
    """
    Test the input virtual devices

    1. prepare a guest with different input devices
    2. check whether the guest can be started
    3. check the qemu cmd line

    :param test: avocado test object (provides fail/cancel)
    :param params: test parameters dictionary
    :param env: test environment object holding the VM
    """

    def check_dumpxml():
        """
        Check whether the added devices are shown in the guest xml

        Searches the dumped domain XML for the expected <input> element
        (or <driver packed=...> when packed is enabled).
        """
        # NOTE(review): attribute order (bus before type) must match the
        # serialization produced by str(VMXML) — confirm against dumpxml.
        pattern = "<input bus=\"%s\" type=\"%s\">" % (bus_type, input_type)
        if with_packed:
            pattern = "<driver packed=\"%s\"" % (driver_packed)
        logging.debug('Searching for %s in vm xml', pattern)
        xml_after_adding_device = VMXML.new_from_dumpxml(vm_name)
        logging.debug('xml_after_adding_device:\n%s', xml_after_adding_device)
        if pattern not in str(xml_after_adding_device):
            test.fail("Can not find the %s input device xml "
                      "in the guest xml file." % input_type)

    def check_qemu_cmd_line():
        """
        Check whether the added devices are shown in the qemu cmd line
        """
        # if the tested input device is a keyboard or mouse with ps2 bus,
        # there is no keyboard or mouse in qemu cmd line
        if bus_type == "ps2" and input_type in ["keyboard", "mouse"]:
            return
        with open('/proc/%s/cmdline' % vm.get_pid(), 'r') as cmdline_file:
            cmdline = cmdline_file.read()
        # qemu names the device differently per bus/type combination.
        if bus_type == "usb" and input_type == "keyboard":
            pattern = r"-device.*%s-kbd" % bus_type
        elif input_type == "passthrough":
            pattern = r"-device.*%s-input-host-pci" % bus_type
        else:
            pattern = r"-device.*%s-%s" % (bus_type, input_type)
        if not re.search(pattern, cmdline):
            test.fail("Can not find the %s input device "
                      "in qemu cmd line." % input_type)
        if with_packed:
            pattern = r"packed.*%s" % driver_packed
            if not re.search(pattern, cmdline):
                test.fail("Can not find the packed driver "
                          "in qemu cmd line")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    machine_type = params.get('machine_type', '')
    status_error = params.get("status_error", "no") == "yes"
    with_packed = params.get("with_packed", "no") == "yes"
    driver_packed = params.get("driver_packed", "on")
    bus_type = params.get("bus_type")
    input_type = params.get("input_type")
    # May cancel the test when the combination is unsupported on this host.
    check_preconditions(bus_type, input_type, with_packed, test)
    vm = env.get_vm(vm_name)
    # Keep a backup of the domain XML so it can be restored in finally.
    vm_xml = VMXML.new_from_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    if vm.is_alive():
        vm.destroy()
    try:
        # ps2 keyboard and ps2 mouse are default, no need to re-add the xml,
        # unless it's machine_type is pseries
        if not (bus_type == "ps2" and input_type in ["keyboard", "mouse"]
                and machine_type != 'pseries'):
            vm_xml.remove_all_device_by_type('input')
            input_dev = Input(type_name=input_type)
            input_dev.input_bus = bus_type
            if input_type == "passthrough":
                # A passthrough device needs a real host keyboard evdev node.
                kbd_dev_name = glob.glob('/dev/input/by-path/*kbd')
                if not kbd_dev_name:
                    test.cancel("There is no keyboard device on this host.")
                logging.debug(
                    "keyboard %s is going to be passthrough "
                    "to the host.", kbd_dev_name[0])
                input_dev.source_evdev = kbd_dev_name[0]
            if with_packed:
                input_dev.driver_packed = driver_packed
            vm_xml.add_device(input_dev)
            try:
                vm_xml.sync()
            except Exception as error:
                # Negative cases expect the define step itself to fail.
                if not status_error:
                    test.fail(
                        "Failed to define the guest after adding the %s input "
                        "device xml. Details: %s " % (input_type, error))
                logging.debug(
                    "This is the expected failing in negative cases.")
                return
        res = virsh.start(vm_name)
        if res.exit_status:
            if not status_error:
                test.fail("Failed to start vm after adding the %s input "
                          "device xml. Details: %s "
                          % (input_type, res.stderr))
            logging.debug("This is the expected failure in negative cases.")
            return
        if status_error:
            test.fail(
                "Expected fail in negative cases but vm started successfully.")
            return
        logging.debug("VM started successfully in positive cases.")
        check_dumpxml()
        check_qemu_cmd_line()
    finally:
        if vm.is_alive():
            virsh.destroy(vm_name)
        vm_xml_backup.sync()
def run_svirt_save_restore(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Label the VM and disk with proper label.
    (3).Save VM and check the context.
    (4).Restore VM and check the context.

    :param test: test object (provides tmpdir; failures raised via error.*)
    :param params: test parameters dictionary
    :param env: test environment object holding the VM
    """
    # Get general variables.
    status_error = "yes" == params.get("status_error", "no")
    host_sestatus = params.get("svirt_save_restore_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_save_restore_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_save_restore_vm_sec_model", "selinux")
    sec_label = params.get("svirt_save_restore_vm_sec_label", None)
    sec_relabel = params.get("svirt_save_restore_vm_sec_relabel", "yes")
    sec_dict = {"type": sec_type, "model": sec_model, "label": sec_label,
                "relabel": sec_relabel}
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    # Get varialbles about image.
    img_label = params.get("svirt_save_restore_disk_label")
    # Label the disks of VM with img_label, backing up the old contexts
    # so they can be restored in finally.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in disks.values():
        disk_path = disk["source"]
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        utils_selinux.set_context_of_file(filename=disk_path,
                                          context=img_label)
    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the context of the VM.
    # NOTE(review): the sibling svirt start/destroy test passes a *list* of
    # seclabel dicts to set_seclabel(); confirm a bare dict is accepted here.
    vmxml.set_seclabel(sec_dict)
    vmxml.sync()
    # Init a path to save VM.
    save_path = os.path.join(test.tmpdir, "svirt_save")
    try:
        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            vm.save_to_file(path=save_path)
            vm.restore_from_file(path=save_path)
            # Save and restore VM successfully.
            if status_error:
                # Bug fix: corrected typo "successed" in the failure message.
                raise error.TestFail("Test succeeded in negative case.")
        # Bug fix: 'except X, e' is Python-2-only syntax; 'as' works on both.
        except virt_vm.VMError as e:
            if not status_error:
                error_msg = "Test failed in positive case.\n error: %s\n" % e
                if str(e).count("getfd"):
                    # Bug fix: corrected typo "pleass" in the hint message.
                    error_msg += ("More info please refer to"
                                  " https://bugzilla.redhat.com/show_bug.cgi?id=976632")
                raise error.TestFail(error_msg)
    finally:
        # clean up
        for path, label in backup_labels_of_disks.items():
            utils_selinux.set_context_of_file(filename=path, context=label)
        # Remove the save-state file left behind by save_to_file().
        if os.path.exists(save_path):
            os.remove(save_path)
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
def run(test, params, env):
    """
    Test pci/pcie-to-pci bridge

    Hotplug interface to pci/pcie-to-pci bridge, then check xml and
    inside vm. Hotunplug interface, then check xml and inside vm

    :param test: avocado test object (provides fail/error)
    :param params: test parameters dictionary
    :param env: test environment object holding the VM
    """
    vm_name = params.get('main_vm')
    pci_model = params.get('pci_model', 'pci')
    hotplug = 'yes' == params.get('hotplug', 'no')
    pci_model_name = params.get('pci_model_name')
    pci_br_has_device = 'yes' == params.get('pci_br_has_device', 'no')
    sound_dev_model_type = params.get('sound_dev_model_type', '')
    sound_dev_address = params.get('sound_dev_address', '')
    iface_model = params.get('iface_model', '')
    iface_source = params.get('iface_source', '')

    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()
    vm = env.get_vm(vm_name)

    try:
        # Check if there is a pci/pcie-to-pci bridge, if so,
        # just use the existing pci/pcie-to-pci-bridge to test
        ori_pci_br = [dev for dev in vmxml.get_devices('controller')
                      if dev.type == 'pci' and dev.model == pci_model]

        # If there is not a pci/pcie-to-pci bridge to test,
        # create one and add to vm
        if not ori_pci_br:
            logging.info('No %s on vm, create one', pci_model)
            pci_bridge = Controller('pci')
            pci_bridge.model = pci_model
            pci_bridge.model_name = {'name': pci_model_name}
            vmxml.add_device(pci_bridge)
            vmxml.sync()
        logging.debug(virsh.dumpxml(vm_name))

        # Check if pci/pcie-to-pci bridge is successfully added
        vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
        cur_pci_br = [dev for dev in vmxml.get_devices('controller')
                      if dev.type == 'pci' and dev.model == pci_model]
        if not cur_pci_br:
            test.error('Failed to add %s controller to vm xml' % pci_model)
        pci_br = cur_pci_br[0]
        logging.debug(pci_br)
        pci_br_index = pci_br.index

        # If test scenario requires another pci device on pci/pcie-to-pci
        # bridge before hotplug, add a sound device and make sure
        # the 'bus' is same with pci bridge index
        if pci_br_has_device:
            sound_dev = Sound()
            sound_dev.model_type = sound_dev_model_type
            # NOTE(review): eval() of a configuration parameter — params come
            # from the test config, not untrusted input, but keep an eye on it.
            sound_dev.address = eval(sound_dev_address % pci_br_index)
            logging.debug(sound_dev.address)
            vmxml.add_device(sound_dev)
            vmxml.sync()

        # Test hotplug scenario
        if hotplug:
            vm.start()
            vm.wait_for_login().close()

            # Create interface to be hotplugged
            logging.info('Create interface to be hotplugged')
            iface = Interface('network')
            iface.model = iface_model
            iface.source = eval(iface_source)
            mac = utils_net.generate_mac_address_simple()
            iface.mac_address = mac
            logging.debug(iface)

            result = virsh.attach_device(vm_name, iface.xml, debug=True)
            libvirt.check_exit_status(result)

            xml_after_attach = VMXML.new_from_dumpxml(vm_name)
            logging.debug(virsh.dumpxml(vm_name))

            # Check if the iface with given mac address is successfully
            # attached
            iface_list = [
                iface for iface in xml_after_attach.get_devices('interface')
                if iface.mac_address == mac
            ]
            logging.debug('iface list after attach: %s', iface_list)
            if not iface_list:
                test.error('Failed to attach interface %s' % iface)

            # Check inside vm
            def check_inside_vm(session, expect=True):
                """Return True when 'ip a' output matches the expectation."""
                ip_output = session.cmd('ip a')
                logging.debug(ip_output)
                return expect if mac in ip_output else not expect

            session = vm.wait_for_serial_login()
            if not utils_misc.wait_for(lambda: check_inside_vm(session, True),
                                       timeout=60, step=5):
                test.fail('Check interface inside vm failed,'
                          'interface not successfully attached:'
                          'not found mac address %s' % mac)
            session.close()

            # Test hotunplug
            result = virsh.detach_device(vm_name, iface.xml, debug=True)
            libvirt.check_exit_status(result)
            logging.debug(virsh.dumpxml(vm_name))

            # Check if the iface with given mac address has been
            # successfully detached
            xml_after_detach = VMXML.new_from_dumpxml(vm_name)
            iface_list_after_detach = [
                iface for iface in xml_after_detach.get_devices('interface')
                if iface.mac_address == mac
            ]
            logging.debug('iface list after detach: %s',
                          iface_list_after_detach)
            if iface_list_after_detach:
                # Bug fix: test.fail() takes a single message string; the
                # original passed ('...%s', iface) as two arguments, so the
                # device was never interpolated into the message.
                test.fail('Failed to detach device: %s' % iface)

            # Check again inside vm
            session = vm.wait_for_serial_login()
            if not utils_misc.wait_for(lambda: check_inside_vm(session, False),
                                       timeout=60, step=5):
                test.fail('Check interface inside vm failed,'
                          'interface not successfully detached:'
                          'found mac address %s' % mac)
            session.close()
    finally:
        # Make sure the guest is down before restoring the backup XML.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        bkxml.sync()
def run_libvirt_network_bandwidth(test, params, env):
    """
    Test for network bandwidth in libvirt.

    1. Preparation:
        * Init variables from params.
        * Keep a backup for vmxml and networkxml.
        * Build a file with dd command.
    2. Edit vmxml and networkxml to control the bandwidth.
    3. Verify the bandwidth with scp.
    4. Clean up.

    :param test: test object (unused directly; failures raised via error.*)
    :param params: test parameters dictionary
    :param env: test environment object holding the VM
    """
    # get the params from params
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    inbound_average = params.get("LNB_inbound_average", "512")
    inbound_peak = params.get("LNB_inbound_peak", "512")
    inbound_burst = params.get("LNB_inbound_burst", "32")
    outbound_average = params.get("LNB_outbound_average", "512")
    outbound_peak = params.get("LNB_outbound_peak", "512")
    outbound_burst = params.get("LNB_outbound_burst", "32")
    config_type = params.get("LNB_config_type", "network")
    bandwidth_tolerance = float(params.get("LNB_bandwidth_tolerance",
                                           "20"))/100
    # Size (in MB) of the file scp'ed to/from the guest for measurement.
    file_size = params.get("LNB_verify_file_size", "10")
    nic1_params = params.object_params('nic1')
    nettype = params.get('nettype')
    netdst = params.get('netdst')

    vm_xml = VMXML.new_from_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    # This test assume that VM is using default network.
    # Check the interfaces of VM to make sure default network
    # is used by VM.
    interfaces = vm_xml.get_devices(device_type="interface")
    # interface which is using default network.
    default_interface = None
    for interface in interfaces:
        if interface.source == {nettype: netdst}:
            default_interface = interface
            break
    if not default_interface:
        raise error.TestNAError("VM is not using default network,"
                                "skip this test.")

    bandwidth_inbound = {'average': inbound_average,
                         'peak': inbound_peak,
                         'burst': inbound_burst}
    bandwidth_outbound = {'average': outbound_average,
                          'peak': outbound_peak,
                          'burst': outbound_burst}

    network_xml = NetworkXML.new_from_net_dumpxml("default")
    network_xml_backup = network_xml.copy()

    tmp_dir = data_dir.get_tmp_dir()
    file_path = os.path.join(tmp_dir, "scp_file")
    # Build the transfer file with dd (file_size MB of zeros).
    cmd = "dd if=/dev/zero of=%s bs=1M count=%s" % (file_path, file_size)
    utils.run(cmd)
    try:
        if config_type == "network":
            network_xml.bandwidth_inbound = bandwidth_inbound
            network_xml.bandwidth_outbound = bandwidth_outbound
            network_xml.sync()
        elif config_type == "interface":
            devices = vm_xml.devices
            for index in range(len(devices)):
                if not (devices[index].device_tag ==
                        default_interface.device_tag):
                    continue
                if devices[index].mac_address == \
                        default_interface.mac_address:
                    default_interface.bandwidth_inbound = bandwidth_inbound
                    default_interface.bandwidth_outbound = bandwidth_outbound
                    devices[index] = default_interface
                    break
            vm_xml.devices = devices
            vm_xml.sync()
        elif config_type == "portgroup":
            # Add a portgroup into default network
            portgroup_name = "test_portgroup"
            portgroup = PortgroupXML()
            portgroup.name = portgroup_name
            portgroup.bandwidth_inbound = bandwidth_inbound
            portgroup.bandwidth_outbound = bandwidth_outbound
            network_xml.portgroup = portgroup
            network_xml.sync()
            # Using the portgroup in VM.
            devices = vm_xml.devices
            for index in range(len(devices)):
                if not (devices[index].device_tag ==
                        default_interface.device_tag):
                    continue
                if devices[index].mac_address == \
                        default_interface.mac_address:
                    default_interface.portgroup = portgroup_name
                    devices[index] = default_interface
                    break
            vm_xml.devices = devices
            vm_xml.sync()
        else:
            raise error.TestNAError("Unsupported parameter config_type=%s." %
                                    config_type)
        # SCP to check the network bandwidth.
        if vm.is_alive():
            vm.destroy()
        vm.start()
        vm.wait_for_login()

        time_before = time.time()
        vm.copy_files_to(host_path=file_path, guest_path="/root")
        time_after = time.time()

        speed_expected = int(inbound_average)
        # Bug fix: the transfer size was hard-coded as 10 MB, silently
        # ignoring the LNB_verify_file_size parameter used to build the
        # file; compute the actual KB transferred from file_size.
        speed_actual = (int(file_size)*1024/(time_after-time_before))
        if not (abs(speed_actual - speed_expected) <=
                speed_expected*bandwidth_tolerance):
            raise error.TestFail("Speed from host to guest is %s.\n"
                                 "But the average of bandwidth.inbound is %s.\n"
                                 % (speed_actual, speed_expected))

        time_before = time.time()
        vm.copy_files_from(host_path=file_path, guest_path="/root/scp_file")
        time_after = time.time()

        speed_expected = int(outbound_average)
        speed_actual = (int(file_size)*1024/(time_after-time_before))
        if not (abs(speed_actual - speed_expected) <=
                speed_expected*bandwidth_tolerance):
            raise error.TestFail("Speed from guest to host is %s.\n"
                                 "But the average of bandwidth.outbound is %s\n"
                                 % (speed_actual, speed_expected))
    finally:
        if os.path.exists(file_path):
            os.remove(file_path)
        network_xml_backup.sync()
        vm_xml_backup.sync()
def check_xml():
    """
    Predict the result serial device and generated console device
    and check the result domain XML against expectation.

    Relies on enclosing-scope variables: serial_type, serial_dev,
    console_target_type, target_type, machine_type, vm_name, librarian,
    set_targets and test.
    """
    console_cls = librarian.get('console')

    # TLS chardevs are reported back by libvirt as tcp devices.
    local_serial_type = serial_type
    if serial_type == 'tls':
        local_serial_type = 'tcp'

    # Predict expected serial and console XML
    expected_console = console_cls(local_serial_type)

    if local_serial_type == 'udp':
        # libvirt fills in mode='connect' for udp sources that carry a
        # service but no explicit mode; mirror that in the expectation.
        sources = []
        for source in serial_dev.sources:
            if 'service' in source and 'mode' not in source:
                source['mode'] = 'connect'
            sources.append(source)
    else:
        sources = serial_dev.sources
    expected_console.sources = sources

    if local_serial_type == 'tcp':
        # NOTE(review): 'protocol_type' in local_serial_type tests substring
        # membership in the string 'tcp' and is therefore always False, so
        # the expected protocol is always "raw". Presumably the intent was
        # to probe serial_dev for a protocol_type -- confirm.
        if 'protocol_type' in local_serial_type:
            expected_console.protocol_type = serial_dev.protocol_type
        else:
            expected_console.protocol_type = "raw"
        expected_console.target_port = serial_dev.target_port

    # NOTE(review): whatever target_type is copied here is unconditionally
    # overwritten by console_target_type on the next statement -- confirm
    # whether the first assignment is dead code.
    if 'target_type' in serial_dev:
        expected_console.target_type = serial_dev.target_type
    expected_console.target_type = console_target_type
    logging.debug("Expected console XML is:\n%s", expected_console)

    # Get current serial and console XML
    current_xml = VMXML.new_from_dumpxml(vm_name)
    serial_elem = current_xml.xmltreefile.find('devices/serial')
    console_elem = current_xml.xmltreefile.find('devices/console')
    if console_elem is None:
        test.fail("Expect generate console automatically, "
                  "but found none.")
    if serial_elem and console_target_type != 'serial':
        test.fail("Don't Expect exist serial device, "
                  "but found:\n%s" % serial_elem)

    cur_console = console_cls.new_from_element(console_elem)
    logging.debug("Current console XML is:\n%s", cur_console)
    # Compare current serial and console with oracle.
    if not expected_console == cur_console:
        # "==" has been override
        test.fail("Expect generate console:\n%s\nBut got:\n%s" %
                  (expected_console, cur_console))

    if console_target_type == 'serial':
        # A serial device must exist alongside the console; build the
        # expected serial XML the same way as the console above.
        serial_cls = librarian.get('serial')
        expected_serial = serial_cls(local_serial_type)
        expected_serial.sources = sources
        set_targets(expected_serial)
        if local_serial_type == 'tcp':
            # NOTE(review): same always-False membership test as above.
            if 'protocol_type' in local_serial_type:
                expected_serial.protocol_type = serial_dev.protocol_type
            else:
                expected_serial.protocol_type = "raw"
            expected_serial.target_port = serial_dev.target_port
        if serial_elem is None:
            test.fail("Expect exist serial device, "
                      "but found none.")
        cur_serial = serial_cls.new_from_element(serial_elem)
        if target_type == 'pci-serial':
            # pci-serial devices must have been assigned a PCI address.
            if cur_serial.address is None:
                test.fail("Expect serial device address is not assigned")
            else:
                logging.debug("Serial address is: %s", cur_serial.address)
        logging.debug("Expected serial XML is:\n%s", expected_serial)
        logging.debug("Current serial XML is:\n%s", cur_serial)
        # Compare current serial and console with oracle.
        # pci-serial and pseries get extra auto-generated attributes, so the
        # strict equality check is skipped for them.
        if (target_type != 'pci-serial' and machine_type != 'pseries' and
                not expected_serial == cur_serial):
            # "==" has been override
            test.fail("Expect serial device:\n%s\nBut got:\n "
                      "%s" % (expected_serial, cur_serial))
def run_svirt_start_destroy(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Label the VM and disk with proper label.
    (3).Start VM and check the context.
    (4).Destroy VM and check the context.

    :param test: test object providing fail/cancel helpers
    :param params: dict-like test parameters
    :param env: test environment holding the VM object
    :raises error.TestFail: when the observed SELinux contexts do not
        match the configured expectation
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_start_destroy_host_selinux",
                               "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_start_destroy_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_start_destroy_vm_sec_model", "selinux")
    sec_label = params.get("svirt_start_destroy_vm_sec_label", None)
    sec_relabel = params.get("svirt_start_destroy_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label,
                'relabel': sec_relabel}
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    # Get varialbles about image.
    img_label = params.get('svirt_start_destroy_disk_label')
    # Label the disks of VM with img_label, remembering the original
    # contexts so they can be restored during cleanup.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        utils_selinux.set_context_of_file(filename=disk_path,
                                          context=img_label)
    # Path of the first disk source, used in the context checks below.
    # list() is required: dict views are not indexable on Python 3.
    first_disk_path = list(disks.values())[0]['source']
    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the context of the VM.
    vmxml.set_seclabel(sec_dict)
    vmxml.sync()
    try:
        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with seclabel can access the image with the context.
            if status_error:
                raise error.TestFail("Test successed in negative case.")
            # Check the label of VM and image when VM is running.
            vm_context = utils_selinux.get_context_of_process(vm.get_pid())
            if (sec_type == "static") and (not vm_context == sec_label):
                raise error.TestFail("Label of VM is not expected after "
                                     "starting.\n"
                                     "Detail: vm_context=%s, sec_label=%s"
                                     % (vm_context, sec_label))
            disk_context = utils_selinux.get_context_of_file(
                filename=first_disk_path)
            if (sec_relabel == "no") and (not disk_context == img_label):
                raise error.TestFail("Label of disk is not expected after VM "
                                     "starting.\n"
                                     "Detail: disk_context=%s, img_label=%s."
                                     % (disk_context, img_label))
            if sec_relabel == "yes":
                # The dynamically generated image label only shows up in
                # the live domain XML.
                vmxml = VMXML.new_from_dumpxml(vm_name)
                imagelabel = vmxml.get_seclabel()['imagelabel']
                if not disk_context == imagelabel:
                    raise error.TestFail("Label of disk is not relabeled by "
                                         "VM\n"
                                         "Detal: disk_context=%s, "
                                         "imagelabel=%s"
                                         % (disk_context, imagelabel))
            # Check the label of disk after VM being destroyed.
            vm.destroy()
            img_label_after = utils_selinux.get_context_of_file(
                filename=first_disk_path)
            if (not img_label_after == img_label):
                raise error.TestFail("Bug: Label of disk is not restored in "
                                     "VM shuting down.\n"
                                     "Detail: img_label_after=%s, "
                                     "img_label_before=%s.\n"
                                     % (img_label_after, img_label))
        # Python-3-compatible exception syntax (was "except X, e").
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with seclabel can not access the image with the context.
            if not status_error:
                raise error.TestFail("Test failed in positive case."
                                     "error: %s" % e)
    finally:
        # clean up: restore disk contexts, domain XML and host SELinux mode.
        for path, label in backup_labels_of_disks.items():
            utils_selinux.set_context_of_file(filename=path, context=label)
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
def check_xml():
    """
    Predict the result serial device and generated console device
    and check the result domain XML against expectation.

    Relies on enclosing-scope variables: serial_type, serial_dev,
    console_target_type, target_type, vm_name, librarian, set_targets
    and test.
    """
    console_cls = librarian.get('console')

    # TLS chardevs are reported back by libvirt as tcp devices.
    local_serial_type = serial_type
    if serial_type == 'tls':
        local_serial_type = 'tcp'

    # Predict expected serial and console XML
    expected_console = console_cls(local_serial_type)

    if local_serial_type == 'udp':
        # libvirt fills in mode='connect' for udp sources that carry a
        # service but no explicit mode; mirror that in the expectation.
        sources = []
        for source in serial_dev.sources:
            if 'service' in source and 'mode' not in source:
                source['mode'] = 'connect'
            sources.append(source)
    else:
        sources = serial_dev.sources
    expected_console.sources = sources

    if local_serial_type == 'tcp':
        # NOTE(review): 'protocol_type' in local_serial_type tests substring
        # membership in the string 'tcp' and is therefore always False, so
        # the expected protocol is always "raw". Presumably the intent was
        # to probe serial_dev for a protocol_type -- confirm.
        if 'protocol_type' in local_serial_type:
            expected_console.protocol_type = serial_dev.protocol_type
        else:
            expected_console.protocol_type = "raw"
        expected_console.target_port = serial_dev.target_port

    # NOTE(review): whatever target_type is copied here is unconditionally
    # overwritten by console_target_type on the next statement -- confirm
    # whether the first assignment is dead code.
    if 'target_type' in serial_dev:
        expected_console.target_type = serial_dev.target_type
    expected_console.target_type = console_target_type
    logging.debug("Expected console XML is:\n%s", expected_console)

    # Get current serial and console XML
    current_xml = VMXML.new_from_dumpxml(vm_name)
    serial_elem = current_xml.xmltreefile.find('devices/serial')
    console_elem = current_xml.xmltreefile.find('devices/console')
    if console_elem is None:
        test.fail("Expect generate console automatically, "
                  "but found none.")
    if serial_elem and console_target_type != 'serial':
        test.fail("Don't Expect exist serial device, "
                  "but found:\n%s" % serial_elem)

    cur_console = console_cls.new_from_element(console_elem)
    logging.debug("Current console XML is:\n%s", cur_console)
    # Compare current serial and console with oracle.
    if not expected_console == cur_console:
        # "==" has been override
        test.fail("Expect generate console:\n%s\nBut got:\n%s" %
                  (expected_console, cur_console))

    if console_target_type == 'serial':
        # A serial device must exist alongside the console; build the
        # expected serial XML the same way as the console above.
        serial_cls = librarian.get('serial')
        expected_serial = serial_cls(local_serial_type)
        expected_serial.sources = sources
        set_targets(expected_serial)
        if local_serial_type == 'tcp':
            # NOTE(review): same always-False membership test as above.
            if 'protocol_type' in local_serial_type:
                expected_serial.protocol_type = serial_dev.protocol_type
            else:
                expected_serial.protocol_type = "raw"
            expected_serial.target_port = serial_dev.target_port
        if serial_elem is None:
            test.fail("Expect exist serial device, "
                      "but found none.")
        cur_serial = serial_cls.new_from_element(serial_elem)
        if target_type == 'pci-serial':
            # pci-serial devices must have been assigned a PCI address.
            if cur_serial.address is None:
                test.fail("Expect serial device address is not assigned")
            else:
                logging.debug("Serial address is: %s", cur_serial.address)
        logging.debug("Expected serial XML is:\n%s", expected_serial)
        logging.debug("Current serial XML is:\n%s", cur_serial)
        # Compare current serial and console with oracle.
        # pci-serial gets extra auto-generated attributes, so the strict
        # equality check is skipped for it.
        if target_type != 'pci-serial' and not expected_serial == cur_serial:
            # "==" has been override
            test.fail("Expect serial device:\n%s\nBut got:\n "
                      "%s" % (expected_serial, cur_serial))
def run(test, params, env):
    """
    Test for basic controller device function.

    1) Define the VM w/o specified controller device and check result meets
       expectation.
    2) Start the guest and check if start result meets expectation
    3) Test the function of started controller device
    4) Shutdown the VM and clean up environment
    """

    def setup_os_xml():
        """
        Prepare os part of VM XML: override the machine type with the
        'os_machine' parameter when one is supplied.
        """
        osxml = vm_xml.os
        orig_machine = osxml.machine
        if os_machine:
            osxml.machine = os_machine
            vm_xml.os = osxml
        else:
            # NOTE(review): this binds a function-local name that is discarded
            # on return; it does NOT update the enclosing 'cur_machine'
            # variable. Looks like dead code -- confirm intent.
            cur_machine = orig_machine

    def setup_controller_xml(index, addr_target=None):
        """
        Prepare controller devices of VM XML.

        Builds one controller from the enclosing-scope parameters
        (cntlr_type, model, pcihole, vectors, chassisNr, model_name) and
        adds it to vm_xml; when companion controllers are configured, adds
        cmpnn_cntlr_num companions with the same index.

        :param index: The index of controller
        :param addr_target: The controller address as "bus:slot.function"
        """
        ctrl = Controller(type_name=cntlr_type)

        if model:
            ctrl.model = model
        if pcihole:
            ctrl.pcihole64 = pcihole
        if vectors:
            ctrl.vectors = vectors
        if index:
            ctrl.index = index
        if chassisNr:
            ctrl.target = {'chassisNr': chassisNr}
        if model_name:
            ctrl.model_name = {'name': model_name}

        if addr_target:
            # Parse "bus:slot.function" (hex fields) into the attrs dict
            # expected by new_controller_address.
            match = re.match(
                r"(?P<bus>[0-9]*):(?P<slot>[0-9a-f]*).(?P<function>[0-9])",
                addr_target)
            if match:
                addr_dict = match.groupdict()
                addr_dict['bus'] = hex(int(addr_dict['bus'], 16))
                addr_dict['slot'] = hex(int(addr_dict['slot'], 16))
                addr_dict['function'] = hex(int(addr_dict['function'], 16))
                addr_dict['domain'] = '0x0000'
                ctrl.address = ctrl.new_controller_address(attrs=addr_dict)

        logging.debug("Controller XML is:%s", ctrl)
        vm_xml.add_device(ctrl)

        if cmpnn_cntlr_model is not None:
            for num in range(int(cmpnn_cntlr_num)):
                ctrl = Controller(type_name=cntlr_type)
                # Companion controller models are numbered from 1.
                ctrl.model = cmpnn_cntlr_model + str(num + 1)
                ctrl.index = index
                logging.debug("Controller XML is:%s", ctrl)
                vm_xml.add_device(ctrl)

    def define_and_check():
        """
        Predict the error message when defining and try to define the guest
        with testing XML.

        :return: True if 'virsh define' succeeded, False otherwise
        """
        fail_patts = []
        if expect_err_msg:
            fail_patts.append(expect_err_msg)
        vm_xml.undefine()
        res = vm_xml.virsh.define(vm_xml.xml)
        logging.debug("Expect failures: %s", fail_patts)
        libvirt.check_result(res, expected_fails=fail_patts)
        return not res.exit_status

    def start_and_check():
        """
        Predict the error message when starting and try to start the guest.

        :return: True if 'virsh start' succeeded, False otherwise
        """
        fail_patts = []
        if expect_err_msg:
            fail_patts.append(expect_err_msg)
        res = virsh.start(vm_name)
        logging.debug("Expect failures: %s", fail_patts)
        libvirt.check_result(res, expected_fails=fail_patts)
        return not res.exit_status

    def prepare_qemu_pattern(elem):
        """
        Collect the patterns to be searched in qemu command line.

        :param elem: a Controller object
        :return: A list including search patterns
        """
        search_qemu_cmd = []

        bus = int(elem.address.attrs.get('bus'), 0)
        slot = int(elem.address.attrs.get('slot'), 0)
        func = int(elem.address.attrs.get('function'), 0)
        # NOTE(review): addr_str is computed but never used below -- the
        # -device value is built from bus/slot directly. Confirm dead code.
        addr_str = '%02d:%02d.%1d' % (bus, slot, func)
        name = elem.alias.get('name')
        if elem.model != 'dmi-to-pci-bridge':
            # NOTE(review): every model other than dmi-to-pci-bridge takes
            # this pci-bridge branch and reads target/chassisNr -- confirm
            # this helper is only reached for pci-bridge controllers.
            chassisNR = elem.target.get('chassisNr')
            value = "pci-bridge,chassis_nr=%s" % chassisNR
            value = "%s,id=%s,bus=pci.%d,addr=%#x" % (value, name, bus, slot)
        else:
            value = "%s" % elem.model_name['name']
            value = "%s,id=%s,bus=pcie.%d,addr=%#x" % (value, name, bus, slot)

        tup = ('-device', value)
        search_qemu_cmd.append(tup)
        return search_qemu_cmd

    def search_controller(vm_xml, cntl_type, cntl_model, cntl_index,
                          qemu_pattern=True):
        """
        Search a controller as specified and prepare the expected
        qemu command line

        :params vm_xml: The guest VMXML instance (shadows the outer vm_xml)
        :params cntl_type: The controller type
        :params cntl_model: The controller model
        :params cntl_index: The controller index
        :params qemu_pattern: True if it needs to be checked with qemu
                              command line. False if not.
        :return: Tuple (Controller, List)
                 Boolean: True if the controller is found. Otherwise, False.
                 List: a list including qemu search patterns
        """
        logging.debug("Search controller with type %s, model %s index %s",
                      cntl_type, cntl_model, cntl_index)
        qemu_list = None
        found = False
        for elem in vm_xml.devices.by_device_tag('controller'):
            if (elem.type == cntl_type and
                    elem.model == cntl_model and
                    elem.index == cntl_index):
                found = True
                if (qemu_pattern and
                        cntl_model != 'pci-root' and
                        cntl_model != 'pcie-root'):
                    # Root controllers never appear as -device options.
                    qemu_list = prepare_qemu_pattern(elem)
                return (elem, qemu_list)

        if not found:
            test.fail("Can not find %s controller "
                      "with index %s." % (cntl_model, cntl_index))

    def get_patt_inx_ctl(cur_vm_xml, qemu_list, inx):
        """
        Get search pattern in qemu line for some kind of cases

        :param cur_vm_xml: Guest xml
        :param qemu_list: List for storing qemu search patterns
        :param inx: Controller index used
        :return: a tuple for (search_result, qemu_list)
        """
        (search_result, qemu_search) = search_controller(cur_vm_xml,
                                                         cntlr_type,
                                                         model,
                                                         inx)
        if qemu_search:
            qemu_list.extend(qemu_search)
        return (search_result, qemu_list)

    def get_patt_non_zero_bus(cur_vm_xml, qemu_list):
        """
        Collect qemu patterns for matching controllers and verify the set
        of controller indexes is exactly [1..pci_bus_number].

        :param cur_vm_xml: Guest xml
        :param qemu_list: List for storing qemu search patterns
        :return: the resulting qemu pattern list
        """
        actual_set = set()
        for elem in cur_vm_xml.devices.by_device_tag('controller'):
            if (elem.type == cntlr_type and elem.model == model):
                actual_set.add(int(elem.index))
                # NOTE(review): qemu_list is reassigned on every match, so
                # only the last controller's pattern survives. Confirm
                # whether extend() was intended instead.
                qemu_list = prepare_qemu_pattern(elem)
        expect_set = set()
        for num in range(1, int(pci_bus_number) + 1):
            expect_set.add(num)

        logging.debug("expect: %s, actual: %s", expect_set, actual_set)
        if (not actual_set.issubset(expect_set) or
                not expect_set.issubset(actual_set)):
            test.fail("The actual index set (%s)does "
                      "not match the expect index set "
                      "(%s)." % (actual_set, expect_set))
        return qemu_list

    def get_search_patt_qemu_line():
        """
        Build the qemu command-line search patterns for the controllers
        configured by this test, depending on the scenario parameters.

        :return: pattern list, e.g.
                 [('-device',
                   'pci-bridge,chassis_nr=1,id=pci.1,bus=pci.0,addr=0x3')]
        """
        cur_vm_xml = VMXML.new_from_dumpxml(vm_name)
        qemu_list = []
        # Check the pci-root controller has index = 0
        if no_pci_controller == "yes":
            (_, qemu_list) = get_patt_inx_ctl(cur_vm_xml,
                                              qemu_list, '0')
            return qemu_list
        # Check index numbers of pci-bridge controllers should be equal
        # to the pci_bus_number
        if int(pci_bus_number) > 0:
            return get_patt_non_zero_bus(cur_vm_xml, qemu_list)
        # All controllers should exist if there is a gap between two PCI
        # controller indexes
        if index and index_second and int(index) > 0 and int(index_second) > 0:
            for idx in range(int(index_second), int(index) + 1):
                (_, qemu_list) = get_patt_inx_ctl(cur_vm_xml,
                                                  qemu_list, str(idx))
            return qemu_list
        # All controllers should exist with index among [1..index]
        if index and int(index) > 0 and not index_second:
            for idx in range(1, int(index) + 1):
                (search_result, qemu_list) = get_patt_inx_ctl(cur_vm_xml,
                                                              qemu_list,
                                                              str(idx))
                if not search_result:
                    test.fail("Can not find %s controller "
                              "with index %s." % (model, str(idx)))
            return qemu_list

    def get_controller_addr(cntlr_type=None, model=None, index=None):
        """
        Get the address of testing controller from VM XML as a string with
        format "bus:slot.function".

        :param cntlr_type: controller type
        :param model: controller model
        :param index: controller index
        :return: an address string of the specified controller, or None
        """
        if model in ['pci-root', 'pcie-root']:
            # Root controllers carry no address element.
            return None
        addr_str = None
        cur_vm_xml = VMXML.new_from_dumpxml(vm_name)

        for elem in cur_vm_xml.devices.by_device_tag('controller'):
            # None acts as a wildcard for each filter criterion.
            if ((cntlr_type is None or elem.type == cntlr_type) and
                    (model is None or elem.model == model) and
                    (index is None or elem.index == index)):
                addr_elem = elem.address
                if addr_elem is None:
                    test.error("Can not find 'Address' "
                               "element for the controller")
                # base 0 lets int() honour the 0x prefix in the XML attrs
                bus = int(addr_elem.attrs.get('bus'), 0)
                slot = int(addr_elem.attrs.get('slot'), 0)
                func = int(addr_elem.attrs.get('function'), 0)
                addr_str = '%02d:%02x.%1d' % (bus, slot, func)
                logging.debug("Controller address is %s", addr_str)
                break

        return addr_str

    def check_controller_addr():
        """
        Check test controller address against expectation.
        """
        # NOTE(review): this local addr_str shadows the module-level
        # addr_str ('controller_address' parameter); the 'if addr_str'
        # branch below therefore matches the discovered address against
        # itself. Confirm whether the configured address was intended.
        addr_str = get_controller_addr(cntlr_type, model, index)

        if model in ['pci-root', 'pcie-root']:
            if addr_str is None:
                return
            else:
                test.fail('Expect controller do not have address, '
                          'but got "%s"' % addr_str)

        if index != 0:
            if '00:00' in addr_str:
                test.fail("Invalid PCI address 0000:00:00, "
                          "at least one of domain, bus, "
                          "or slot must be > 0")

        exp_addr_patt = r'00:[0-9]{2}.[0-9]'
        if model in ['ehci']:
            exp_addr_patt = r'0[1-9]:[0-9]{2}.[0-9]'
        if addr_str:
            exp_addr_patt = addr_str

        if not re.match(exp_addr_patt, addr_str):
            test.fail('Expect get controller address "%s", '
                      'but got "%s"' % (exp_addr_patt, addr_str))

    def check_qemu_cmdline(search_pattern=None):
        """
        Check domain qemu command line against expectation.

        :param search_pattern: search list with tuple objects
        """
        # /proc/<pid>/cmdline separates arguments with NUL bytes.
        with open('/proc/%s/cmdline' % vm.get_pid()) as proc_file:
            cmdline = proc_file.read()
        logging.debug('Qemu command line: %s', cmdline)
        options = cmdline.split('\x00')
        # Search the command line options for the given patterns
        if search_pattern and isinstance(search_pattern, list):
            for pattern in search_pattern:
                key = pattern[0]
                value = pattern[1]
                logging.debug("key=%s, value=%s", key, value)
                found = False
                check_value = False
                # A pattern matches only when 'value' is the argument
                # immediately following 'key'.
                for opt in options:
                    if check_value:
                        if opt == value:
                            logging.debug("Found the expected (%s %s) in qemu "
                                          "command line" % (key, value))
                            found = True
                            break
                        check_value = False
                    if key == opt:
                        check_value = True
                if not found:
                    test.fail("Can not find '%s %s' in qemu "
                              "command line" % (key, value))

        # Get pcihole options from qemu command line
        pcihole_opt = ''
        for idx, opt in enumerate(options):
            # idx is unused; kept as-is.
            if 'pci-hole64-size' in opt:
                pcihole_opt = opt

        # Get expected pcihole options from params
        exp_pcihole_opt = ''
        if (cntlr_type == 'pci' and model in ['pci-root', 'pcie-root'] and
                pcihole):
            # assumes cur_machine is a non-None machine-type string here
            # (e.g. 'pc' or 'q35') -- TODO confirm for all pcihole configs.
            if 'pc' in cur_machine:
                exp_pcihole_opt = 'i440FX-pcihost'
            elif 'q35' in cur_machine:
                exp_pcihole_opt = 'q35-pcihost'
            exp_pcihole_opt += '.pci-hole64-size=%sK' % pcihole

        # Check options against expectation
        if pcihole_opt != exp_pcihole_opt:
            test.fail('Expect get qemu command serial option "%s", '
                      'but got "%s"' % (exp_pcihole_opt, pcihole_opt))

        # Check usb options against expectation
        if cntlr_type == "usb":
            pattern = ""
            if cmpnn_cntlr_num is not None:
                for num in range(int(cmpnn_cntlr_num)):
                    # e.g. 'ich9-usb' + '1' -> '-device.ich9-usb-1.*'
                    name = (cmpnn_cntlr_model + str(num + 1)).split('-')
                    pattern = pattern + r"-device.%s-usb-%s.*" % (name[0],
                                                                  name[1])
            elif model == "ehci":
                pattern = r"-device.usb-ehci"
            elif model == "qemu-xhci":
                pattern = r"-device.qemu-xhci"
            logging.debug("pattern is %s", pattern)

            if pattern and not re.search(pattern, cmdline):
                test.fail("Expect get usb model info in qemu cmdline, "
                          "but failed!")

    def check_guest(cntlr_type, cntlr_model, cntlr_index=None):
        """
        Check status within the guest against expectation.

        :param cntlr_type: controller type (shadows the outer cntlr_type)
        :param cntlr_model: controller model
        :param cntlr_index: controller index, optional
        """
        # Early exit uses the OUTER 'model', not cntlr_model.
        if model == 'pci-root' or model == 'pcie-root':
            return

        addr_str = get_controller_addr(cntlr_type=cntlr_type,
                                       model=cntlr_model,
                                       index=cntlr_index)
        pci_name = 'PCI bridge:'
        verbose_option = ""
        if cntlr_type == 'virtio-serial':
            verbose_option = '-vvv'

        if (addr_str is None and
                model != 'pci-root' and model != 'pcie-root'):
            test.error("Can't find target controller in XML")
        if cntlr_index:
            logging.debug("%s, %s, %s", cntlr_type, cntlr_model, cntlr_index)
        if (addr_str is None and
                cntlr_model != 'pci-root' and cntlr_model != 'pcie-root'):
            test.fail("Can't find target controller in XML")

        session = vm.wait_for_login(serial=True)
        status, output = session.cmd_status_output('lspci %s -s %s'
                                                   % (verbose_option,
                                                      addr_str))
        logging.debug("lspci output is: %s", output)

        if (cntlr_type == 'virtio-serial' and
                (vectors and int(vectors) == 0)):
            if 'MSI' in output:
                test.fail("Expect MSI disable with zero vectors, "
                          "but got %s" % output)
        if (cntlr_type == 'virtio-serial' and
                (vectors is None or int(vectors) != 0)):
            if 'MSI' not in output:
                test.fail("Expect MSI enable with non-zero vectors, "
                          "but got %s" % output)
        if (cntlr_type == 'pci'):
            if pci_name not in output:
                test.fail("Can't find target pci device"
                          " '%s' on guest " % addr_str)

    # ---- main test flow: read parameters ----
    os_machine = params.get('os_machine', None)
    libvirt.check_machine_type_arch(os_machine)
    cntlr_type = params.get('controller_type', None)
    model = params.get('controller_model', None)
    index = params.get('controller_index', None)
    vectors = params.get('controller_vectors', None)
    pcihole = params.get('controller_pcihole64', None)
    chassisNr = params.get('chassisNr', None)
    addr_str = params.get('controller_address', None)
    cmpnn_cntlr_model = params.get('companion_controller_model', None)
    cmpnn_cntlr_num = params.get('companion_controller_num', None)
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    no_pci_controller = params.get("no_pci_controller", "no")
    pci_bus_number = params.get("pci_bus_number", "0")
    remove_address = params.get("remove_address", "yes")
    setup_controller = params.get("setup_controller", "yes")
    index_second = params.get("controller_index_second", None)
    cur_machine = os_machine
    check_qemu = "yes" == params.get("check_qemu", "no")
    check_within_guest = "yes" == params.get("check_within_guest", "no")
    run_vm = "yes" == params.get("run_vm", "no")
    second_level_controller_num = params.get("second_level_controller_num",
                                             "0")
    status_error = "yes" == params.get("status_error", "no")
    model_name = params.get("model_name", None)
    expect_err_msg = params.get("err_msg", None)

    if index and index_second:
        if int(index) > int(index_second):
            test.error("Invalid parameters")

    vm = env.get_vm(vm_name)
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    try:
        # Rebuild the controller layout from scratch on the inactive XML.
        vm_xml.remove_all_device_by_type('controller')
        if remove_address == "yes":
            remove_devices(vm_xml, 'address')
        remove_devices(vm_xml, 'usb')
        if setup_controller == "yes":
            if index_second:
                setup_controller_xml(index_second)
            setup_controller_xml(index, addr_str)
            if second_level_controller_num:
                for indx in range(2, int(second_level_controller_num) + 2):
                    addr_second = "0%s:0%s.0" % (index, str(indx))
                    setup_controller_xml(str(indx), addr_second)
        setup_os_xml()
        if int(pci_bus_number) > 0:
            address_params = {'bus': "%0#4x" % int(pci_bus_number)}
            libvirt.set_disk_attr(vm_xml, 'vda', 'address', address_params)

        logging.debug("Test VM XML before define is %s" % vm_xml)
        if not define_and_check():
            logging.debug("Can't define the VM, exiting.")
            return

        # Re-read the XML: libvirt fills in auto-assigned addresses.
        vm_xml = VMXML.new_from_dumpxml(vm_name)
        logging.debug("Test VM XML after define is %s" % vm_xml)

        check_controller_addr()

        if run_vm:
            try:
                if not start_and_check():
                    logging.debug("Can't start the VM, exiting.")
                    return
            except virt_vm.VMStartError as detail:
                test.fail(detail)

        search_qemu_cmd = get_search_patt_qemu_line()
        if check_qemu:
            check_qemu_cmdline(search_pattern=search_qemu_cmd)

        if check_within_guest:
            try:
                if int(pci_bus_number) > 0:
                    for contr_idx in range(1, int(pci_bus_number) + 1):
                        check_guest(cntlr_type, model, str(contr_idx))
                    return
                if index:
                    check_max_index = int(index) + int(
                        second_level_controller_num)
                    for contr_idx in range(1, int(check_max_index) + 1):
                        check_guest(cntlr_type, model, str(contr_idx))
                else:
                    check_guest(cntlr_type, model)
                    if model == 'pcie-root':
                        # Need check other auto added controller
                        check_guest(cntlr_type, 'dmi-to-pci-bridge', '1')
                        check_guest(cntlr_type, 'pci-bridge', '2')
            except remote.LoginTimeoutError as e:
                # Login failure is the expected symptom in negative cases.
                logging.debug(e)
                if not status_error:
                    raise
    finally:
        vm_xml_backup.sync()
def run(test, params, env):
    """
    Test the sound virtual devices

    1. prepare a guest with different sound devices
    2. check whether the guest can be started
    3. check the xml and qemu cmd line
    """
    # Sound element supported since 0.4.3.
    if not libvirt_version.version_compare(0, 4, 3):
        test.cancel("Sound device is not supported "
                    "on current version.")
    # Codec sub-element supported since 0.9.13
    codec_type = params.get("codec_type", None)
    if codec_type and not libvirt_version.version_compare(0, 9, 13):
        test.cancel("codec sub-element is not supported "
                    "on current version.")

    def check_dumpxml():
        """
        Verify that the dumped guest XML contains the sound device
        (and, when configured, its codec sub-element).
        """
        dumped_xml = VMXML.new_from_dumpxml(vm_name)
        # Check sound model
        model_tag = "<sound model=\"%s\">" % sound_model
        if model_tag not in str(dumped_xml):
            test.fail("Can not find the %s sound device xml "
                      "in the guest xml file." % sound_model)
        # Check codec type
        if codec_type:
            codec_tag = "<codec type=\"%s\" />" % codec_type
            if codec_tag not in str(dumped_xml):
                test.fail("Can not find the %s codec xml for sound dev "
                          "in the guest xml file." % codec_type)

    def check_qemu_cmd_line():
        """
        Verify that the qemu process command line carries the expected
        sound device (and codec) options.
        """
        if not vm.get_pid():
            test.fail('VM pid file missing.')
        with open('/proc/%s/cmdline' % vm.get_pid()) as cmdline_file:
            cmdline = cmdline_file.read()
        # Check sound model; anything other than ac97/ich6 maps to ich9.
        model_patterns = {
            "ac97": r"-device.AC97",
            "ich6": r"-device.intel-hda",
        }
        pattern = model_patterns.get(sound_model, r"-device.ich9-intel-hda")
        if not re.search(pattern, cmdline):
            test.fail("Can not find the %s sound device "
                      "in qemu cmd line." % sound_model)
        # Check codec type
        if sound_model in ["ich6", "ich9"]:
            if codec_type == "micro":
                pattern = r"-device.hda-micro"
            else:
                # Duplex is default in qemu cli even codec not set
                # But before 0.9.13, no codec_type so no default
                if libvirt_version.version_compare(0, 9, 13):
                    pattern = r"-device.hda-duplex"
            if not re.search(pattern, cmdline):
                test.fail("Can not find the %s codec for sound dev "
                          "in qemu cmd line." % codec_type)

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    status_error = params.get("status_error", "no") == "yes"
    sound_model = params.get("sound_model")

    # Minimum libvirt version required per sound model:
    # ac97 since 0.6.0, ich6 since 0.8.8, ich9 since 1.1.3.
    minimum_versions = {
        "ac97": (0, 6, 0),
        "ich6": (0, 8, 8),
        "ich9": (1, 1, 3),
    }
    if sound_model in minimum_versions:
        required = minimum_versions[sound_model]
        if not libvirt_version.version_compare(*required):
            test.cancel("%s sound model is not supported "
                        "on current version." % sound_model)

    vm_xml = VMXML.new_from_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    logging.debug("vm xml is %s", vm_xml_backup)

    if vm.is_alive():
        vm.destroy()

    try:
        # Replace any existing sound devices with the one under test.
        vm_xml.remove_all_device_by_type('sound')
        new_sound_dev = Sound()
        new_sound_dev.model_type = sound_model
        if codec_type:
            new_sound_dev.codec_type = codec_type
        vm_xml.add_device(new_sound_dev)
        vm_xml.sync()

        virsh.start(vm_name, ignore_status=False)
        check_dumpxml()
        check_qemu_cmd_line()
    finally:
        if vm.is_alive():
            virsh.destroy(vm_name)
        vm_xml_backup.sync()
def run(test, params, env):
    """
    Test the sound virtual devices

    1. prepare a guest with different sound devices
    2. check whether the guest can be started
    3. check the xml and qemu cmd line
    """
    # Sound element supported since 0.4.3.
    if not libvirt_version.version_compare(0, 4, 3):
        test.cancel("Sound device is not supported "
                    "on current version.")
    # Codec sub-element supported since 0.9.13
    codec_type = params.get("codec_type", None)
    if codec_type and not libvirt_version.version_compare(0, 9, 13):
        test.cancel("codec sub-element is not supported "
                    "on current version.")

    def check_dumpxml():
        """
        Check whether the added devices are shown in the guest xml
        """
        pattern = "<sound model=\"%s\">" % sound_model
        # Check sound model
        xml_after_adding_device = VMXML.new_from_dumpxml(vm_name)
        if pattern not in str(xml_after_adding_device):
            test.fail("Can not find the %s sound device xml "
                      "in the guest xml file." % sound_model)
        # Check codec type
        if codec_type:
            pattern = "<codec type=\"%s\" />" % codec_type
            if pattern not in str(xml_after_adding_device):
                test.fail("Can not find the %s codec xml for sound dev "
                          "in the guest xml file." % codec_type)

    def check_qemu_cmd_line():
        """
        Check whether the added devices are shown in the qemu cmd line
        """
        if not vm.get_pid():
            test.fail('VM pid file missing.')
        with open('/proc/%s/cmdline' % vm.get_pid()) as cmdline_file:
            cmdline = cmdline_file.read()
        # Check sound model
        if sound_model == "ac97":
            pattern = r"-device.AC97"
        elif sound_model == "ich6":
            pattern = r"-device.intel-hda"
        else:
            pattern = r"-device.ich9-intel-hda"
        if not re.search(pattern, cmdline):
            test.fail("Can not find the %s sound device "
                      "in qemu cmd line." % sound_model)
        # Check codec type
        if sound_model in ["ich6", "ich9"]:
            if codec_type == "micro":
                pattern = r"-device.hda-micro"
            else:
                # Duplex is default in qemu cli even codec not set
                # But before 0.9.13, no codec_type so no default
                if libvirt_version.version_compare(0, 9, 13):
                    pattern = r"-device.hda-duplex"
            if not re.search(pattern, cmdline):
                test.fail("Can not find the %s codec for sound dev "
                          "in qemu cmd line." % codec_type)

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    status_error = params.get("status_error", "no") == "yes"
    sound_model = params.get("sound_model")

    # AC97 sound model supported since 0.6.0
    if sound_model == "ac97":
        if not libvirt_version.version_compare(0, 6, 0):
            test.cancel("ac97 sound model is not supported "
                        "on current version.")
    # Ich6 sound model supported since 0.8.8
    if sound_model == "ich6":
        if not libvirt_version.version_compare(0, 8, 8):
            test.cancel("ich6 sound model is not supported "
                        "on current version.")
    # Ich9 sound model supported since 1.1.3
    if sound_model == "ich9":
        if not libvirt_version.version_compare(1, 1, 3):
            test.cancel("ich9 sound model is not supported "
                        "on current version.")

    vm_xml = VMXML.new_from_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    logging.debug("vm xml is %s", vm_xml_backup)

    if vm.is_alive():
        vm.destroy()

    try:
        vm_xml.remove_all_device_by_type('sound')
        sound_dev = Sound()
        sound_dev.model_type = sound_model
        if codec_type:
            sound_dev.codec_type = codec_type
        vm_xml.add_device(sound_dev)
        vm_xml.sync()
        # BUGFIX: the start status used to be silently discarded, so a
        # failed start let the XML/qemu checks run against a dead guest.
        # Raise immediately on start failure instead (consistent with the
        # sibling variant of this test).
        virsh.start(vm_name, ignore_status=False)
        check_dumpxml()
        check_qemu_cmd_line()
    finally:
        if vm.is_alive():
            virsh.destroy(vm_name)
        vm_xml_backup.sync()
def run(test, params, env):
    """
    Test pci/pcie-to-pci bridge

    Hotplug interface to pci/pcie-to-pci bridge, then check xml and
    inside vm. Hotunplug interface, then check xml and inside vm.
    Other test scenarios of pci/pcie-to-pci bridge.

    :param test: test object, used for error/fail reporting
    :param params: avocado-vt params dict driving the scenario
    :param env: test environment, provides the VM object
    """
    def create_pci_device(pci_model, pci_model_name, **kwargs):
        """
        Create a pci/pcie bridge

        :param pci_model: model of pci controller device
        :param pci_model_name: model name of pci controller device
        :param kwargs: other k-w args that needed to create device
                       ('index' and/or 'address'; 'address' is a string
                       evaluated into an attrs dict)
        :return: the newly created device object
        """
        pci_bridge = Controller('pci')
        pci_bridge.model = pci_model
        pci_bridge.model_name = {'name': pci_model_name}
        if 'index' in kwargs:
            pci_bridge.index = kwargs['index']
        if 'address' in kwargs:
            # kwargs['address'] comes from the test cfg as a dict literal.
            pci_bridge.address = pci_bridge.new_controller_address(
                attrs=eval(kwargs['address']))

        logging.debug('pci_bridge: %s', pci_bridge)
        return pci_bridge

    def create_iface(iface_model, iface_source, **kwargs):
        """
        Create an interface to be attached to vm

        :param iface_model: model of the interface device
        :param iface_source: source of the interface device (string
                             evaluated into a dict)
        :param kwargs: other k-w args that needed to create device
                       ('mac' and/or 'address')
        :return: the newly created interface object
        """
        iface = Interface('network')
        iface.model = iface_model
        iface.source = eval(iface_source)
        if 'mac' in kwargs:
            iface.mac_address = kwargs['mac']
        else:
            # No mac given: generate a fresh one so the device is unique.
            mac = utils_net.generate_mac_address_simple()
            iface.mac_address = mac

        if 'address' in kwargs:
            iface.address = iface.new_iface_address(attrs=eval(kwargs['address']))

        logging.debug('iface: %s', iface)
        return iface

    vm_name = params.get('main_vm')
    status_error = 'yes' == params.get('status_error', 'no')
    # err_msg may hold several ';'-separated expected failure messages.
    err_msg = params.get('err_msg', '').split(';')
    case = params.get('case', '')
    hotplug = 'yes' == params.get('hotplug', 'no')
    need_pci_br = 'yes' == params.get('need_pci_br', 'no')
    pci_model = params.get('pci_model', 'pci')
    pci_model_name = params.get('pci_model_name')
    pci_br_kwargs = eval(params.get('pci_br_kwargs', '{}'))
    pci_br_has_device = 'yes' == params.get('pci_br_has_device', 'no')
    sound_dev_model_type = params.get('sound_dev_model_type', '')
    sound_dev_address = params.get('sound_dev_address', '')
    iface_model = params.get('iface_model', '')
    iface_source = params.get('iface_source', '')
    iface_kwargs = eval(params.get('iface_kwargs', '{}'))
    max_slots = int(params.get('max_slots', 31))
    pcie_br_count = int(params.get('pcie_br_count', 3))

    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    # Backup for restoration in the finally block.
    bkxml = vmxml.copy()
    vm = env.get_vm(vm_name)

    try:
        # Check if there is a pci/pcie-to-pci bridge, if so,
        # just use the existing pci/pcie-to-pci-bridge to test
        ori_pci_br = [dev for dev in vmxml.get_devices('controller')
                      if dev.type == 'pci' and dev.model == pci_model]
        if need_pci_br:
            # If there is not a pci/pcie-to-pci bridge to test,
            # create one and add to vm
            if not ori_pci_br:
                logging.info('No %s on vm, create one', pci_model)
                pci_bridge = create_pci_device(pci_model, pci_model_name)
                vmxml.add_device(pci_bridge)
                vmxml.sync()
                logging.debug(virsh.dumpxml(vm_name))

            # Check if pci/pcie-to-pci bridge is successfully added
            vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
            cur_pci_br = [dev for dev in vmxml.get_devices('controller')
                          if dev.type == 'pci' and dev.model == pci_model]
            if not cur_pci_br:
                test.error('Failed to add %s controller to vm xml' % pci_model)

            pci_br = cur_pci_br[0]
            logging.debug('pci_br: %s', pci_br)
            pci_br_index = pci_br.index
            # NOTE(review): pci_br_index / cur_pci_br are only defined when
            # need_pci_br is set; later branches (hotplug, 'max_slots')
            # implicitly assume need_pci_br=yes in their cfg variants.

        # If test scenario requires another pci device on pci/pcie-to-pci
        # bridge before hotplug, add a sound device and make sure
        # the 'bus' is same with pci bridge index
        if need_pci_br and pci_br_has_device:
            sound_dev = Sound()
            sound_dev.model_type = sound_dev_model_type
            # sound_dev_address is a cfg template with a %s for the bus.
            sound_dev.address = eval(sound_dev_address % pci_br_index)
            logging.debug('sound_dev.address: %s', sound_dev.address)
            vmxml.add_device(sound_dev)
            if case != 'vm_with_pcie_br_1_br':
                vmxml.sync()

        # Test hotplug scenario
        if hotplug:
            vm.start()
            vm.wait_for_login().close()

            # Create interface to be hotplugged
            logging.info('Create interface to be hotplugged')
            iface = create_iface(iface_model, iface_source)
            mac = iface.mac_address

            result = virsh.attach_device(vm_name, iface.xml, debug=True)
            libvirt.check_exit_status(result)

            xml_after_attach = VMXML.new_from_dumpxml(vm_name)
            logging.debug(virsh.dumpxml(vm_name))

            # Check if the iface with given mac address is successfully
            # attached with address bus equal to pcie/pci bridge's index
            iface_list = [
                iface for iface in xml_after_attach.get_devices('interface')
                if iface.mac_address == mac
                and int(iface.address['attrs']['bus'], 16) == int(pci_br_index, 16)
            ]

            logging.debug('iface list after attach: %s', iface_list)
            if not iface_list:
                test.error('Failed to attach interface %s' % iface)

            # Check inside vm
            def check_inside_vm(session, expect=True):
                # Returns True when presence of the mac in 'ip a' output
                # matches the expectation (attached/detached).
                ip_output = session.cmd('ip a')
                logging.debug('output of "ip a": %s', ip_output)

                return expect if mac in ip_output else not expect

            session = vm.wait_for_serial_login()
            if not utils_misc.wait_for(lambda: check_inside_vm(session, True),
                                       timeout=60, step=5):
                test.fail('Check interface inside vm failed,'
                          'interface not successfully attached:'
                          'not found mac address %s' % mac)
            session.close()

            # Test hotunplug
            result = virsh.detach_device(vm_name, iface.xml, debug=True)
            libvirt.check_exit_status(result)

            logging.debug(virsh.dumpxml(vm_name))

            # Check if the iface with given mac address has been
            # successfully detached
            xml_after_detach = VMXML.new_from_dumpxml(vm_name)
            iface_list_after_detach = [
                iface for iface in xml_after_detach.get_devices('interface')
                if iface.mac_address == mac
            ]
            logging.debug('iface list after detach: %s', iface_list_after_detach)
            if iface_list_after_detach:
                test.fail('Failed to detach device: %s' % iface)

            # Check again inside vm
            session = vm.wait_for_serial_login()
            if not utils_misc.wait_for(lambda: check_inside_vm(session, False),
                                       timeout=60, step=5):
                test.fail('Check interface inside vm failed,'
                          'interface not successfully detached:'
                          'found mac address %s' % mac)
            session.close()

        # Other test scenarios of pci/pcie
        if case:
            logging.debug('iface_kwargs: %s', iface_kwargs)

            # Setting pcie-to-pci-bridge model name !=pcie-pci-bridge.
            # or Invalid controller index for pcie-to-pci-bridge.
            if case in ('wrong_model_name', 'invalid_index'):
                pci_bridge = create_pci_device(pci_model, pci_model_name,
                                               **pci_br_kwargs)
                vmxml.add_device(pci_bridge)
                result_to_check = virsh.define(vmxml.xml, debug=True)

            # Attach device with invalid slot to pcie-to-pci-bridge
            if case == 'attach_with_invalid_slot':
                iface = create_iface(iface_model, iface_source, **iface_kwargs)
                vmxml.add_device(iface)
                result_to_check = virsh.define(vmxml.xml, debug=True)

            # Test that pcie-to-pci-bridge has 31 available slots
            if case == 'max_slots':
                target_bus = cur_pci_br[0].index
                target_bus = hex(int(target_bus))
                logging.debug('target_bus: %s', target_bus)

                # Attach 32 interfaces
                for i in range(max_slots + 1):
                    logging.debug('address: %s', iface_kwargs['address'])
                    new_iface_kwargs = {'address': iface_kwargs['address']
                                        % (target_bus, hex(i + 1))}
                    iface = create_iface(iface_model, iface_source,
                                         **new_iface_kwargs)
                    logging.info('Attaching the %d th interface', i + 1)
                    result_in_loop = virsh.attach_device(
                        vm_name, iface.xml, flagstr='--config', debug=True)

                    # Attaching the 32rd interfaces will fail
                    if i == max_slots:
                        status_error = True
                    libvirt.check_exit_status(result_in_loop,
                                              expect_error=status_error)
                logging.debug(virsh.dumpxml(vm_name))

                # Get all devices on pcie-to-pci-bridge from new xml
                # Test if it matches with value of max_slots
                new_xml = VMXML.new_from_dumpxml(vm_name)
                device_on_pci_br = [
                    dev for dev in new_xml.get_devices('interface')
                    if dev.address['type_name'] == 'pci'
                    and int(dev.address['attrs']['bus'], 16) == int(target_bus, 16)
                ]
                logging.info('All slots of pcie-to-pci-bridge is %d',
                             len(device_on_pci_br))
                if len(device_on_pci_br) != max_slots:
                    test.fail('Max slots is %d instead of %d' %
                              (len(device_on_pci_br), max_slots))

            # Define a guest with pcie-to-pci-bridge controller's index <=bus
            if case.startswith('index_v_bus'):
                last_pci_index = max([
                    int(dev.index) for dev in vmxml.get_devices('controller')
                    if dev.type == 'pci'])
                # New index of new pcie-bridge should be +1
                new_index = last_pci_index + 1
                if case.endswith('less_than'):
                    new_bus = new_index + 1
                elif case.endswith('equal_to'):
                    new_bus = new_index
                else:
                    test.error('Invalid test: %s' % case)
                pci_br_kwargs['index'] = new_index
                pci_br_kwargs['address'] = pci_br_kwargs['address'] % (new_bus)
                logging.debug('pci_br_kwargs: %s', pci_br_kwargs)

                pcie_br = create_pci_device(pci_model, pci_model_name,
                                            **pci_br_kwargs)
                vmxml.add_device(pcie_br)
                result_to_check = virsh.define(vmxml.xml, debug=True)

            # Test define & start an VM with/without pcie-to-pci-bridge
            if case.startswith('vm_with_pcie_br'):
                if case.endswith('1_br'):
                    pass
                elif case.endswith('multi_br'):
                    # Add $pcie_br_count pcie-root-port to vm
                    for i in range(pcie_br_count):
                        temp_xml = VMXML.new_from_inactive_dumpxml(vm_name)
                        port = create_pci_device('pcie-root-port',
                                                 'pcie-root-port')
                        temp_xml.add_device(port)
                        temp_xml.sync()
                    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
                    pci_root_ports = [dev for dev in
                                      vmxml.get_devices('controller')
                                      if dev.type == 'pci'
                                      and dev.model == 'pcie-root-port']
                    indexs = sorted([hex(int(dev.index))
                                     for dev in pci_root_ports])
                    logging.debug('indexs: %s', indexs)

                    # Add $pcie_br_count pcie-to-pci-bridge to vm,
                    # on separated pcie-root-port
                    for i in range(pcie_br_count):
                        new_kwargs = {
                            'address': pci_br_kwargs['address'] % indexs[-i - 1]}
                        pcie_br = create_pci_device(pci_model, pci_model_name,
                                                   **new_kwargs)
                        vmxml.add_device(pcie_br)

                elif case.endswith('no_br'):
                    # Delete all pcie-to-pci-bridge
                    for dev in ori_pci_br:
                        vmxml.del_device(dev)
                    vmxml.sync()

                    # Add an pci device(rtl8139 nic)
                    iface = create_iface(iface_model, iface_source)
                    vmxml.add_device(iface)
                else:
                    test.error('Invalid test: %s' % case)

                # Test define and start vm with new xml settings
                result_define_vm = virsh.define(vmxml.xml, debug=True)
                libvirt.check_exit_status(result_define_vm)
                result_start_vm = virsh.start(vm_name, debug=True)
                libvirt.check_exit_status(result_start_vm)

                # Login to make sure vm is actually started
                vm.create_serial_console()
                vm.wait_for_serial_login().close()
                logging.debug(virsh.dumpxml(vm_name))

                # Get all pcie-to-pci-bridge after test operations
                new_vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
                cur_pci_br = [dev for dev in new_vmxml.get_devices('controller')
                              if dev.type == 'pci' and dev.model == pci_model]
                logging.debug('cur_pci_br: %s', cur_pci_br)

                if case.endswith('multi_br'):
                    # Check whether all new pcie-to-pci-br are successfullly added
                    if len(cur_pci_br) - len(ori_pci_br) != pcie_br_count:
                        test.fail('Not all pcie-to-pci-br successfully added.'
                                  'Should be %d, actually is %d'
                                  % (pcie_br_count,
                                     len(cur_pci_br) - len(ori_pci_br)))

                elif case.endswith('no_br'):
                    # Check if a pcie-to-pci-br will be automatically created
                    if len(cur_pci_br) == 0:
                        test.fail('No new pcie-to-pci-bridge automatically created')

            # Check command result if there is a result to be checked
            # NOTE(review): intentionally uses locals() because only some
            # cases set result_to_check.
            if 'result_to_check' in locals():
                libvirt.check_exit_status(result_to_check,
                                          expect_error=status_error)
                libvirt.check_result(result_to_check, expected_fails=err_msg)

    finally:
        # Restore the original guest definition.
        bkxml.sync()
# NOTE(review): this second copy of the pci/pcie-to-pci bridge test is
# TRUNCATED in this file — its `try:` block is cut off mid-way through the
# 'index_v_bus' case (no except/finally, no bkxml.sync() restoration).
# The code below is reproduced as-is with comments only; the missing tail
# must be recovered before this module can run.
def run(test, params, env):
    """
    Test pci/pcie-to-pci bridge

    Hotplug interface to pci/pcie-to-pci bridge, then check xml and
    inside vm. Hotunplug interface, then check xml and inside vm.
    Other test scenarios of pci/pcie-to-pci bridge.

    :param test: test object, used for error/fail reporting
    :param params: avocado-vt params dict driving the scenario
    :param env: test environment, provides the VM object
    """
    def create_pci_device(pci_model, pci_model_name, **kwargs):
        """
        Create a pci/pcie bridge

        :param pci_model: model of pci controller device
        :param pci_model_name: model name of pci controller device
        :param kwargs: other k-w args that needed to create device
        :return: the newly created device object
        """
        pci_bridge = Controller('pci')
        pci_bridge.model = pci_model
        pci_bridge.model_name = {'name': pci_model_name}
        if 'index' in kwargs:
            pci_bridge.index = kwargs['index']
        if 'address' in kwargs:
            # cfg supplies the address as a dict-literal string.
            pci_bridge.address = pci_bridge.new_controller_address(
                attrs=eval(kwargs['address']))

        logging.debug('pci_bridge: %s', pci_bridge)
        return pci_bridge

    def create_iface(iface_model, iface_source, **kwargs):
        """
        Create an interface to be attached to vm

        :param iface_model: model of the interface device
        :param iface_source: source of the interface device
        :param kwargs: other k-w args that needed to create device
        :return: the newly created interface object
        """
        iface = Interface('network')
        iface.model = iface_model
        iface.source = eval(iface_source)
        if 'mac' in kwargs:
            iface.mac_address = kwargs['mac']
        else:
            # Generate a unique mac when none is configured.
            mac = utils_net.generate_mac_address_simple()
            iface.mac_address = mac

        if 'address' in kwargs:
            iface.address = iface.new_iface_address(attrs=eval(kwargs['address']))

        logging.debug('iface: %s', iface)
        return iface

    vm_name = params.get('main_vm')
    status_error = 'yes' == params.get('status_error', 'no')
    # NOTE(review): unlike the sibling copy of this test, err_msg is NOT
    # split on ';' here — presumably consumed as a single message.
    err_msg = params.get('err_msg', '')
    case = params.get('case', '')
    hotplug = 'yes' == params.get('hotplug', 'no')
    need_pci_br = 'yes' == params.get('need_pci_br', 'no')
    pci_model = params.get('pci_model', 'pci')
    pci_model_name = params.get('pci_model_name')
    pci_br_kwargs = eval(params.get('pci_br_kwargs', '{}'))
    pci_br_has_device = 'yes' == params.get('pci_br_has_device', 'no')
    sound_dev_model_type = params.get('sound_dev_model_type', '')
    sound_dev_address = params.get('sound_dev_address', '')
    iface_model = params.get('iface_model', '')
    iface_source = params.get('iface_source', '')
    iface_kwargs = eval(params.get('iface_kwargs', '{}'))
    max_slots = int(params.get('max_slots', 31))
    pcie_br_count = int(params.get('pcie_br_count', 3))

    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()
    vm = env.get_vm(vm_name)

    try:
        # Check if there is a pci/pcie-to-pci bridge, if so,
        # just use the existing pci/pcie-to-pci-bridge to test
        ori_pci_br = [dev for dev in vmxml.get_devices('controller')
                      if dev.type == 'pci' and dev.model == pci_model]
        if need_pci_br:
            # If there is not a pci/pcie-to-pci bridge to test,
            # create one and add to vm
            if not ori_pci_br:
                logging.info('No %s on vm, create one', pci_model)
                pci_bridge = create_pci_device(pci_model, pci_model_name)
                vmxml.add_device(pci_bridge)
                vmxml.sync()
                logging.debug(virsh.dumpxml(vm_name))

            # Check if pci/pcie-to-pci bridge is successfully added
            vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
            cur_pci_br = [dev for dev in vmxml.get_devices('controller')
                          if dev.type == 'pci' and dev.model == pci_model]
            if not cur_pci_br:
                test.error('Failed to add %s controller to vm xml' % pci_model)

            pci_br = cur_pci_br[0]
            logging.debug('pci_br: %s', pci_br)
            pci_br_index = pci_br.index

        # If test scenario requires another pci device on pci/pcie-to-pci
        # bridge before hotplug, add a sound device and make sure
        # the 'bus' is same with pci bridge index
        if need_pci_br and pci_br_has_device:
            sound_dev = Sound()
            sound_dev.model_type = sound_dev_model_type
            sound_dev.address = eval(sound_dev_address % pci_br_index)
            logging.debug('sound_dev.address: %s', sound_dev.address)
            vmxml.add_device(sound_dev)
            if case != 'vm_with_pcie_br_1_br':
                vmxml.sync()

        # Test hotplug scenario
        if hotplug:
            vm.start()
            vm.wait_for_login().close()

            # Create interface to be hotplugged
            logging.info('Create interface to be hotplugged')
            # This variant pins the hotplugged iface's bus to the bridge
            # index via the cfg address template.
            target_bus = cur_pci_br[0].index
            target_bus = hex(int(target_bus))
            logging.debug('target_bus: %s', target_bus)
            new_iface_kwargs = {'address': iface_kwargs['address'] % target_bus}
            logging.debug('address: %s', new_iface_kwargs['address'])
            iface = create_iface(iface_model, iface_source, **new_iface_kwargs)
            mac = iface.mac_address

            result = virsh.attach_device(vm_name, iface.xml, debug=True)
            libvirt.check_exit_status(result)

            xml_after_attach = VMXML.new_from_dumpxml(vm_name)
            logging.debug(virsh.dumpxml(vm_name))

            # Check if the iface with given mac address is successfully
            # attached with address bus equal to pcie/pci bridge's index
            iface_list = [
                iface for iface in xml_after_attach.get_devices('interface')
                if iface.mac_address == mac
                and int(iface.address['attrs']['bus'], 16) == int(pci_br_index, 16)
            ]

            logging.debug('iface list after attach: %s', iface_list)
            if not iface_list:
                test.error('Failed to attach interface %s' % iface)

            # Check inside vm
            def check_inside_vm(session, expect=True):
                # True when the mac's presence in 'ip a' matches expectation.
                ip_output = session.cmd('ip a')
                logging.debug('output of "ip a": %s', ip_output)

                return expect if mac in ip_output else not expect

            session = vm.wait_for_serial_login()
            if not utils_misc.wait_for(lambda: check_inside_vm(session, True),
                                       timeout=60, step=5):
                test.fail('Check interface inside vm failed,'
                          'interface not successfully attached:'
                          'not found mac address %s' % mac)
            session.close()

            # Test hotunplug
            result = virsh.detach_device(vm_name, iface.xml, debug=True)
            libvirt.check_exit_status(result)

            # Check if the iface with given mac address has been
            # successfully detached
            def is_hotunplug_interface_ok():
                # Polled until the iface disappears from the live XML.
                xml_after_detach = VMXML.new_from_dumpxml(vm_name)
                iface_list_after_detach = [
                    iface for iface in xml_after_detach.get_devices('interface')
                    if iface.mac_address == mac
                ]
                logging.debug('iface list after detach: %s',
                              iface_list_after_detach)
                return iface_list_after_detach == []

            if not utils_misc.wait_for(is_hotunplug_interface_ok, timeout=20):
                test.fail('Failed to detach device: %s' % iface)
            logging.debug(virsh.dumpxml(vm_name))

            # Check again inside vm
            session = vm.wait_for_serial_login()
            if not utils_misc.wait_for(lambda: check_inside_vm(session, False),
                                       timeout=60, step=5):
                test.fail('Check interface inside vm failed,'
                          'interface not successfully detached:'
                          'found mac address %s' % mac)
            session.close()

        # Other test scenarios of pci/pcie
        if case:
            logging.debug('iface_kwargs: %s', iface_kwargs)

            # Setting pcie-to-pci-bridge model name !=pcie-pci-bridge.
            # or Invalid controller index for pcie-to-pci-bridge.
            if case in ('wrong_model_name', 'invalid_index'):
                pci_bridge = create_pci_device(pci_model, pci_model_name,
                                               **pci_br_kwargs)
                vmxml.add_device(pci_bridge)
                result_to_check = virsh.define(vmxml.xml, debug=True)

            # Attach device with invalid slot to pcie-to-pci-bridge
            if case == 'attach_with_invalid_slot':
                iface = create_iface(iface_model, iface_source, **iface_kwargs)
                vmxml.add_device(iface)
                result_to_check = virsh.define(vmxml.xml, debug=True)

            # Test that pcie-to-pci-bridge has 31 available slots
            if case == 'max_slots':
                target_bus = cur_pci_br[0].index
                target_bus = hex(int(target_bus))
                logging.debug('target_bus: %s', target_bus)

                # Attach 32 interfaces
                for i in range(max_slots + 1):
                    logging.debug('address: %s', iface_kwargs['address'])
                    new_iface_kwargs = {'address': iface_kwargs['address']
                                        % (target_bus, hex(i + 1))}
                    iface = create_iface(iface_model, iface_source,
                                         **new_iface_kwargs)
                    logging.info('Attaching the %d th interface', i + 1)
                    result_in_loop = virsh.attach_device(
                        vm_name, iface.xml, flagstr='--config', debug=True)

                    # Attaching the 32rd interfaces will fail
                    if i == max_slots:
                        status_error = True
                    libvirt.check_exit_status(result_in_loop,
                                              expect_error=status_error)
                logging.debug(virsh.dumpxml(vm_name))

                # Get all devices on pcie-to-pci-bridge from new xml
                # Test if it matches with value of max_slots
                new_xml = VMXML.new_from_dumpxml(vm_name)
                device_on_pci_br = [
                    dev for dev in new_xml.get_devices('interface')
                    if dev.address['type_name'] == 'pci'
                    and int(dev.address['attrs']['bus'], 16) == int(target_bus, 16)
                ]
                logging.info('All slots of pcie-to-pci-bridge is %d',
                             len(device_on_pci_br))
                if len(device_on_pci_br) != max_slots:
                    test.fail('Max slots is %d instead of %d' %
                              (len(device_on_pci_br), max_slots))

            # Define a guest with pcie-to-pci-bridge controller's index <=bus
            if case.startswith('index_v_bus'):
                last_pci_index = max([
                    int(dev.index) for dev in vmxml.get_devices('controller')
                    if dev.type == 'pci'])
                # New index of new pcie-bridge should be +1
                new_index = last_pci_index + 1
                if case.endswith('less_than'):
                    new_bus = new_index + 1
                elif case.endswith('equal_to'):
                    new_bus = new_index
                # NOTE(review): function is cut off here in this file —
                # the remainder of the 'index_v_bus' case, the other
                # scenarios, and the finally/bkxml.sync() cleanup are missing.
def run_libvirt_scsi(test, params, env):
    """
    Test scsi disks (image / cdrom / partition backed) on a guest.

    Ensures the guest has a virtio-scsi controller, adds the configured
    scsi-bus disks to the guest XML, syncs it, then verifies that the VM
    start result matches the expected positive/negative outcome.

    :param test: test object (unused directly; errors raised via error.*)
    :param params: avocado-vt params dict driving the scenario
    :param env: test environment, provides the VM object
    :raise error.TestNAError: partition test requested but no partition
                              configured
    :raise error.TestFail: VM start outcome contradicts status_error
    """
    # Get variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    img_type = ('yes' == params.get("libvirt_scsi_img_type", "no"))
    cdrom_type = ('yes' == params.get("libvirt_scsi_cdrom_type", "no"))
    partition_type = ('yes' == params.get("libvirt_scsi_partition_type", "no"))
    partition = params.get("libvirt_scsi_partition",
                           "ENTER.YOUR.AVAILIBLE.PARTITION")
    vm_name = params.get("main_vm", "virt-tests-vm1")
    # Init a VM instance and a VMXML instance.
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_dumpxml(vm_name)
    # Keep a backup of xml to restore it in cleanup.
    backup_xml = vmxml.copy()
    # Add a virtio-scsi controller if there is not one already.
    controller_devices = vmxml.get_devices("controller")
    scsi_controller_exists = False
    for device in controller_devices:
        if device.type == "scsi":
            scsi_controller_exists = True
            break
    if not scsi_controller_exists:
        scsi_controller = Controller("controller")
        scsi_controller.type = "scsi"
        scsi_controller.index = "0"
        scsi_controller.model = "virtio-scsi"
        vmxml.add_device(scsi_controller)
    # Add disk with bus of scsi into vmxml.
    if img_type:
        # Init a QemuImg instance and create a backing image file.
        img_name = "libvirt_scsi"
        params['image_name'] = img_name
        image = qemu_storage.QemuImg(params, data_dir.get_tmp_dir(), img_name)
        img_path, _ = image.create(params)
        img_disk = Disk(type_name="file")
        img_disk.device = "disk"
        img_disk.source = img_disk.new_disk_source(
            **{'attrs': {'file': img_path}})
        img_disk.target = {'dev': "vde",
                           'bus': "scsi"}
        vmxml.add_device(img_disk)
    if cdrom_type:
        # Init a CdromDisk instance and attach it as a scsi cdrom.
        cdrom_path = os.path.join(data_dir.get_tmp_dir(), "libvirt_scsi")
        cdrom = CdromDisk(cdrom_path, data_dir.get_tmp_dir())
        cdrom.close()
        cdrom_disk = Disk(type_name="file")
        cdrom_disk.device = "cdrom"
        cdrom_disk.target = {'dev': "vdf",
                             'bus': "scsi"}
        cdrom_disk.source = cdrom_disk.new_disk_source(
            **{'attrs': {'file': cdrom_path}})
        vmxml.add_device(cdrom_disk)
    if partition_type:
        # The host partition must be supplied by the cfg; the placeholder
        # default means it was never configured.
        if partition.count("ENTER.YOUR"):
            raise error.TestNAError("Partition for partition test "
                                    "is not configured.")
        partition_disk = Disk(type_name="block")
        partition_disk.device = "disk"
        partition_disk.target = {'dev': "vdg",
                                 'bus': "scsi"}
        partition_disk.source = partition_disk.new_disk_source(
            **{'attrs': {'dev': partition}})
        vmxml.add_device(partition_disk)
    # sync the vmxml with VM.
    vmxml.sync()
    # Check the result of scsi disk.
    try:
        try:
            vm.start()
            # Start VM successfully.
            if status_error:
                raise error.TestFail('Starting VM succeeded in negative case.')
        # 'except X as e' replaces the Python-2-only 'except X, e' form so
        # the module parses on Python 3 as well.
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                raise error.TestFail("Test failed in positive case. "
                                     "error: %s" % e)
    finally:
        # clean up: restore the original guest definition.
        backup_xml.sync()
def run_virsh_attach_device(test, params, env):
    """
    Test virsh {at|de}tach-interface command.

    1) Prepare test environment and its parameters
    2) Operate virsh on one or more devices
    3) Check functionality of each device
    4) Check functionality of mmconfig option
    5) Restore domain
    6) Handle results

    :param test: test object passed through to TestParams
    :param params: avocado-vt params dict (device class names, start_vm, ...)
    :param env: test environment, provides the main VM
    """
    logging.info("Preparing initial VM state")
    # Prepare test environment and its parameters
    test_params = TestParams(params, env, test)
    if test_params.start_vm:
        # Make sure VM is working
        test_params.main_vm.verify_alive()
        test_params.main_vm.wait_for_login().close()
    else:  # VM not suppose to be started
        if test_params.main_vm.is_alive():
            test_params.main_vm.destroy(gracefully=True)

    # Capture backup of original XML early in test
    test_params.vmxml = VMXML.new_from_dumpxml(test_params.main_vm.name)

    # All devices should share same access state
    test_params.virsh = virsh.Virsh(ignore_status=True)
    logging.info("Creating %d test device instances", len(test_params.devs))
    # Create test objects from cfg. class names via subclasses above
    # (class names in test_params.devs are looked up in module globals).
    test_devices = [globals()[class_name](test_params)  # instantiate
                    for class_name in test_params.devs]  # vadu_dev_objs
    operational_results = []
    preboot_results = []
    pstboot_results = []
    try:
        operational_action(test_params, test_devices, operational_results)
        # Can't do functional testing with a cold VM, only test hot-attach
        preboot_action(test_params, test_devices, preboot_results)

        logging.info("Preparing test VM state for post-boot functional testing")
        if test_params.start_vm:
            # Hard-reboot required
            test_params.main_vm.destroy(gracefully=True,
                                        free_mac_addresses=False)
        try:
            test_params.main_vm.start()
        except virt_vm.VMStartError:
            raise error.TestFail('VM Failed to start for some reason!')
        # Signal devices reboot is finished
        for test_device in test_devices:
            test_device.booted = True
        test_params.main_vm.wait_for_login().close()
        postboot_action(test_params, test_devices, pstboot_results)
        analyze_results(test_params, operational_results,
                        preboot_results, pstboot_results)
    finally:
        logging.info("Restoring VM from backup, then checking results")
        test_params.main_vm.destroy(gracefully=False,
                                    free_mac_addresses=False)
        # Re-define the guest from the backup taken before the test.
        test_params.vmxml.undefine()
        test_params.vmxml.restore()  # Recover the original XML
        test_params.vmxml.define()
        if not test_params.start_vm:
            # Test began with not start_vm, shut it down.
            test_params.main_vm.destroy(gracefully=True)
        # Device cleanup can raise multiple exceptions, do it last:
        logging.info("Cleaning up test devices")
        test_params.cleanup(test_devices)