Example #1
    def configure_serial_console(vm_name):
        """Configure serial console"""
        # Check the primary serial and set it to pty.
        VMXML.set_primary_serial(vm_name, 'pty', '0', None)
        # Configure VM pty console.
        vm_pty_xml = VMXML.new_from_inactive_dumpxml(vm_name)
        vm_pty_xml.remove_all_device_by_type('console')

        console = Console()
        console.target_port = '0'
        console.target_type = 'serial'
        vm_pty_xml.add_device(console)
        vm_pty_xml.sync()
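
A minimal usage sketch of the helper above, assuming the usual avocado-vt imports (VMXML from virttest.libvirt_xml.vm_xml) and an illustrative guest name:

    from virttest.libvirt_xml.vm_xml import VMXML

    vm_name = 'demo-guest'  # illustrative guest name
    backup_xml = VMXML.new_from_inactive_dumpxml(vm_name).copy()
    try:
        configure_serial_console(vm_name)
    finally:
        backup_xml.sync()  # restore the original domain definition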
Example #2
def run_mix_boot_order_os_boot(params, libvirtd, vm):
    """
    Define a domain that mixes an OS-level boot device with per-disk boot order.
    """
    vm_name = vm.name
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    try:
        if not vm_xml.os.boots:
            os_xml = vm_xml.os
            os_xml.boots = {'dev': 'hd'}
            vm_xml.os = os_xml
        else:
            logging.debug(vm_xml.os.boots)

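        # libvirt rejects mixing <boot dev='...'/> in <os> with per-device
        # boot order, so the sync() below is expected to fail.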
        order = 0
        devices = vm_xml.devices
        for device in devices:
            if device.device_tag == 'disk':
                device.boot = order
                order += 1
        vm_xml.devices = devices

        try:
            vm_xml.sync()
        except LibvirtXMLError:
            pass
    finally:
        vm_xml_backup.sync()
Example #3
        def _find_comm_paths(session):
            if source_path is None:
                host_path = _get_autopath()
            else:
                host_path = source_path

            name_port_map = {}
            base_path = '/sys/class/virtio-ports'
            vports = session.cmd_output('ls %s' % base_path).split()
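            # If per-port 'name' files exist in sysfs, resolve the port by its
            # name; otherwise fall back to the port number from the live XML.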
            status = session.cmd_status('ls %s/*/name' % base_path)
            if status == 0:
                for vport in vports:
                    vport_path = os.path.join(base_path, vport)
                    name_path = os.path.join(vport_path, 'name')
                    name = session.cmd_output('cat %s' % name_path).strip()
                    name_port_map[name] = vport

                if expect_name not in name_port_map:
                    raise error.TestFail("Expect get vport name %s, got %s" %
                                         (expect_name, name_port_map))
                vport = name_port_map[expect_name]
            else:
                active_xml = VMXML.new_from_dumpxml(vm_name)
                port_number = active_xml.xmltreefile.find(
                    '/devices/channel/address').get('port')
                vport = 'vport1p%s' % port_number
            guest_path = '/dev/%s' % vport
            return guest_path, host_path
Example #4
def run(test, params, env):
    """
    Test svirt in virt-clone.
    """
    VIRT_CLONE = None
    try:
        VIRT_CLONE = utils_misc.find_command("virt-clone")
    except ValueError:
        raise error.TestNAError("No virt-clone command found.")

    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_virt_clone_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_virt_clone_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_virt_clone_vm_sec_model", "selinux")
    sec_label = params.get("svirt_virt_clone_vm_sec_label", None)
    sec_relabel = params.get("svirt_virt_clone_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label,
                'relabel': sec_relabel}
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about image.
    img_label = params.get('svirt_virt_clone_disk_label')
    # Label the disks of VM with img_label.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        utils_selinux.set_context_of_file(filename=disk_path,
                                          context=img_label)
    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the context of the VM.
    vmxml.set_seclabel([sec_dict])
    vmxml.sync()

    clone_name = ("%s-clone" % vm.name)
    try:
        cmd = ("%s --original %s --name %s --auto-clone" %
               (VIRT_CLONE, vm.name, clone_name))
        cmd_result = utils.run(cmd, ignore_status=True)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to execute virt-clone command."
                                 "Detail: %s." % cmd_result)
    finally:
        # clean up
        for path, label in backup_labels_of_disks.items():
            utils_selinux.set_context_of_file(filename=path, context=label)
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
        if not virsh.domstate(clone_name).exit_status:
            libvirt_vm.VM(clone_name, params, None, None).remove_with_storage()
Example #5
def run(test, params, env):
    """
    Test for adding controller for usb.
    """
    # Get test parameters from params.
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    index = params.get("index", "1")
    index_conflict = "yes" == params.get("index_conflict", "no")
    model = params.get("model", "nec-xhci")

    status_error = "yes" == params.get("status_error", "no")

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    controllers = vm_xml.get_devices(device_type="controller")
    devices = vm_xml.get_devices()
    for dev in controllers:
        if dev.type == "usb":
            devices.remove(dev)
    controller = Controller("controller")
    controller.type = "usb"
    controller.index = index
    controller.model = model
    devices.append(controller)
    if index_conflict:
        controller_1 = Controller("controller")
        controller_1.type = "usb"
        controller_1.index = index
        devices.append(controller_1)

    vm_xml.set_devices(devices)

    try:
        try:
            vm_xml.sync()
            vm.start()

            if status_error:
                raise error.TestFail("Add controller successfully in negative case.")
            else:
                try:
                    session = vm.wait_for_login()
                except (LoginError, ShellError) as e:
                    error_msg = "Test failed in positive case.\n error: %s\n" % e
                    raise error.TestFail(error_msg)
                cmd = "dmesg -c | grep %s" % model.split('-')[-1]
                stat_dmesg = session.cmd_status(cmd)
                if stat_dmesg != 0:
                    raise error.TestNAError("Fail to run dmesg in guest")
                session.close()
        except (LibvirtXMLError, VMStartError) as e:
            if not status_error:
                raise error.TestFail("Add controller failed. Detail: %s" % e)
    finally:
        vm_xml_backup.sync()
Example #6
    def _check_xml():
        """
        Check defined XML against expectation
        """
        expected_channel = Channel(channel_type)

        try:
            source_dict = channel.source
        except LibvirtXMLNotFoundError:
            source_dict = {}

        if channel_type == 'pty':
            source_dict = {}
        elif channel_type == 'unix':
            if source_mode is None:
                if source_path:
                    source_dict['mode'] = 'connect'
                else:
                    source_dict['mode'] = 'bind'
            if source_path is None:
                source_dict['path'] = _get_autopath()
            if source_autopath:
                del source_dict['autopath']

        target_dict = {}
        if target_type == 'virtio':
            expected_channel.address = {
                'bus': '0',
                'controller': '0',
                'port': '1',
                'type': 'virtio-serial',
            }
            if 'type' in channel.target:
                target_dict['type'] = channel.target['type']
            if 'name' in channel.target:
                target_dict['name'] = channel.target['name']
        elif target_type == 'guestfwd':
            if 'type' in channel.target:
                target_dict['type'] = channel.target['type']
            if 'address' in channel.target:
                target_dict['address'] = channel.target['address']
            if 'port' in channel.target:
                target_dict['port'] = channel.target['port']

        if source_dict:
            expected_channel.source = source_dict
        if target_dict:
            expected_channel.target = target_dict

        current_xml = VMXML.new_from_dumpxml(vm_name)
        channel_elem = current_xml.xmltreefile.find('devices/channel')
        cur_channel = Channel.new_from_element(channel_elem)
        if not (expected_channel == cur_channel):
            raise error.TestFail("Expect generate channel:\n%s\nBut got:\n%s" %
                                 (expected_channel, cur_channel))
Example #7
def run(test, params, env):
    """
    Test of libvirt SPICE related features.

    1) Block specified ports if required;
    2) Setup SPICE TLS certification if required;
    3) Setup graphics tag in VM;
    4) Try to start VM;
    5) Parse and check result with expected.
    6) Clean up environment.
    """

    vm_name = params.get("main_vm", "virt-tests-vm1")
    spice_xml = params.get("spice_xml", "no") == 'yes'
    vnc_xml = params.get("vnc_xml", "no") == 'yes'
    vm = env.get_vm(vm_name)
    if vm.is_alive():
        vm.destroy()

    sockets = block_ports(params)
    networks = setup_networks(params)

    expected_result = get_expected_results(params, networks)
    env_state = EnvState(params, expected_result)

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    try:
        vm_xml.remove_all_graphics()
        if spice_xml:
            spice_graphic = generate_spice_graphic_xml(params, expected_result)
            logging.debug('Test SPICE XML is: %s', spice_graphic)
            devices = vm_xml.devices
            devices.append(spice_graphic)
            vm_xml.devices = devices
        if vnc_xml:
            vnc_graphic = generate_vnc_graphic_xml(params, expected_result)
            logging.debug('Test VNC XML is: %s', vnc_graphic)
            devices = vm_xml.devices
            devices.append(vnc_graphic)
            vm_xml.devices = devices
        vm_xml.sync()
        all_ips = utils_net.get_all_ips()

        fail_patts = expected_result['fail_patts']
        try:
            vm.start()
        except virt_vm.VMStartError as detail:
            if not fail_patts:
                raise error.TestFail(
                    "Expect VM can be started, but failed with: %s" % detail)
            for patt in fail_patts:
                if re.search(patt, str(detail)):
                    return
            raise error.TestFail(
                "Expect fail with error in %s, but failed with: %s"
                % (fail_patts, detail))
        else:
Example #8
    def check_dumpxml():
        """
        Check whether the added devices are shown in the guest xml
        """
        pattern = "<sound model=\"%s\">" % sound_model
        # Check sound model
        xml_after_adding_device = VMXML.new_from_dumpxml(vm_name)
        if pattern not in str(xml_after_adding_device):
            test.fail("Can not find the %s sound device xml "
                      "in the guest xml file." % sound_model)
        # Check codec type
        if codec_type:
            pattern = "<codec type=\"%s\" />" % codec_type
            if pattern not in str(xml_after_adding_device):
                test.fail("Can not find the %s codec xml for sound dev "
                          "in the guest xml file." % codec_type)
Example #9
def run_invalid_interface(params, libvirtd, vm):
    """
    Define a domain with an invalid interface device.
    """
    vm_name = vm.name
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    try:
        iface_xml = interface.Interface('bridge')
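        # 'vnet*' is the prefix libvirt uses for auto-generated tap device
        # names, which makes this target invalid, so sync() should fail.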
        iface_xml.set_target({'dev': 'vnet'})
        devices = vm_xml.devices
        devices.append(iface_xml)
        vm_xml.devices = devices

        try:
            vm_xml.sync()
        except LibvirtXMLError:
            pass
    finally:
        vm_xml_backup.sync()
Example #10
def run(test, params, env):
    """
    Test for hotplug usb device.
    """
    # Get test parameters from params.

    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)

    usb_type = params.get("usb_type", "kbd")
    attach_type = params.get("attach_type", "attach_device")
    attach_count = int(params.get("attach_count", "1"))
    if usb_type == "storage":
        model = params.get("model", "nec-xhci")
        index = params.get("index", "1")
    status_error = "yes" == params.get("status_error", "no")

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status("permissive")

    if usb_type == "storage":
        controllers = vm_xml.get_devices(device_type="controller")
        devices = vm_xml.get_devices()
        for dev in controllers:
            if dev.type == "usb" and dev.index == "1":
                devices.remove(dev)
        controller = Controller("controller")
        controller.type = "usb"
        controller.index = index
        controller.model = model
        devices.append(controller)
        vm_xml.set_devices(devices)

    try:
        session = vm.wait_for_login()
    except (LoginError, VMError, ShellError) as e:
        raise error.TestFail("Test failed: %s" % str(e))
Example #11
    def _verify_attach_channel_device(char_type, port_id):
        """
        Test unix socket communication between host and guest through
        channel
        :param char_type: the type of the channel
        :param port_id: the port id of the channel
        """
        result = virsh.attach_device(vm_name, xml_file)
        if result.stderr:
            test.fail('Failed to attach %s to %s. Result:\n %s' %
                      (char_type, vm_name, result))
        current_xml = VMXML.new_from_dumpxml(vm_name)
        channel_devices = current_xml.get_devices('channel')

        found_dev = False
        for channel_device in channel_devices:
            if channel_device.address['port'] == port_id:
                found_dev = True
                break
        if not found_dev:
            logging.debug("Failed to find channel with port %s", port_id)
        return found_dev
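
A hedged usage sketch (xml_file is prepared by the enclosing test; the channel type and port id are illustrative):

    # Hypothetical call: verify a virtio channel landed on port 2.
    if not _verify_attach_channel_device('virtio', '2'):
        test.fail('Channel not attached on the expected port')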
Example #12
    def check_guest_contr():
        """
        Check the controller in guest xml

        :raise: test.fail if the controller does not meet the expectation
        """
        found = False
        cur_vm_xml = VMXML.new_from_dumpxml(vm_name)
        for cntl in cur_vm_xml.devices.by_device_tag('controller'):
            if (cntl.type == 'pci' and
               cntl.model == contr_model and
               cntl.index == contr_index):
                found = True
                logging.debug(cntl.target)
                cntl_hotplug = cntl.target.get('hotplug')
                logging.debug("Got controller's hotplug:%s", cntl_hotplug)
                if cntl_hotplug != hotplug_option:
                    test.fail("The controller's hotplug option is {}, "
                              "but expect {}".format(cntl_hotplug,
                                                     hotplug_option))
                break
        if not found:
            test.fail("The controller with index {} is not found".format(contr_index))
Example #14
    def get_controller_addr(cntlr_type=None, model=None, index=None):
        """
        Get the address of testing controller from VM XML as a string with
        format "bus:slot.function".
        """
        cur_vm_xml = VMXML.new_from_dumpxml(vm_name)
        addr = None
        for elem in cur_vm_xml.xmltreefile.findall('/devices/controller'):
            if (
                    (cntlr_type is None or elem.get('type') == cntlr_type) and
                    (model is None or elem.get('model') == model) and
                    (index is None or elem.get('index') == index)):
                addr_elem = elem.find('./address')
                if addr_elem is not None:
                    addr = Address.new_from_element(addr_elem).attrs

        if addr is not None:
            bus = int(addr['bus'], 0)
            slot = int(addr['slot'], 0)
            func = int(addr['function'], 0)
            addr_str = '%02d:%02d.%1d' % (bus, slot, func)
            logging.debug("String for address element %s is %s", addr, addr_str)
            return addr_str
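
For reference, here is how the base-0 int() parsing above maps libvirt's hex address attributes to the returned "bus:slot.function" string (attribute values are illustrative):

    # Illustrative address attributes as they appear in domain XML.
    addr = {'bus': '0x00', 'slot': '0x03', 'function': '0x0'}
    bus = int(addr['bus'], 0)        # base 0 honors the 0x prefix -> 0
    slot = int(addr['slot'], 0)      # -> 3
    func = int(addr['function'], 0)  # -> 0
    print('%02d:%02d.%1d' % (bus, slot, func))  # prints: 00:03.0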
Example #15
def run(test, params, env):
    """
    Confirm native 'dasd' partitions can be read
    when attached via 'virtio-blk'
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    try:
        disk_path = get_partitioned_dasd_path()
        attach_disk(vm_name, TARGET, disk_path)

        session = vm.wait_for_login()
        check_dasd_partition_table(session, TARGET)
    finally:
        # sync() releases the attached disk, a precondition for disabling it
        backup_xml.sync()
        global TEST_DASD_ID
        if TEST_DASD_ID:
            disable_disk(TEST_DASD_ID)
Example #16
def run_pm_test(params, libvirtd, vm):
    """
    Destroy the VM after executing a series of S3 suspend and save/restore operations.
    """

    vm_name = vm.name
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    save_path = os.path.join(data_dir.get_tmp_dir(), 'tmp.save')
    try:
        pm_xml = VMPMXML()
        pm_xml.mem_enabled = 'yes'
        vm_xml.pm = pm_xml
        vm_xml.sync()
        vm.prepare_guest_agent()
        virsh.dompmsuspend(vm.name, 'mem')
        virsh.dompmwakeup(vm.name)
        virsh.save(vm.name, save_path)
        virsh.restore(save_path)
        virsh.dompmsuspend(vm.name, 'mem')
        virsh.save(vm.name, save_path)
        virsh.destroy(vm.name)
    finally:
        vm_xml_backup.sync()
Example #18
    def configure_serial_console(vm_name, dev_type, guest_log_file=None):
        """
        Configure serial console.

        :param vm_name: guest name
        :param dev_type: device type
        :param guest_log_file: the path of VM log file
        """
        guest_xml = VMXML.new_from_inactive_dumpxml(vm_name)
        guest_xml.remove_all_device_by_type('serial')
        guest_xml.remove_all_device_by_type('console')

        serial = Serial(dev_type)
        serial.target_port = '0'

        console = Console(dev_type)
        console.target_port = '0'
        console.target_type = 'serial'

        if dev_type == "file" and guest_log_file is not None:
            serial.sources = console.sources = [{'path': guest_log_file, 'append': 'off'}]
        guest_xml.add_device(serial)
        guest_xml.add_device(console)
        guest_xml.sync()
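
A minimal usage sketch of the helper above (guest name and log path are illustrative):

    # Route the guest's serial console to a log file.
    log_file = '/var/log/libvirt/qemu/demo-guest-serial.log'
    configure_serial_console('demo-guest', 'file', log_file)
    # Or attach an interactive pty console instead.
    configure_serial_console('demo-guest', 'pty')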
Example #19
    def get_search_patt_qemu_line():
        """
        Collect and verify the expected qemu command line patterns from the
        guest XML.

        :return: a list of patterns, e.g.
                 -device pci-bridge,chassis_nr=1,id=pci.1,bus=pci.0,addr=0x3
        """
        cur_vm_xml = VMXML.new_from_dumpxml(vm_name)
        qemu_list = []
        # Check the pci-root controller has index = 0
        if no_pci_controller == "yes":
            (_, qemu_list) = get_patt_inx_ctl(cur_vm_xml,
                                              qemu_list, '0')
            return qemu_list

        # The index numbers of the pci-bridge controllers should be exactly
        # [1..pci_bus_number]
        if int(pci_bus_number) > 0:
            return get_patt_non_zero_bus(cur_vm_xml)
        # All controllers should exist if there is a gap between two PCI
        # controller indexes
        if index and index_second and int(index) > 0 and int(index_second) > 0:
            for idx in range(int(index_second), int(index) + 1):
                (_, qemu_list) = get_patt_inx_ctl(cur_vm_xml,
                                                  qemu_list, str(idx))
            return qemu_list

        # All controllers should exist with index among [1..index]
        if index and int(index) > 0 and not index_second:
            for idx in range(1, int(index) + 1):
                (search_result, qemu_list) = get_patt_inx_ctl(cur_vm_xml,
                                                              qemu_list,
                                                              str(idx))
                if not search_result:
                    test.fail("Can not find %s controller "
                              "with index %s." % (model, str(idx)))
            return qemu_list
Example #20
def run(test, params, env):
    """
    Test for basic controller device function.

    1) Define the VM w/o specified controller device and check result meets
       expectation.
    2) Start the guest and check if start result meets expectation
    3) Test the function of started controller device
    4) Shutdown the VM and clean up environment
    """
    def setup_os_xml():
        """
        Prepare os part of VM XML.

        """
        nonlocal cur_machine  # record the machine type for later qemu checks
        osxml = vm_xml.os
        orig_machine = osxml.machine
        if os_machine:
            osxml.machine = os_machine
            vm_xml.os = osxml
        else:
            cur_machine = orig_machine

    def setup_controller_xml(index, addr_target=None):
        """
        Prepare controller devices of VM XML.

        :param index: The index of controller
        :param addr_target: The controller address

        """
        ctrl = Controller(type_name=cntlr_type)
        if model:
            ctrl.model = model
        if pcihole:
            ctrl.pcihole64 = pcihole
        if vectors:
            ctrl.vectors = vectors
        if index:
            ctrl.index = index
        if chassisNr:
            ctrl.target = {'chassisNr': chassisNr}
        if model_name:
            ctrl.model_name = {'name': model_name}

        if addr_target:
            match = re.match(
                r"(?P<bus>[0-9]*):(?P<slot>[0-9a-f]*).(?P<function>[0-9])",
                addr_target)
            if match:
                addr_dict = match.groupdict()
                addr_dict['bus'] = hex(int(addr_dict['bus'], 16))
                addr_dict['slot'] = hex(int(addr_dict['slot'], 16))
                addr_dict['function'] = hex(int(addr_dict['function'], 16))
                addr_dict['domain'] = '0x0000'
                ctrl.address = ctrl.new_controller_address(attrs=addr_dict)

        logging.debug("Controller XML is:%s", ctrl)
        vm_xml.add_device(ctrl)

        if cmpnn_cntlr_model is not None:
            for num in range(int(cmpnn_cntlr_num)):
                ctrl = Controller(type_name=cntlr_type)
                ctrl.model = cmpnn_cntlr_model + str(num + 1)
                ctrl.index = index
                logging.debug("Controller XML is:%s", ctrl)
                vm_xml.add_device(ctrl)

    def define_and_check():
        """
        Predict the error message when defining and try to define the guest
        with testing XML.
        """
        fail_patts = []
        if expect_err_msg:
            fail_patts.append(expect_err_msg)
        vm_xml.undefine()
        res = vm_xml.virsh.define(vm_xml.xml)
        logging.debug("Expect failures: %s", fail_patts)
        libvirt.check_result(res, expected_fails=fail_patts)
        return not res.exit_status

    def start_and_check():
        """
        Predict the error message when starting and try to start the guest.
        """
        fail_patts = []
        if expect_err_msg:
            fail_patts.append(expect_err_msg)
        res = virsh.start(vm_name)
        logging.debug("Expect failures: %s", fail_patts)
        libvirt.check_result(res, expected_fails=fail_patts)
        return not res.exit_status

    def prepare_qemu_pattern(elem):
        """
        Collect the patterns to be searched in qemu command line.

        :param elem: a Controller object

        :return: A list including search patterns
        """
        search_qemu_cmd = []

        bus = int(elem.address.attrs.get('bus'), 0)
        slot = int(elem.address.attrs.get('slot'), 0)
        func = int(elem.address.attrs.get('function'), 0)
        addr_str = '%02d:%02d.%1d' % (bus, slot, func)
        name = elem.alias.get('name')
        if elem.model != 'dmi-to-pci-bridge':
            chassisNR = elem.target.get('chassisNr')
            value = "pci-bridge,chassis_nr=%s" % chassisNR
            value = "%s,id=%s,bus=pci.%d,addr=%#x" % (value, name, bus, slot)
        else:
            value = "%s" % elem.model_name['name']
            value = "%s,id=%s,bus=pcie.%d,addr=%#x" % (value, name, bus, slot)

        tup = ('-device', value)
        search_qemu_cmd.append(tup)
        return search_qemu_cmd

    def search_controller(vm_xml,
                          cntl_type,
                          cntl_model,
                          cntl_index,
                          qemu_pattern=True):
        """
        Search a controller as specified and prepare the expected qemu
        command line
        :params vm_xml: The guest VMXML instance
        :params cntl_type: The controller type
        :params cntl_model: The controller model
        :params cntl_index: The controller index
        :params qemu_pattern: True if it needs to be checked with qemu
                              command line. False if not.

        :return: Tuple (Controller, List)
                       Controller: the found controller object
                       List: a list including qemu search patterns
        """
        logging.debug("Search controller with type %s, model %s index %s",
                      cntl_type, cntl_model, cntl_index)
        qemu_list = None
        found = False
        for elem in vm_xml.devices.by_device_tag('controller'):
            if (elem.type == cntl_type and elem.model == cntl_model
                    and elem.index == cntl_index):
                found = True
                if (qemu_pattern and cntl_model != 'pci-root'
                        and cntl_model != 'pcie-root'):
                    qemu_list = prepare_qemu_pattern(elem)
                return (elem, qemu_list)
        if not found:
            test.fail("Can not find %s controller "
                      "with index %s." % (cntl_model, cntl_index))

    def get_patt_inx_ctl(cur_vm_xml, qemu_list, inx):
        """
        Get search pattern in qemu line for some kind of cases

        :param cur_vm_xml: Guest xml
        :param qemu_list: List for storing qemu search patterns
        :param inx: Controller index used

        :return: a tuple for (search_result, qemu_list)

        """
        (search_result,
         qemu_search) = search_controller(cur_vm_xml, cntlr_type, model, inx)
        if qemu_search:
            qemu_list.extend(qemu_search)
        return (search_result, qemu_list)

    def get_patt_non_zero_bus(cur_vm_xml, qemu_list):
        """
        Verify that the matching controllers' indexes are exactly
        [1..pci_bus_number] and collect their qemu search patterns.
        """
        actual_set = set()
        for elem in cur_vm_xml.devices.by_device_tag('controller'):
            if (elem.type == cntlr_type and elem.model == model):
                actual_set.add(int(elem.index))
                qemu_list = prepare_qemu_pattern(elem)
        expect_set = set()
        for num in range(1, int(pci_bus_number) + 1):
            expect_set.add(num)

        logging.debug("expect: %s, actual: %s", expect_set, actual_set)
        if (not actual_set.issubset(expect_set)
                or not expect_set.issubset(actual_set)):
            test.fail("The actual index set (%s)does "
                      "not match the expect index set "
                      "(%s)." % (actual_set, expect_set))
        return qemu_list

    def get_search_patt_qemu_line():
        """
        Collect and verify the expected qemu command line patterns from the
        guest XML.

        :return: a list of patterns, e.g.
                 -device pci-bridge,chassis_nr=1,id=pci.1,bus=pci.0,addr=0x3
        """
        cur_vm_xml = VMXML.new_from_dumpxml(vm_name)
        qemu_list = []
        # Check the pci-root controller has index = 0
        if no_pci_controller == "yes":
            (_, qemu_list) = get_patt_inx_ctl(cur_vm_xml, qemu_list, '0')
            return qemu_list

        # The index numbers of the pci-bridge controllers should be exactly
        # [1..pci_bus_number]
        if int(pci_bus_number) > 0:
            return get_patt_non_zero_bus(cur_vm_xml, qemu_list)
        # All controllers should exist if there is a gap between two PCI
        # controller indexes
        if index and index_second and int(index) > 0 and int(index_second) > 0:
            for idx in range(int(index_second), int(index) + 1):
                (_, qemu_list) = get_patt_inx_ctl(cur_vm_xml, qemu_list,
                                                  str(idx))
            return qemu_list

        # All controllers should exist with index among [1..index]
        if index and int(index) > 0 and not index_second:
            for idx in range(1, int(index) + 1):
                (search_result,
                 qemu_list) = get_patt_inx_ctl(cur_vm_xml, qemu_list, str(idx))
                if not search_result:
                    test.fail("Can not find %s controller "
                              "with index %s." % (model, str(idx)))
            return qemu_list

    def get_controller_addr(cntlr_type=None, model=None, index=None):
        """
        Get the address of testing controller from VM XML as a string with
        format "bus:slot.function".

        :param cntlr_type: controller type
        :param model: controller model
        :param index: controller index

        :return: an address string of the specified controller
        """
        if model in ['pci-root', 'pcie-root']:
            return None

        addr_str = None
        cur_vm_xml = VMXML.new_from_dumpxml(vm_name)

        for elem in cur_vm_xml.devices.by_device_tag('controller'):
            if ((cntlr_type is None or elem.type == cntlr_type)
                    and (model is None or elem.model == model)
                    and (index is None or elem.index == index)):
                addr_elem = elem.address
                if addr_elem is None:
                    test.error("Can not find 'Address' "
                               "element for the controller")

                bus = int(addr_elem.attrs.get('bus'), 0)
                slot = int(addr_elem.attrs.get('slot'), 0)
                func = int(addr_elem.attrs.get('function'), 0)
                addr_str = '%02d:%02x.%1d' % (bus, slot, func)
                logging.debug("Controller address is %s", addr_str)
                break

        return addr_str

    def check_controller_addr():
        """
        Check test controller address against expectation.
        """
        cur_addr = get_controller_addr(cntlr_type, model, index)

        if model in ['pci-root', 'pcie-root']:
            if cur_addr is None:
                return
            else:
                test.fail('Expected the controller to have no address, '
                          'but got "%s"' % cur_addr)

        if index and int(index) != 0:
            if '00:00' in cur_addr:
                test.fail("Invalid PCI address 0000:00:00, "
                          "at least one of domain, bus, "
                          "or slot must be > 0")

        exp_addr_patt = r'00:[0-9]{2}.[0-9]'
        if model in ['ehci']:
            exp_addr_patt = r'0[1-9]:[0-9]{2}.[0-9]'
        if addr_str:
            # An explicit controller_address from the test config overrides
            # the generic pattern (the fetched address no longer shadows it).
            exp_addr_patt = addr_str

        if not re.match(exp_addr_patt, cur_addr):
            test.fail('Expected controller address to match "%s", '
                      'but got "%s"' % (exp_addr_patt, cur_addr))

    def check_qemu_cmdline(search_pattern=None):
        """
        Check domain qemu command line against expectation.

        :param search_pattern: search list with tuple objects
        """
        with open('/proc/%s/cmdline' % vm.get_pid()) as proc_file:
            cmdline = proc_file.read()
        logging.debug('Qemu command line: %s', cmdline)

        options = cmdline.split('\x00')
        # Search the command line options for the given patterns
        if search_pattern and isinstance(search_pattern, list):
            for pattern in search_pattern:
                key = pattern[0]
                value = pattern[1]
                logging.debug("key=%s, value=%s", key, value)
                found = False
                check_value = False
                for opt in options:
                    if check_value:
                        if opt == value:
                            logging.debug("Found the expected (%s %s) in qemu "
                                          "command line" % (key, value))
                            found = True
                            break
                        check_value = False
                    if key == opt:
                        check_value = True
                if not found:
                    test.fail("Can not find '%s %s' in qemu "
                              "command line" % (key, value))

        # Get pcihole options from qemu command line
        pcihole_opt = ''
        for idx, opt in enumerate(options):
            if 'pci-hole64-size' in opt:
                pcihole_opt = opt

        # Get expected pcihole options from params
        exp_pcihole_opt = ''
        if (cntlr_type == 'pci' and model in ['pci-root', 'pcie-root']
                and pcihole):
            if 'pc' in cur_machine:
                exp_pcihole_opt = 'i440FX-pcihost'
            elif 'q35' in cur_machine:
                exp_pcihole_opt = 'q35-pcihost'
            exp_pcihole_opt += '.pci-hole64-size=%sK' % pcihole

        # Check options against expectation
        if pcihole_opt != exp_pcihole_opt:
            test.fail('Expected qemu pcihole option "%s", '
                      'but got "%s"' % (exp_pcihole_opt, pcihole_opt))

        # Check usb options against expectation
        if cntlr_type == "usb":
            pattern = ""
            if cmpnn_cntlr_num is not None:
                for num in range(int(cmpnn_cntlr_num)):
                    name = (cmpnn_cntlr_model + str(num + 1)).split('-')
                    pattern = pattern + r"-device.%s-usb-%s.*" % (name[0],
                                                                  name[1])
            elif model == "ehci":
                pattern = r"-device.usb-ehci"
            elif model == "qemu-xhci":
                pattern = r"-device.qemu-xhci"

            logging.debug("pattern is %s", pattern)

            if pattern and not re.search(pattern, cmdline):
                test.fail(
                    "Expect get usb model info in qemu cmdline, but failed!")

    def check_guest(cntlr_type, cntlr_model, cntlr_index=None):
        """
        Check status within the guest against expectation.
        """

        if cntlr_model in ['pci-root', 'pcie-root']:
            return

        addr_str = get_controller_addr(cntlr_type=cntlr_type,
                                       model=cntlr_model,
                                       index=cntlr_index)
        pci_name = 'PCI bridge:'
        verbose_option = ""
        if cntlr_type == 'virtio-serial':
            verbose_option = '-vvv'

        if cntlr_index:
            logging.debug("%s, %s, %s", cntlr_type, cntlr_model, cntlr_index)
        if addr_str is None:
            test.fail("Can't find target controller in XML")

        session = vm.wait_for_login(serial=True)
        status, output = session.cmd_status_output('lspci %s -s %s' %
                                                   (verbose_option, addr_str))
        logging.debug("lspci output is: %s", output)

        if (cntlr_type == 'virtio-serial' and (vectors and int(vectors) == 0)):
            if 'MSI' in output:
                test.fail("Expect MSI disable with zero vectors, "
                          "but got %s" % output)
        if (cntlr_type == 'virtio-serial'
                and (vectors is None or int(vectors) != 0)):
            if 'MSI' not in output:
                test.fail("Expect MSI enable with non-zero vectors, "
                          "but got %s" % output)
        if (cntlr_type == 'pci'):
            if pci_name not in output:
                test.fail("Can't find target pci device"
                          " '%s' on guest " % addr_str)

    os_machine = params.get('os_machine', None)
    libvirt.check_machine_type_arch(os_machine)
    cntlr_type = params.get('controller_type', None)
    model = params.get('controller_model', None)
    index = params.get('controller_index', None)
    vectors = params.get('controller_vectors', None)
    pcihole = params.get('controller_pcihole64', None)
    chassisNr = params.get('chassisNr', None)
    addr_str = params.get('controller_address', None)
    cmpnn_cntlr_model = params.get('companion_controller_model', None)
    cmpnn_cntlr_num = params.get('companion_controller_num', None)
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    no_pci_controller = params.get("no_pci_controller", "no")
    pci_bus_number = params.get("pci_bus_number", "0")
    remove_address = params.get("remove_address", "yes")
    setup_controller = params.get("setup_controller", "yes")
    index_second = params.get("controller_index_second", None)
    cur_machine = os_machine
    check_qemu = "yes" == params.get("check_qemu", "no")
    check_within_guest = "yes" == params.get("check_within_guest", "no")
    run_vm = "yes" == params.get("run_vm", "no")
    second_level_controller_num = params.get("second_level_controller_num",
                                             "0")
    status_error = "yes" == params.get("status_error", "no")
    model_name = params.get("model_name", None)
    expect_err_msg = params.get("err_msg", None)

    if index and index_second:
        if int(index) > int(index_second):
            test.error("Invalid parameters")

    vm = env.get_vm(vm_name)
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    try:
        vm_xml.remove_all_device_by_type('controller')
        if remove_address == "yes":
            remove_devices(vm_xml, 'address')
        remove_devices(vm_xml, 'usb')
        if setup_controller == "yes":
            if index_second:
                setup_controller_xml(index_second)
            setup_controller_xml(index, addr_str)
            if second_level_controller_num:
                for indx in range(2, int(second_level_controller_num) + 2):
                    addr_second = "0%s:0%s.0" % (index, str(indx))
                    setup_controller_xml(str(indx), addr_second)
        setup_os_xml()
        if int(pci_bus_number) > 0:
            address_params = {'bus': "%0#4x" % int(pci_bus_number)}
            libvirt.set_disk_attr(vm_xml, 'vda', 'address', address_params)

        logging.debug("Test VM XML before define is %s" % vm_xml)

        if not define_and_check():
            logging.debug("Can't define the VM, exiting.")
            return
        vm_xml = VMXML.new_from_dumpxml(vm_name)
        logging.debug("Test VM XML after define is %s" % vm_xml)

        check_controller_addr()
        if run_vm:
            try:
                if not start_and_check():
                    logging.debug("Can't start the VM, exiting.")
                    return
            except virt_vm.VMStartError as detail:
                test.fail(detail)

            search_qemu_cmd = get_search_patt_qemu_line()
            if check_qemu:
                check_qemu_cmdline(search_pattern=search_qemu_cmd)

            if check_within_guest:
                try:
                    if int(pci_bus_number) > 0:
                        for contr_idx in range(1, int(pci_bus_number) + 1):
                            check_guest(cntlr_type, model, str(contr_idx))
                        return
                    if index:
                        check_max_index = int(index) + int(
                            second_level_controller_num)
                        for contr_idx in range(1, int(check_max_index) + 1):
                            check_guest(cntlr_type, model, str(contr_idx))
                    else:
                        check_guest(cntlr_type, model)
                        if model == 'pcie-root':
                            # Need check other auto added controller
                            check_guest(cntlr_type, 'dmi-to-pci-bridge', '1')
                            check_guest(cntlr_type, 'pci-bridge', '2')
                except remote.LoginTimeoutError as e:
                    logging.debug(e)
                    if not status_error:
                        raise

    finally:
        vm_xml_backup.sync()
Example #21
def run(test, params, env):
    """
    Test svirt in VM start and destroy.

    (1) Init variables for the test.
    (2) Config qemu conf if needed.
    (3) Label the VM and disk with proper label.
    (4) Start VM and check the context.
    (5) Destroy VM and check the context.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_start_destroy_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_start_destroy_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_start_destroy_vm_sec_model", "selinux")
    sec_label = params.get("svirt_start_destroy_vm_sec_label", None)
    sec_baselabel = params.get("svirt_start_destroy_vm_sec_baselabel", None)
    security_driver = params.get("security_driver", None)
    security_default_confined = params.get("security_default_confined", None)
    security_require_confined = params.get("security_require_confined", None)
    no_sec_model = 'yes' == params.get("no_sec_model", 'no')
    sec_relabel = params.get("svirt_start_destroy_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'relabel': sec_relabel}
    sec_dict_list = []

    def _set_sec_model(model):
        """
        Set sec_dict_list base on given sec model type
        """
        sec_dict_copy = sec_dict.copy()
        sec_dict_copy['model'] = model
        if sec_type != "none":
            if sec_type == "dynamic" and sec_baselabel:
                sec_dict_copy['baselabel'] = sec_baselabel
            else:
                sec_dict_copy['label'] = sec_label
        sec_dict_list.append(sec_dict_copy)

    if not no_sec_model:
        if "," in sec_model:
            sec_models = sec_model.split(",")
            for model in sec_models:
                _set_sec_model(model)
        else:
            _set_sec_model(sec_model)
    else:
        sec_dict_list.append(sec_dict)

    logging.debug("sec_dict_list is: %s" % sec_dict_list)
    poweroff_with_destroy = ("destroy" == params.get(
        "svirt_start_destroy_vm_poweroff", "destroy"))
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about image.
    img_label = params.get('svirt_start_destroy_disk_label')
    # Backup disk Labels.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    backup_ownership_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        f = os.open(disk_path, 0)
        stat_re = os.fstat(f)
        backup_ownership_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                          stat_re.st_gid)
    # Backup selinux of host.
    backup_sestatus = utils_selinux.get_status()

    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    def _resolve_label(label_string):
        labels = label_string.split(":")
        label_type = labels[2]
        if len(labels) == 4:
            label_range = labels[3]
        elif len(labels) > 4:
            label_range = "%s:%s" % (labels[3], labels[4])
        else:
            label_range = None
        return (label_type, label_range)

    def _check_label_equal(label1, label2):
        label1s = label1.split(":")
        label2s = label2.split(":")
        for i in range(len(label1s)):
            if label1s[i] != label2s[i]:
                return False
        return True
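    # For illustration (label values are made up):
    #   _resolve_label('system_u:object_r:svirt_image_t:s0')
    #       -> ('svirt_image_t', 's0')
    #   _resolve_label('system_u:object_r:svirt_image_t:s0:c87,c520')
    #       -> ('svirt_image_t', 's0:c87,c520')
    #   _check_label_equal compares only as many fields as its first argument
    #   has, so the MCS categories libvirt appends at runtime are ignored:
    #   _check_label_equal('system_u:object_r:svirt_image_t:s0',
    #                      'system_u:object_r:svirt_image_t:s0:c87,c520')
    #       -> True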

    try:
        # Set disk label
        (img_label_type, img_label_range) = _resolve_label(img_label)
        for disk in disks.values():
            disk_path = disk['source']
            dir_path = "%s(/.*)?" % os.path.dirname(disk_path)
            # Using semanage set context persistently
            utils_selinux.set_defcon(context_type=img_label_type,
                                     pathregex=dir_path,
                                     context_range=img_label_range)
            o_r = utils_selinux.verify_defcon(pathname=disk_path,
                                              readonly=False,
                                              forcedesc=True)
            orig_label_type = backup_labels_of_disks[disk_path].split(":")[2]
            if o_r and (orig_label_type != img_label_type):
                raise error.TestFail("change disk label(%s) failed" %
                                     img_label_type)
            os.chown(disk_path, 107, 107)

        # Set selinux of host.
        utils_selinux.set_status(host_sestatus)

        # Set qemu conf
        if security_driver:
            qemu_conf.set_string('security_driver', security_driver)
        if security_default_confined:
            qemu_conf.security_default_confined = security_default_confined
        if security_require_confined:
            qemu_conf.security_require_confined = security_require_confined
        if (security_driver or security_default_confined
                or security_require_confined):
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()

        # Set the context of the VM.
        vmxml.set_seclabel(sec_dict_list)
        vmxml.sync()
        logging.debug("the domain xml is: %s" % vmxml.xmltreefile)

        # restart libvirtd
        libvirtd.restart()

        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with seclabel can access the image with the context.
            if status_error:
                raise error.TestFail("Test succeeded in negative case.")
            # Check the label of VM and image when VM is running.
            vm_context = utils_selinux.get_context_of_process(vm.get_pid())
            if (sec_type == "static") and (not vm_context == sec_label):
                raise error.TestFail("Label of VM is not expected after "
                                     "starting.\n"
                                     "Detail: vm_context=%s, sec_label=%s" %
                                     (vm_context, sec_label))
            disk_context = utils_selinux.get_context_of_file(
                filename=list(disks.values())[0]['source'])
            if (sec_relabel == "no") and (not disk_context == img_label):
                raise error.TestFail("Label of disk is not expected after VM "
                                     "starting.\n"
                                     "Detail: disk_context=%s, img_label=%s." %
                                     (disk_context, img_label))
            if sec_relabel == "yes" and not no_sec_model:
                vmxml = VMXML.new_from_dumpxml(vm_name)
                imagelabel = vmxml.get_seclabel()[0]['imagelabel']
                # the disk context is 'system_u:object_r:svirt_image_t:s0',
                # when VM started, the MLS/MCS Range will be added automatically.
                # imagelabel turns to be 'system_u:object_r:svirt_image_t:s0:cxx,cxxx'
                # but we shouldn't check the MCS range.
                if not _check_label_equal(disk_context, imagelabel):
                    raise error.TestFail("Label of disk is not relabeled by "
                                         "VM\nDetail: disk_context="
                                         "%s, imagelabel=%s" %
                                         (disk_context, imagelabel))
            # Check the label of disk after VM being destroyed.
            if poweroff_with_destroy:
                vm.destroy(gracefully=False)
            else:
                vm.wait_for_login()
                vm.shutdown()
            img_label_after = utils_selinux.get_context_of_file(
                filename=list(disks.values())[0]['source'])
            if (not img_label_after == img_label):
                # Bug 547546 - RFE: the security drivers must remember original
                # permissions/labels and restore them after
                # https://bugzilla.redhat.com/show_bug.cgi?id=547546

                err_msg = "Label of disk is not restored in VM shuting down.\n"
                err_msg += "Detail: img_label_after=%s, " % img_label_after
                err_msg += "img_label_before=%s.\n" % img_label
                err_msg += "More info in https://bugzilla.redhat.com/show_bug"
                err_msg += ".cgi?id=547546"
                raise error.TestFail(err_msg)
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with seclabel can not access the image with the context.
            if not status_error:
                raise error.TestFail("Test failed in positive case."
                                     "error: %s" % e)
    finally:
        # clean up
        for path, label in backup_labels_of_disks.items():
            # Using semanage set context persistently
            dir_path = "%s(/.*)?" % os.path.dirname(path)
            (img_label_type, img_label_range) = _resolve_label(label)
            utils_selinux.set_defcon(context_type=img_label_type,
                                     pathregex=dir_path,
                                     context_range=img_label_range)
            utils_selinux.verify_defcon(pathname=path,
                                        readonly=False,
                                        forcedesc=True)
        for path, label in backup_ownership_of_disks.items():
            label_list = label.split(":")
            os.chown(path, int(label_list[0]), int(label_list[1]))
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
        if (security_driver or security_default_confined
                or security_require_confined):
            qemu_conf.restore()
            libvirtd.restart()
Example #22
def run(test, params, env):
    """
    Test pci/pcie-to-pci bridge

    Hotplug interface to pci/pcie-to-pci bridge, then check xml and
    inside vm.
    Hotunplug interface, then check xml and inside vm
    Other test scenarios of pci/pcie-to-pci bridge
    """

    def create_pci_device(pci_model, pci_model_name, **kwargs):
        """
        Create a pci/pcie bridge

        :param pci_model: model of pci controller device
        :param pci_model_name: model name of pci controller device
        :param kwargs: other k-w args that needed to create device
        :return: the newly created device object
        """
        pci_bridge = Controller('pci')
        pci_bridge.model = pci_model
        pci_bridge.model_name = {'name': pci_model_name}
        if 'index' in kwargs:
            pci_bridge.index = kwargs['index']
        if 'address' in kwargs:
            pci_bridge.address = pci_bridge.new_controller_address(
                attrs=eval(kwargs['address']))

        logging.debug('pci_bridge: %s', pci_bridge)
        return pci_bridge

    def create_iface(iface_model, iface_source, **kwargs):
        """
        Create an interface to be attached to vm

        :param iface_model: model of the interface device
        :param iface_source: source of the interface device
        :param kwargs: other k-w args that needed to create device
        :return: the newly created interface object
        """
        iface = Interface('network')
        iface.model = iface_model
        iface.source = eval(iface_source)

        if 'mac' in kwargs:
            iface.mac_address = kwargs['mac']
        else:
            mac = utils_net.generate_mac_address_simple()
            iface.mac_address = mac

        if 'address' in kwargs:
            iface.address = iface.new_iface_address(attrs=eval(kwargs['address']))

        logging.debug('iface: %s', iface)
        return iface

    vm_name = params.get('main_vm')
    status_error = 'yes' == params.get('status_error', 'no')
    err_msg = params.get('err_msg', '')
    case = params.get('case', '')
    hotplug = 'yes' == params.get('hotplug', 'no')

    need_pci_br = 'yes' == params.get('need_pci_br', 'no')
    pci_model = params.get('pci_model', 'pci')
    pci_model_name = params.get('pci_model_name')
    pci_br_kwargs = eval(params.get('pci_br_kwargs', '{}'))

    pci_br_has_device = 'yes' == params.get('pci_br_has_device', 'no')
    sound_dev_model_type = params.get('sound_dev_model_type', '')
    sound_dev_address = params.get('sound_dev_address', '')

    iface_model = params.get('iface_model', '')
    iface_source = params.get('iface_source', '')
    iface_kwargs = eval(params.get('iface_kwargs', '{}'))

    max_slots = int(params.get('max_slots', 31))
    pcie_br_count = int(params.get('pcie_br_count', 3))

    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()
    vm = env.get_vm(vm_name)

    try:

        # Check if there is a pci/pcie-to-pci bridge, if so,
        # just use the existing pci/pcie-to-pci-bridge to test
        ori_pci_br = [dev for dev in vmxml.get_devices('controller')
                      if dev.type == 'pci' and dev.model == pci_model]

        if need_pci_br:
            # If there is not a pci/pcie-to-pci bridge to test,
            # create one and add to vm
            if not ori_pci_br:
                logging.info('No %s on vm, create one', pci_model)
                pci_bridge = create_pci_device(pci_model, pci_model_name)
                vmxml.add_device(pci_bridge)
                vmxml.sync()
                logging.debug(virsh.dumpxml(vm_name))

            # Check if pci/pcie-to-pci bridge is successfully added
            vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
            cur_pci_br = [dev for dev in vmxml.get_devices('controller')
                          if dev.type == 'pci' and dev.model == pci_model]
            if not cur_pci_br:
                test.error('Failed to add %s controller to vm xml' % pci_model)

            pci_br = cur_pci_br[0]
            logging.debug('pci_br: %s', pci_br)
            pci_br_index = pci_br.index

        # If the test scenario requires another pci device on the
        # pci/pcie-to-pci bridge before hotplug, add a sound device and
        # make sure its 'bus' is the same as the pci bridge index
        if need_pci_br and pci_br_has_device:
            sound_dev = Sound()
            sound_dev.model_type = sound_dev_model_type
            sound_dev.address = eval(sound_dev_address % pci_br_index)
            logging.debug('sound_dev.address: %s', sound_dev.address)
            vmxml.add_device(sound_dev)
            if case != 'vm_with_pcie_br_1_br':
                vmxml.sync()

        # Test hotplug scenario
        if hotplug:
            vm.start()
            vm.wait_for_login().close()

            # Create interface to be hotplugged
            logging.info('Create interface to be hotplugged')
            target_bus = cur_pci_br[0].index
            target_bus = hex(int(target_bus))
            logging.debug('target_bus: %s', target_bus)

            new_iface_kwargs = {'address': iface_kwargs['address'] % target_bus}
            logging.debug('address: %s', new_iface_kwargs['address'])
            iface = create_iface(iface_model, iface_source, **new_iface_kwargs)
            mac = iface.mac_address

            result = virsh.attach_device(vm_name, iface.xml, debug=True)
            libvirt.check_exit_status(result)

            xml_after_attach = VMXML.new_from_dumpxml(vm_name)
            logging.debug(virsh.dumpxml(vm_name))

            # Check if the iface with given mac address is successfully
            # attached with address bus equal to pcie/pci bridge's index
            iface_list = [
                iface for iface in xml_after_attach.get_devices('interface')
                if iface.mac_address == mac and
                int(iface.address['attrs']['bus'], 16) == int(pci_br_index, 16)
            ]

            logging.debug('iface list after attach: %s', iface_list)
            if not iface_list:
                test.error('Failed to attach interface %s' % iface)

            # Check inside vm
            def check_inside_vm(session, expect=True):
                ip_output = session.cmd('ip a')
                logging.debug('output of "ip a": %s', ip_output)

                return (mac in ip_output) == expect

            session = vm.wait_for_serial_login()
            if not utils_misc.wait_for(lambda: check_inside_vm(session, True),
                                       timeout=60, step=5):
                test.fail('Check interface inside vm failed, '
                          'interface not successfully attached: '
                          'mac address %s not found' % mac)
            session.close()

            # Test hotunplug
            result = virsh.detach_device(vm_name, iface.xml, debug=True)
            libvirt.check_exit_status(result)

            # Check if the iface with given mac address has been
            # successfully detached
            def is_hotunplug_interface_ok():
                xml_after_detach = VMXML.new_from_dumpxml(vm_name)
                iface_list_after_detach = [
                    iface for iface in xml_after_detach.get_devices('interface')
                    if iface.mac_address == mac
                ]
                logging.debug('iface list after detach: %s', iface_list_after_detach)
                return iface_list_after_detach == []

            if not utils_misc.wait_for(is_hotunplug_interface_ok, timeout=20):
                test.fail('Failed to detach device: %s' % iface)
            logging.debug(virsh.dumpxml(vm_name))

            # Check again inside vm
            session = vm.wait_for_serial_login()
            if not utils_misc.wait_for(lambda: check_inside_vm(session, False),
                                       timeout=60, step=5):
                test.fail('Check interface inside vm failed, '
                          'interface not successfully detached: '
                          'mac address %s still found' % mac)
            session.close()

        # Other test scenarios of pci/pcie
        if case:
            logging.debug('iface_kwargs: %s', iface_kwargs)

            # Set pcie-to-pci-bridge model name != pcie-pci-bridge,
            # or use an invalid controller index for pcie-to-pci-bridge
            if case in ('wrong_model_name', 'invalid_index'):
                pci_bridge = create_pci_device(pci_model, pci_model_name,
                                               **pci_br_kwargs)
                vmxml.add_device(pci_bridge)
                result_to_check = virsh.define(vmxml.xml, debug=True)

            # Attach device with invalid slot to pcie-to-pci-bridge
            if case == 'attach_with_invalid_slot':
                iface = create_iface(iface_model, iface_source, **iface_kwargs)
                vmxml.add_device(iface)
                result_to_check = virsh.define(vmxml.xml, debug=True)

            # Test that pcie-to-pci-bridge has 31 available slots
            if case == 'max_slots':
                target_bus = cur_pci_br[0].index
                target_bus = hex(int(target_bus))
                logging.debug('target_bus: %s', target_bus)

                # Attach 32 interfaces
                for i in range(max_slots + 1):
                    logging.debug('address: %s', iface_kwargs['address'])
                    new_iface_kwargs = {'address': iface_kwargs['address']
                                        % (target_bus, hex(i + 1))}
                    iface = create_iface(iface_model, iface_source,
                                         **new_iface_kwargs)
                    logging.info('Attaching interface #%d', i + 1)
                    result_in_loop = virsh.attach_device(
                        vm_name, iface.xml, flagstr='--config', debug=True)

                    # Attaching the 32nd (max_slots + 1) interface is expected to fail
                    if i == max_slots:
                        status_error = True
                    libvirt.check_exit_status(result_in_loop,
                                              expect_error=status_error)
                logging.debug(virsh.dumpxml(vm_name))

                # Get all devices on pcie-to-pci-bridge from new xml
                # Test if it matches with value of max_slots
                new_xml = VMXML.new_from_dumpxml(vm_name)
                device_on_pci_br = [
                    dev for dev in new_xml.get_devices('interface')
                    if dev.address['type_name'] == 'pci' and
                    int(dev.address['attrs']['bus'], 16) == int(target_bus, 16)
                ]

                logging.info('Number of devices on pcie-to-pci-bridge is %d',
                             len(device_on_pci_br))
                if len(device_on_pci_br) != max_slots:
                    test.fail('Device count on the bridge is %d instead of '
                              'the expected %d' %
                              (len(device_on_pci_br), max_slots))

            # Define a guest where the pcie-to-pci-bridge controller's index <= bus
            if case.startswith('index_v_bus'):
                last_pci_index = max([
                    int(dev.index) for dev in vmxml.get_devices('controller')
                    if dev.type == 'pci'])

                # Index of the new pcie-to-pci-bridge should be the last index + 1
                new_index = last_pci_index + 1
                if case.endswith('less_than'):
                    new_bus = new_index + 1
                elif case.endswith('equal_to'):
                    new_bus = new_index
Example #23
def test_active_nodedev_reset(device, vm, expect_succeed):
    """
    Test nodedev-reset when the specified device is attached to a VM

    :param device         : Specified node device to be tested.
    :param vm             : VM the device is to be attached to.
    :param expect_succeed : 'yes' if the command is expected to run
                            successfully, 'no' if it is expected to fail.
    :raise TestFail       : If result doesn't meet expectation.
    :raise TestError      : If failed to recover environment.
    """
    # Split device name such as `pci_0000_00_19_0` and fill the XML.
    hostdev_xml = """
<hostdev mode='subsystem' type='%s' managed='yes'>
    <source>
        <address domain='0x%s' bus='0x%s' slot='0x%s' function='0x%s'/>
    </source>
</hostdev>""" % tuple(device.split('_'))
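    # Illustrative: 'pci_0000_00_19_0'.split('_') yields
    # ('pci', '0000', '00', '19', '0'), filling the template as type='pci'
    # and address domain='0x0000' bus='0x00' slot='0x19' function='0x0'.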

    try:
        # The device needs to be detached before being attached to the VM.
        virsh.nodedev_detach(device)
        try:
            # Backup VM XML.
            vmxml = VMXML.new_from_inactive_dumpxml(vm.name)

            # Generate a temp file to store host device XML.
            dev_fd, dev_fname = tempfile.mkstemp(dir=data_dir.get_tmp_dir())
            os.close(dev_fd)

            with open(dev_fname, 'w') as dev_file:
                dev_file.write(hostdev_xml)

            # Only a live VM allows attaching a device.
            if not vm.is_alive():
                vm.start()

            try:
                result = virsh.attach_device(vm.name, dev_fname)
                logging.debug(result)

                test_nodedev_reset([device], expect_succeed)
            finally:
                # Detach device from VM.
                result = virsh.detach_device(vm.name, dev_fname)
                # Raise error when detach failed.
                if result.exit_status:
                    raise error.TestError(
                        'Failed to detach device %s from %s. Result:\n %s'
                        % (device, vm.name, result))
        finally:
            # Cleanup temp XML file and recover test VM.
            os.remove(dev_fname)
            vmxml.sync()
    finally:
        # Reattach node device
        result = virsh.nodedev_reattach(device)
        # Raise error when reattach failed.
        if result.exit_status:
            raise error.TestError(
                'Failed to reattach nodedev %s. Result:\n %s'
                % (device, result))
Example #24
def run(test, params, env):
    """
    Test command: virsh dompmsuspend <domain> <target>
    The command suspends a running domain using guest OS's power management.
    """

    def check_vm_guestagent(session):
        # Install qemu-guest-agent if it is not already installed
        cmd = "rpm -q qemu-guest-agent || yum install -y qemu-guest-agent"
        stat_install, output = session.cmd_status_output(cmd, 300)
        logging.debug(output)
        if stat_install != 0:
            raise error.TestError("Fail to install qemu-guest-agent, make "
                                  "sure that you have a usable repo in guest")

        # Check if qemu-ga already started
        stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
        if stat_ps != 0:
            session.cmd("qemu-ga -d")
            # Check if the qemu-ga really started
            stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
            if stat_ps != 0:
                raise error.TestError("Fail to run qemu-ga in guest")

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_state = params.get("vm_state", "running")
    suspend_target = params.get("pm_suspend_target", "mem")
    pm_enabled = params.get("pm_enabled", "not_set")

    # A backup of original vm
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    # Expected possible failure patterns.
    # Error output should match one of these patterns.
    # An empty list means the test should succeed.
    fail_pat = []

    # Setup possible failure patterns
    if pm_enabled == 'not_set':
        fail_pat.append('not supported')
    if pm_enabled == 'no':
        fail_pat.append('disabled')

    if vm_state == 'paused':
        fail_pat.append('not responding')
    elif vm_state == 'shutoff':
        fail_pat.append('not running')

    try:
        if vm.is_alive():
            vm.destroy()

        # Set pm tag in domain's XML if needed.
        if pm_enabled == 'not_set':
            if 'pm' in vm_xml:
                del vm_xml.pm
        else:
            pm_xml = VMPM()
            if suspend_target == 'mem':
                pm_xml.mem_enabled = pm_enabled
            elif suspend_target == 'disk':
                pm_xml.disk_enabled = pm_enabled
            elif suspend_target == 'hybrid':
                if 'hybrid_enabled' in dir(pm_xml):
                    pm_xml.hybrid_enabled = pm_enabled
                else:
                    raise error.TestNAError("PM suspend type 'hybrid' is not "
                                            "supported yet.")
            vm_xml.pm = pm_xml
            vm_xml.sync()

        VMXML.set_agent_channel(vm_name)
        vm.start()

        # Create swap partition/file if necessary.
        need_mkswap = False
        if suspend_target in ['disk', 'hybrid']:
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition.")
            vm.create_swap_partition()

        session = vm.wait_for_login()
        try:
            check_vm_guestagent(session)

            # Set vm state
            if vm_state == "paused":
                vm.pause()
            elif vm_state == "shutoff":
                vm.destroy()

            # Run test case
            result = virsh.dompmsuspend(vm_name, suspend_target, debug=True)
        finally:
            # Restore VM state
            if vm_state == "paused":
                vm.resume()

            if suspend_target in ['mem', 'hybrid']:
                if vm.state() == "pmsuspended":
                    virsh.dompmwakeup(vm_name)
            else:
                if vm.state() == "in shutdown":
                    vm.wait_for_shutdown()
                if vm.is_dead():
                    vm.start()

            # Cleanup
            session.close()

            if need_mkswap:
                vm.cleanup_swap()

        if result.exit_status == 0:
            if fail_pat:
                raise error.TestFail("Expected failure matching %s, but the "
                                     "run succeeded:\n%s" % (fail_pat, result))
        else:
            if not fail_pat:
                raise error.TestFail("Expected success, but the run failed:"
                                     "\n%s" % result)
            if not any(p in result.stderr for p in fail_pat):
                raise error.TestFail("Expected failure matching one of %s, "
                                     "but failed with:\n%s" % (fail_pat, result))
    finally:
        # Recover xml of vm.
        vm_xml_backup.sync()
Example #25
def run(test, params, env):
    """
    Test for network bandwidth in libvirt.

    1. Preparation:
        * Init variables from params.
        * Keep a backup for vmxml and networkxml.
        * Build a file with dd command.
    2. Edit vmxml and networkxml to control the bandwidth.
    3. Verify the bandwidth with scp.
    4. Clean up.
    """
    # get the params from params
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    inbound_average = params.get("LNB_inbound_average", "512")
    inbound_peak = params.get("LNB_inbound_peak", "512")
    inbound_burst = params.get("LNB_inbound_burst", "32")

    outbound_average = params.get("LNB_outbound_average", "512")
    outbound_peak = params.get("LNB_outbound_peak", "512")
    outbound_burst = params.get("LNB_outbound_burst", "32")

    config_type = params.get("LNB_config_type", "network")

    bandwidth_tolerance = float(params.get("LNB_bandwidth_tolerance",
                                           "20")) / 100

    file_size = params.get("LNB_verify_file_size", "10")

    nic1_params = params.object_params('nic1')
    # We assume this test is for a guest using default network.
    nettype = 'network'
    netdst = 'default'

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    # This test assumes that the VM is using the default network.
    # Check the interfaces of the VM to make sure the default network
    # is used by the VM.
    interfaces = vm_xml.get_devices(device_type="interface")
    # interface which is using default network.
    default_interface = None
    for interface in interfaces:
        if interface.source == {nettype: netdst}:
            default_interface = interface
            break
    if not default_interface:
        test.cancel("VM is not using the default network, skip this test.")

    bandwidth_inbound = {
        'average': inbound_average,
        'peak': inbound_peak,
        'burst': inbound_burst
    }
    bandwidth_outbound = {
        'average': outbound_average,
        'peak': outbound_peak,
        'burst': outbound_burst
    }

    network_xml = NetworkXML.new_from_net_dumpxml("default")
    network_xml_backup = network_xml.copy()

    tmp_dir = data_dir.get_tmp_dir()
    file_path = os.path.join(tmp_dir, "scp_file")
    # Create a file with dd; it will be copied via scp to measure bandwidth.
    cmd = "dd if=/dev/zero of=%s bs=1M count=%s" % (file_path, file_size)
    process.run(cmd, shell=True)
    try:
        if config_type == "network":
            network_xml.bandwidth_inbound = bandwidth_inbound
            network_xml.bandwidth_outbound = bandwidth_outbound
            network_xml.sync()
        elif config_type == "interface":
            devices = vm_xml.devices
            for index in range(len(devices)):
                if not (devices[index].device_tag
                        == default_interface.device_tag):
                    continue
                if devices[index].mac_address == default_interface.mac_address:
                    bound = {
                        'inbound': bandwidth_inbound,
                        'outbound': bandwidth_outbound
                    }
                    default_interface.bandwidth = default_interface.new_bandwidth(
                        **bound)
                    devices[index] = default_interface
                    break
            vm_xml.devices = devices
            vm_xml.sync()
        elif config_type == "portgroup":
            if nettype != 'network':
                test.cancel("Portgroup is only applicable with "
                            "virtual network")
            # Add a portgroup into default network
            portgroup_name = "test_portgroup"
            portgroup = PortgroupXML()
            portgroup.name = portgroup_name
            portgroup.bandwidth_inbound = bandwidth_inbound
            portgroup.bandwidth_outbound = bandwidth_outbound
            network_xml.portgroup = portgroup
            network_xml.sync()
            # Using the portgroup in VM.
            devices = vm_xml.devices
            for index in range(len(devices)):
                if not (devices[index].device_tag
                        == default_interface.device_tag):
                    continue
                if devices[index].mac_address == default_interface.mac_address:
                    default_interface.source = {
                        nettype: netdst,
                        'portgroup': portgroup_name
                    }
                    devices[index] = default_interface
                    break
            vm_xml.devices = devices
            vm_xml.sync()
        else:
            test.cancel("Unsupported parameter config_type=%s." % config_type)

        # SCP to check the network bandwidth.
        if vm.is_alive():
            virsh.destroy(vm_name)
        vm.start()
        vm.wait_for_login()
        time_before = time.time()
        vm.copy_files_to(host_path=file_path, guest_path="/root")
        time_after = time.time()

        speed_expected = int(inbound_average)
        # file_size is in MiB; bandwidth values are in KiB/s
        speed_actual = (int(file_size) * 1024 // (time_after - time_before))
        if not (abs(speed_actual - speed_expected) <=
                speed_expected * bandwidth_tolerance):
            test.fail("Speed from host to guest is %s.\n"
                      "But the average of bandwidth.inbound is %s.\n" %
                      (speed_actual, speed_expected))
        time_before = time.time()
        vm.copy_files_from(host_path=file_path, guest_path="/root/scp_file")
        time_after = time.time()

        speed_expected = int(outbound_average)
        speed_actual = (int(file_size) * 1024 // (time_after - time_before))
        if not (abs(speed_actual - speed_expected) <=
                speed_expected * bandwidth_tolerance):
            test.fail("Speed from guest to host is %s.\n"
                      "But the average of bandwidth.outbound is %s\n" %
                      (speed_actual, speed_expected))

    finally:
        if os.path.exists(file_path):
            os.remove(file_path)
        network_xml_backup.sync()
        vm_xml_backup.sync()
Example #26
    logging.debug("Get %s vcpus in virsh vcpuinfo output", vcpuinfo_num)
    if vcpuinfo_num != int(expect_vcpu_num[i]):
        raise error.TestFail("Vcpu number in virsh vcpuinfo is unexpected")
    vcpuinfo_affinity = re.findall('CPU Affinity: +([-y]+)', output)
    logging.debug("Command vcpuinfo check pass")

    # Check vcpu number in domain XML, if setvcpu with '--config' option,
    # or domain is dead, vcpu number correspond to expect_vcpu_num[2],
    # otherwise, it correspond to expect_vcpu_num[3]
    dumpxml_option = ""
    if setvcpu_option == "--config" or vm.is_dead():
        dumpxml_option = "--inactive"
        i = 2
    else:
        i = 3
    vmxml = VMXML()
    vmxml['xml'] = virsh.dumpxml(vm.name, dumpxml_option).stdout.strip()
    try:
        if vmxml['vcpu'] != int(expect_vcpu_num[0]):
            raise error.TestFail("Max vcpu number %s in domain XML is not"
                                 " expected" % vmxml['vcpu'])
        if vmxml['current_vcpu'] != expect_vcpu_num[i]:
            raise error.TestFail("Current vcpu number %s in domain XML is"
                                 " not expected" % vmxml['current_vcpu'])
    except (ValueError, IndexError) as detail:
        raise error.TestFail(detail)
    logging.debug("Vcpu number in domain xml check pass")

    # Check cpu affinity from the vcpuinfo output, the vcpupin command
    # output, and the vcpupin info (cputune element) in the domain xml
    result = virsh.vcpupin(vm.name, ignore_status=True, debug=True)
Example #27
def run(test, params, env):
    """
    Test of libvirt SPICE related features.

    1) Block specified ports if required;
    2) Setup SPICE TLS certification if required;
    3) Setup graphics tag in VM;
    4) Try to start VM;
    5) Parse and check result with expected.
    6) Clean up environment.
    """

    # Since libvirt 2.0.0, there are some changes to listen type and port:
    # 1. Two new listen types: 'none' and 'socket' (the latter not covered
    #    in this test). With the 'none' listen type there is no listen
    #    address and the port is 0, so for earlier versions we need to
    #    reset the listen type to get the expected result (vm start fail)
    # 2. The spice port accepts negative numbers less than -1.
    #    If the spice port is less than -1, the VM can start normally, but
    #    both the listen address and the port will be omitted
    spice_listen_type = params.get('spice_listen_type', 'not_set')
    vnc_listen_type = params.get('vnc_listen_type', 'not_set')
    spice_port = params.get('spice_port', 'not_set')
    spice_tlsPort = params.get('spice_tlsPort', 'not_set')
    if not libvirt_version.version_compare(2, 0, 0):
        if spice_listen_type == 'none':
            params['spice_listen_type'] = 'not_set'
        if vnc_listen_type == 'none':
            params['vnc_listen_type'] = 'not_set'
    else:
        try:
            if int(spice_port) < -1:
                params["negative_test"] = "no"
        except ValueError:
            pass

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    spice_xml = params.get("spice_xml", "no") == 'yes'
    vnc_xml = params.get("vnc_xml", "no") == 'yes'
    is_negative = params.get("negative_test", "no") == 'yes'

    sockets = block_ports(params)
    networks = setup_networks(params)

    expected_result = get_expected_results(params, networks)
    env_state = EnvState(params, expected_result)

    vm = env.get_vm(vm_name)
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    try:
        vm_xml.remove_all_graphics()
        if spice_xml:
            spice_graphic = generate_spice_graphic_xml(params, expected_result)
            logging.debug('Test SPICE XML is: %s', spice_graphic)
            devices = vm_xml.devices
            devices.append(spice_graphic)
            vm_xml.devices = devices
        if vnc_xml:
            vnc_graphic = generate_vnc_graphic_xml(params, expected_result)
            logging.debug('Test VNC XML is: %s', vnc_graphic)
            devices = vm_xml.devices
            devices.append(vnc_graphic)
            vm_xml.devices = devices
        vm_xml.sync()
        all_ips = utils_net.get_all_ips()

        fail_patts = expected_result['fail_patts']
        try:
            vm.start()
        except virt_vm.VMStartError as detail:
            if not fail_patts:
                raise error.TestFail(
                    "Expect VM can be started, but failed with: %s" % detail)
            for patt in fail_patts:
                if re.search(patt, str(detail)):
                    return
            raise error.TestFail(
                "Expect fail with error in %s, but failed with: %s"
                % (fail_patts, detail))

Example #28

def run(test, params, env):
    """
    Domain CPU management testing.

    1. Prepare a domain for testing, install qemu-guest-ga if needed.
    2. Plug vcpu for the domain.
    3. Checking:
      3.1. Virsh vcpucount.
      3.2. Virsh vcpuinfo.
      3.3. Current vcpu number in domain xml.
      3.4. Virsh vcpupin and vcpupin in domain xml.
      3.5. The vcpu number in domain.
      3.6. Virsh cpu-stats.
    4. Repeat step 3 to check again.
    5. Control domain(save, managedsave, s3, s4, migrate, etc.).
    6. Repeat step 3 to check again.
    7. Recover domain(restore, wakeup, etc.).
    8. Repeat step 3 to check again.
    9. Unplug vcpu for the domain.
    10. Repeat step 3 to check again.
    11. Repeat step 5 to control domain (as BZ#1088216 is not fixed, skip
        save/managedsave/migrate related actions).
    12. Repeat step 3 to check again.
    13. Repeat step 7 to recover domain.
    14. Repeat step 3 to check again.
    15. Recover test environment.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_operation = params.get("vm_operation", "null")
    vcpu_max_num = params.get("vcpu_max_num")
    vcpu_current_num = params.get("vcpu_current_num")
    vcpu_plug = "yes" == params.get("vcpu_plug", "no")
    vcpu_plug_num = params.get("vcpu_plug_num")
    vcpu_unplug = "yes" == params.get("vcpu_unplug", "no")
    vcpu_unplug_num = params.get("vcpu_unplug_num")
    setvcpu_option = params.get("setvcpu_option", "")
    agent_channel = "yes" == params.get("agent_channel", "yes")
    install_qemuga = "yes" == params.get("install_qemuga", "no")
    start_qemuga = "yes" == params.get("start_qemuga", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    setvcpu_readonly = "yes" == params.get("setvcpu_readonly", "no")
    status_error = "yes" == params.get("status_error", "no")
    pin_before_plug = "yes" == params.get("pin_before_plug", "no")
    pin_after_plug = "yes" == params.get("pin_after_plug", "no")
    pin_before_unplug = "yes" == params.get("pin_before_unplug", "no")
    pin_after_unplug = "yes" == params.get("pin_after_unplug", "no")
    pin_vcpu = params.get("pin_vcpu")
    pin_cpu_list = params.get("pin_cpu_list", "x")
    check_after_plug_fail = "yes" == params.get("check_after_plug_fail", "no")

    # Init expect vcpu count values
    expect_vcpu_num = [vcpu_max_num, vcpu_max_num, vcpu_current_num,
                       vcpu_current_num, vcpu_current_num]
    if check_after_plug_fail:
        expect_vcpu_num_bk = list(expect_vcpu_num)
    # Init expect vcpu pin values
    expect_vcpupin = {}

    # Init cpu-list for vcpupin
    host_cpu_count = utils.count_cpus()
    if (int(host_cpu_count) < 2) and (not pin_cpu_list == "x"):
        raise error.TestNAError("We need more cpus on host in this case for"
                                " the cpu-list=%s. But current number of cpu"
                                " on host is %s."
                                % (pin_cpu_list, host_cpu_count))

    cpus_list = utils.cpu_online_map()
    logging.info("Active cpus in host are %s", cpus_list)

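    # Build an 'x-y' range string from the first pair of consecutive online
    # CPUs, e.g. cpus_list ['0', '1', '3'] yields cpu_seq_str '0-1'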
    cpu_seq_str = ""
    for i in range(len(cpus_list) - 1):
        if int(cpus_list[i]) + 1 == int(cpus_list[i + 1]):
            cpu_seq_str = "%s-%s" % (cpus_list[i], cpus_list[i + 1])
            break

    if pin_cpu_list == "x":
        pin_cpu_list = cpus_list[-1]
    if pin_cpu_list == "x-y":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str
        else:
            pin_cpu_list = "%s-%s" % (cpus_list[0], cpus_list[0])
    elif pin_cpu_list == "x,y":
        pin_cpu_list = "%s,%s" % (cpus_list[0], cpus_list[1])
    elif pin_cpu_list == "x-y,^z":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str + ",^%s" % cpu_seq_str.split('-')[1]
        else:
            pin_cpu_list = "%s,%s,^%s" % (cpus_list[0], cpus_list[1],
                                          cpus_list[0])
    else:
        # Just use the value taken from the cfg
        pass

    need_mkswap = False
    # Back up domain XML
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    try:
        # Customize domain vcpu number
        if vm.is_alive():
            vm.destroy()
        if agent_channel:
            vmxml.set_agent_channel()
        else:
            vmxml.remove_agent_channels()
        vmxml.sync()

        vmxml.set_vm_vcpus(vm_name, int(vcpu_max_num), int(vcpu_current_num))

        # Do not apply S3/S4 on power
        if 'power' not in cpu_util.get_cpu_arch():
            vmxml.set_pm_suspend(vm_name, "yes", "yes")
        vm.start()

        # Create swap partition/file if necessary
        if vm_operation == "s4":
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition")
            vm.create_swap_partition()

        # Prepare qemu guest agent
        if install_qemuga:
            vm.prepare_guest_agent(prepare_xml=False, start=start_qemuga)
            vm.setenforce(0)
        else:
            # Remove qemu-guest-agent for negative test
            vm.remove_package('qemu-guest-agent')

        # Run test
        check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin)
        # plug vcpu
        if vcpu_plug:
            # Pin vcpu
            if pin_before_plug:
                result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                       ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                expect_vcpupin = {pin_vcpu: pin_cpu_list}

            result = virsh.setvcpus(vm_name, vcpu_plug_num, setvcpu_option,
                                    readonly=setvcpu_readonly,
                                    ignore_status=True, debug=True)
            check_setvcpus_result(result, status_error)

            if setvcpu_option == "--config":
                expect_vcpu_num[2] = vcpu_plug_num
            elif setvcpu_option == "--guest":
                # setvcpus '--guest' only affects the vcpu number in the guest
                expect_vcpu_num[4] = vcpu_plug_num
            else:
                expect_vcpu_num[3] = vcpu_plug_num
                expect_vcpu_num[4] = vcpu_plug_num
                if not status_error:
                    if not online_new_vcpu(vm, vcpu_plug_num):
                        raise error.TestFail("Fail to enable new added cpu")

            # Pin vcpu
            if pin_after_plug:
                result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                       ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                expect_vcpupin = {pin_vcpu: pin_cpu_list}

            if status_error and check_after_plug_fail:
                check_vcpu_number(vm, expect_vcpu_num_bk, {}, setvcpu_option)

            if not status_error:
                if restart_libvirtd:
                    utils_libvirtd.libvirtd_restart()

                # Check vcpu number and related commands
                check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                  setvcpu_option)

                # Control domain
                manipulate_domain(vm_name, vm_operation)

                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)

                # Recover domain
                manipulate_domain(vm_name, vm_operation, recover=True)

                # Resuming a domain from S4 may take a long time (QEMU bug),
                # so wait for up to 10 minutes and skip the remaining test
                # steps if the domain does not resume successfully
                try:
                    vm.wait_for_login(timeout=600)
                except Exception as e:
                    raise error.TestWarn("Skip remaining test steps as domain"
                                         " did not resume in 10 mins: %s" % e)
                # For vcpu hotplug/unplug without the '--config' flag, after
                # suspending the domain to disk (shut off) and restarting it,
                # the current live vcpu number recovers to the original value
                if vm_operation == 's4':
                    if setvcpu_option.count("--config"):
                        expect_vcpu_num[3] = vcpu_plug_num
                        expect_vcpu_num[4] = vcpu_plug_num
                    elif setvcpu_option.count("--guest"):
                        expect_vcpu_num[4] = vcpu_plug_num
                    else:
                        expect_vcpu_num[3] = vcpu_current_num
                        expect_vcpu_num[4] = vcpu_current_num
                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)

        # Unplug vcpu
        # Since QEMU 2.2.0, by default all current vcpus are non-hotpluggable
        # when the VM is started, and vcpu 0 (id=1) is required to always be
        # present and non-hotpluggable, which means we can't hotunplug these
        # vcpus directly. So we can either hotplug more vcpus before we do
        # the hotunplug, or set the 'hotpluggable' attribute to 'yes' for the
        # vcpus except vcpu 0, to make sure libvirt can find appropriate
        # hotpluggable vcpus to reach the desired target vcpu count. As the
        # simpler preparation step, here we choose to hotplug more vcpus.
        if vcpu_unplug:
            if setvcpu_option == "--live":
                logging.info("Hotplug vcpu to the maximum count to make sure"
                             " all these new plugged vcpus are hotunpluggable")
                result = virsh.setvcpus(vm_name, vcpu_max_num, '--live',
                                        debug=True)
                libvirt.check_exit_status(result)
            # Pin vcpu
            if pin_before_unplug:
                result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                       ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                # As the vcpu will be unplugged later, set expect_vcpupin to empty
                expect_vcpupin = {}

            result = virsh.setvcpus(vm_name, vcpu_unplug_num, setvcpu_option,
                                    readonly=setvcpu_readonly,
                                    ignore_status=True, debug=True)

            try:
                check_setvcpus_result(result, status_error)
            except error.TestNAError:
                raise error.TestWarn("Skip unplug vcpu as it is not supported")

            if setvcpu_option == "--config":
                expect_vcpu_num[2] = vcpu_unplug_num
            elif setvcpu_option == "--guest":
                # setvcpus '--guest' only affects the vcpu number in the guest
                expect_vcpu_num[4] = vcpu_unplug_num
            else:
                expect_vcpu_num[3] = vcpu_unplug_num
                expect_vcpu_num[4] = vcpu_unplug_num

            # Pin vcpu
            if pin_after_unplug:
                result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                       ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                expect_vcpupin = {pin_vcpu: pin_cpu_list}

            if not status_error:
                if restart_libvirtd:
                    utils_libvirtd.libvirtd_restart()

                # Check vcpu number and related commands
                check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                  setvcpu_option)

                # Control domain
                manipulate_domain(vm_name, vm_operation)

                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)

                # Recover domain
                manipulate_domain(vm_name, vm_operation, recover=True)

                # Resuming a domain from S4 may take a long time (QEMU bug),
                # so wait for up to 10 minutes and skip the remaining test
                # steps if the domain does not resume successfully
                try:
                    vm.wait_for_login(timeout=600)
                except Exception as e:
                    raise error.TestWarn("Skip remaining test steps as domain"
                                         " did not resume in 10 mins: %s" % e)
                # For vcpu hotplug/unplug without the '--config' flag, after
                # suspending the domain to disk (shut off) and restarting it,
                # the current live vcpu number recovers to the original value
                if vm_operation == 's4':
                    if setvcpu_option.count("--config"):
                        expect_vcpu_num[3] = vcpu_unplug_num
                        expect_vcpu_num[4] = vcpu_unplug_num
                    elif setvcpu_option.count("--guest"):
                        expect_vcpu_num[4] = vcpu_unplug_num
                    else:
                        expect_vcpu_num[3] = vcpu_current_num
                        expect_vcpu_num[4] = vcpu_current_num
                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)
Example #29
def run(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Config qemu conf if need
    (3).Label the VM and disk with proper label.
    (4).Start VM and check the context.
    (5).Destroy VM and check the context.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_start_destroy_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_start_destroy_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_start_destroy_vm_sec_model", "selinux")
    sec_label = params.get("svirt_start_destroy_vm_sec_label", None)
    sec_baselabel = params.get("svirt_start_destroy_vm_sec_baselabel", None)
    security_driver = params.get("security_driver", None)
    security_default_confined = params.get("security_default_confined", None)
    security_require_confined = params.get("security_require_confined", None)
    no_sec_model = 'yes' == params.get("no_sec_model", 'no')
    sec_relabel = params.get("svirt_start_destroy_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'relabel': sec_relabel}
    sec_dict_list = []

    def _set_sec_model(model):
        """
        Set sec_dict_list based on the given sec model type
        """
        sec_dict_copy = sec_dict.copy()
        sec_dict_copy['model'] = model
        if sec_type != "none":
            if sec_type == "dynamic" and sec_baselabel:
                sec_dict_copy['baselabel'] = sec_baselabel
            else:
                sec_dict_copy['label'] = sec_label
        sec_dict_list.append(sec_dict_copy)

    if not no_sec_model:
        if "," in sec_model:
            sec_models = sec_model.split(",")
            for model in sec_models:
                _set_sec_model(model)
        else:
            _set_sec_model(sec_model)
    else:
        sec_dict_list.append(sec_dict)

    logging.debug("sec_dict_list is: %s" % sec_dict_list)
    poweroff_with_destroy = ("destroy" == params.get(
                             "svirt_start_destroy_vm_poweroff", "destroy"))
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about image.
    img_label = params.get('svirt_start_destroy_disk_label')
    # Backup disk Labels.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    backup_ownership_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        fd = os.open(disk_path, os.O_RDONLY)
        stat_re = os.fstat(fd)
        os.close(fd)
        backup_ownership_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                          stat_re.st_gid)
    # Backup selinux of host.
    backup_sestatus = utils_selinux.get_status()

    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    def _resolve_label(label_string):
        labels = label_string.split(":")
        label_type = labels[2]
        if len(labels) == 4:
            label_range = labels[3]
        elif len(labels) > 4:
            label_range = "%s:%s" % (labels[3], labels[4])
        else:
            label_range = None
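        # Illustrative: 'system_u:object_r:svirt_image_t:s0' resolves to
        # ('svirt_image_t', 's0'); a context without an MLS/MCS range
        # resolves to (type, None)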
        return (label_type, label_range)

    def _check_label_equal(label1, label2):
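        # Compare contexts field by field over label1's fields only, so a
        # disk context without MCS categories (e.g. ending in ':s0') still
        # matches an image label with categories appended (':s0:cxx,cxxx')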
        label1s = label1.split(":")
        label2s = label2.split(":")
        for i in range(len(label1s)):
            if label1s[i] != label2s[i]:
                return False
        return True

    try:
        # Set disk label
        (img_label_type, img_label_range) = _resolve_label(img_label)
        for disk in disks.values():
            disk_path = disk['source']
            dir_path = "%s(/.*)?" % os.path.dirname(disk_path)
            # Using semanage set context persistently
            utils_selinux.set_defcon(context_type=img_label_type,
                                     pathregex=dir_path,
                                     context_range=img_label_range)
            o_r = utils_selinux.verify_defcon(pathname=disk_path,
                                              readonly=False,
                                              forcedesc=True)
            orig_label_type = backup_labels_of_disks[disk_path].split(":")[2]
            if o_r and (orig_label_type != img_label_type):
                raise error.TestFail("change disk label(%s) failed" % img_label_type)
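            # 107:107 is typically the qemu user/group id on the host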
            os.chown(disk_path, 107, 107)

        # Set selinux of host.
        utils_selinux.set_status(host_sestatus)

        # Set qemu conf
        if security_driver:
            qemu_conf.set_string('security_driver', security_driver)
        if security_default_confined:
            qemu_conf.security_default_confined = security_default_confined
        if security_require_confined:
            qemu_conf.security_require_confined = security_require_confined
        if (security_driver or security_default_confined or
                security_require_confined):
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()

        # Set the context of the VM.
        vmxml.set_seclabel(sec_dict_list)
        vmxml.sync()
        logging.debug("the domain xml is: %s" % vmxml.xmltreefile)

        # restart libvirtd
        libvirtd.restart()

        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with seclabel can access the image with the context.
            if status_error:
                raise error.TestFail("Test succeeded in negative case.")
            # Check the label of VM and image when VM is running.
            vm_context = utils_selinux.get_context_of_process(vm.get_pid())
            if (sec_type == "static") and (not vm_context == sec_label):
                raise error.TestFail("Label of VM is not expected after "
                                     "starting.\n"
                                     "Detail: vm_context=%s, sec_label=%s"
                                     % (vm_context, sec_label))
            disk_context = utils_selinux.get_context_of_file(
                filename=disks.values()[0]['source'])
            if (sec_relabel == "no") and (not disk_context == img_label):
                raise error.TestFail("Label of disk is not expected after VM "
                                     "starting.\n"
                                     "Detail: disk_context=%s, img_label=%s."
                                     % (disk_context, img_label))
            if sec_relabel == "yes" and not no_sec_model:
                vmxml = VMXML.new_from_dumpxml(vm_name)
                imagelabel = vmxml.get_seclabel()[0]['imagelabel']
                # The disk context is 'system_u:object_r:svirt_image_t:s0';
                # when the VM starts, the MLS/MCS range is added automatically,
                # so imagelabel becomes 'system_u:object_r:svirt_image_t:s0:cxx,cxxx',
                # but we shouldn't check the MCS range.
                if not _check_label_equal(disk_context, imagelabel):
                    raise error.TestFail("Label of disk is not relabeled by "
                                         "VM\nDetail: disk_context="
                                         "%s, imagelabel=%s"
                                         % (disk_context, imagelabel))
            # Check the label of disk after VM being destroyed.
            if poweroff_with_destroy:
                vm.destroy(gracefully=False)
            else:
                vm.wait_for_login()
                vm.shutdown()
            img_label_after = utils_selinux.get_context_of_file(
                filename=disks.values()[0]['source'])
            if (not img_label_after == img_label):
                # Bug 547546 - RFE: the security drivers must remember original
                # permissions/labels and restore them after
                # https://bugzilla.redhat.com/show_bug.cgi?id=547546

                err_msg = "Label of disk is not restored when shutting down the VM.\n"
                err_msg += "Detail: img_label_after=%s, " % img_label_after
                err_msg += "img_label_before=%s.\n" % img_label
                err_msg += "More info in https://bugzilla.redhat.com/show_bug"
                err_msg += ".cgi?id=547546"
                raise error.TestFail(err_msg)
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with seclabel can not access the image with the context.
            if not status_error:
                raise error.TestFail("Test failed in positive case."
                                     "error: %s" % e)
    finally:
        # clean up
        for path, label in backup_labels_of_disks.items():
            # Using semanage set context persistently
            dir_path = "%s(/.*)?" % os.path.dirname(path)
            (img_label_type, img_label_range) = _resolve_label(label)
            utils_selinux.set_defcon(context_type=img_label_type,
                                     pathregex=dir_path,
                                     context_range=img_label_range)
            utils_selinux.verify_defcon(pathname=path,
                                        readonly=False,
                                        forcedesc=True)
        for path, label in backup_ownership_of_disks.items():
            label_list = label.split(":")
            os.chown(path, int(label_list[0]), int(label_list[1]))
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
        if (security_driver or security_default_confined or
                security_require_confined):
            qemu_conf.restore()
            libvirtd.restart()
Example #30
def run_svirt_start_destroy(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Label the VM and disk with proper label.
    (3).Start VM and check the context.
    (4).Destroy VM and check the context.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_start_destroy_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_start_destroy_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_start_destroy_vm_sec_model", "selinux")
    sec_label = params.get("svirt_start_destroy_vm_sec_label", None)
    sec_relabel = params.get("svirt_start_destroy_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label,
                'relabel': sec_relabel}
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about image.
    img_label = params.get('svirt_start_destroy_disk_label')
    # Label the disks of VM with img_label.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        utils_selinux.set_context_of_file(filename=disk_path,
                                          context=img_label)
    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the context of the VM.
    vmxml.set_seclabel(sec_dict)
    vmxml.sync()

    try:
        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with seclabel can access the image with the context.
            if status_error:
                raise error.TestFail("Test succeeded in negative case.")
            # Check the label of VM and image when VM is running.
            vm_context = utils_selinux.get_context_of_process(vm.get_pid())
            if (sec_type == "static") and (not vm_context == sec_label):
                raise error.TestFail("Label of VM is not expected after starting.\n"
                                     "Detail: vm_context=%s, sec_label=%s"
                                     % (vm_context, sec_label))
            disk_context = utils_selinux.get_context_of_file(
                filename=disks.values()[0]['source'])
            if (sec_relabel == "no") and (not disk_context == img_label):
                raise error.TestFail("Label of disk is not expected after VM "
                                     "starting.\n"
                                     "Detail: disk_context=%s, img_label=%s."
                                     % (disk_context, img_label))
            if sec_relabel == "yes":
                vmxml = VMXML.new_from_dumpxml(vm_name)
                imagelabel = vmxml.get_seclabel()['imagelabel']
                if not disk_context == imagelabel:
                    raise error.TestFail("Label of disk is not relabeled by VM\n"
                                         "Detail: disk_context=%s, imagelabel=%s"
                                         % (disk_context, imagelabel))
            # Check the label of disk after VM being destroyed.
            vm.destroy()
            img_label_after = utils_selinux.get_context_of_file(
                filename=disks.values()[0]['source'])
            if (not img_label_after == img_label):
                raise error.TestFail("Bug: Label of disk is not restored when "
                                     "shutting down the VM.\n"
                                     "Detail: img_label_after=%s, "
                                     "img_label_before=%s.\n"
                                     % (img_label_after, img_label))
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with seclabel can not access the image with the context.
            if not status_error:
                raise error.TestFail("Test failed in positive case."
                                     "error: %s" % e)
    finally:
        # clean up
        for path, label in backup_labels_of_disks.items():
            utils_selinux.set_context_of_file(filename=path, context=label)
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)

Example #31

def run(test, params, env):
    """
    Stress test for the hotplug feature of usb device.
    """
    # get the params from params
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    keyboard = "yes" == params.get("usb_hotplug_keyboard", "no")
    mouse = "yes" == params.get("usb_hotplug_mouse", "no")
    tablet = "yes" == params.get("usb_hotplug_tablet", "no")
    disk = "yes" == params.get("usb_hotplug_disk", "no")

    attach_count = int(params.get("attach_count", "1"))
    attach_type = params.get("attach_type", "attach_device")
    bench_type = params.get("guest_bench", None)
    control_file = params.get("control_file", None)

    status_error = "yes" == params.get("status_error", "no")

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    tmp_dir = os.path.join(data_dir.get_tmp_dir(), "usb_hotplug_files")

    if control_file is not None:
        params["test_control_file"] = control_file
        params["main_vm"] = vm_name
        control_path = os.path.join(test.virtdir, "control", control_file)

        session = vm.wait_for_login()
        command = utils_test.run_autotest(vm, session, control_path, None, None, params, copy_only=True)
        session.cmd("%s &" % command)

        def _is_iozone_running():
            session_tmp = vm.wait_for_login()
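            # cmd_status() returns 0 when the ps|grep pipeline finds a
            # match ('grep -v grep' excludes the grep process itself), so
            # the negation is True while the benchmark is running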
            return not session_tmp.cmd_status("ps -ef|grep iozone|grep -v grep")

        def _is_stress_running():
            session_tmp = vm.wait_for_login()
            return not session_tmp.cmd_status("ps -ef|grep stress|grep -v grep")

        if bench_type == "stress":
            if not utils_misc.wait_for(_is_stress_running, timeout=160):
                raise error.TestNAError(
                    "Failed to run stress in guest.\n"
                    "Since we need to run an autotest control file in "
                    "the guest, please make sure the necessary packages "
                    "are installed in the guest, such as gcc, tar, bzip2"
                )
        elif bench_type == "iozone":
            if not utils_misc.wait_for(_is_iozone_running, timeout=160):
                raise error.TestNAError(
                    "Failed to run iozone in guest.\n"
                    "Since we need to run an autotest of iozone in "
                    "the guest, please make sure the necessary packages "
                    "are installed in the guest, such as gcc, tar, bzip2"
                )
        logging.debug("bench is already running in guest.")
    try:
        try:
            result = None
            disk_xml = None
            tablet_xml = None
            mouse_xml = None
            if not os.path.isdir(tmp_dir):
                os.mkdir(tmp_dir)
            for i in range(attach_count):
                path = os.path.join(tmp_dir, "%s.img" % i)
                if attach_type == "qemu_monitor":
                    options = "--hmp"
                    if disk:
                        utils_test.libvirt.create_local_disk("file", path, size="1M")
                        attach_cmd = "drive_add"
                        attach_cmd += " 0 id=drive-usb-disk%s,if=none,file=%s" % (i, path)

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if keyboard:
                        attach_cmd = "device_add"
                        attach_cmd += " usb-kdb,bus=usb1.0,id=kdb"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if mouse:
                        attach_cmd = "device_add"
                        attach_cmd += " usb-mouse,bus=usb1.0,id=mouse"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if tablet:
                        attach_cmd = "device_add"
                        attach_cmd += " usb-tablet,bus=usb1.0,id=tablet"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                else:
                    if disk:
                        utils_test.libvirt.create_local_disk("file", path, size="1M")
                        os.chmod(path, 0o666)
                        disk_xml = Disk(type_name="file")
                        disk_xml.device = "disk"
                        disk_xml.source = disk_xml.new_disk_source(**{"attrs": {"file": path}})
                        disk_xml.driver = {"name": "qemu", "type": "raw", "cache": "none"}
                        disk_xml.target = {"dev": "sdb", "bus": "usb"}

                        attributes = {"type_name": "usb", "bus": "1", "port": "0"}
                        disk_xml.address = disk_xml.new_disk_address(**{"attrs": attributes})

                        result = virsh.attach_device(vm_name, disk_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if mouse:
                        mouse_xml = Input("mouse")
                        mouse_xml.input_bus = "usb"
                        attributes = {"type_name": "usb", "bus": "1", "port": "0"}
                        mouse_xml.address = mouse_xml.new_input_address(**{"attrs": attributes})

                        result = virsh.attach_device(vm_name, mouse_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if tablet:
                        tablet_xml = Input("tablet")
                        tablet_xml.input_bus = "usb"
                        attributes = {"type_name": "usb", "bus": "1", "port": "0"}
                        tablet_xml.address = tablet_xml.new_input_address(**{"attrs": attributes})

                        result = virsh.attach_device(vm_name, tablet_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if keyboard:
                        kbd_xml = Input("keyboard")
                        kbd_xml.input_bus = "usb"
                        attributes = {"type_name": "usb", "bus": "1", "port": "0"}
                        kbd_xml.address = kbd_xml.new_input_address(**{"attrs": attributes})

                        result = virsh.attach_device(vm_name, kbd_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)

                if attach_type == "qemu_monitor":
                    options = "--hmp"
                    if disk:
                        attach_cmd = "drive_del"
                        attach_cmd += " drive-usb-disk"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if mouse:
                        attach_cmd = "device_del"
                        attach_cmd += " mouse"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if keyboard:
                        attach_cmd = "device_del"
                        attach_cmd += " keyboard"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if tablet:
                        attach_cmd = "device_del"
                        attach_cmd += " tablet"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                else:
                    if disk:
                        result = virsh.detach_device(vm_name, disk_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if mouse:
                        result = virsh.detach_device(vm_name, mouse_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if keyboard:
                        result = virsh.detach_device(vm_name, kbd_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if tablet:
                        result = virsh.detach_device(vm_name, tablet_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
        except process.CmdError as e:
            if not status_error:
                raise error.TestFail("Failed to attach/detach device.\n"
                                     "Detail: %s." % e)
    finally:
        if os.path.isdir(tmp_dir):
            shutil.rmtree(tmp_dir)
        vm_xml_backup.sync()
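The benchmark readiness checks above rely on utils_misc.wait_for, which polls a predicate until it returns a truthy value or a timeout expires. A rough sketch of that polling pattern (the real helper takes additional parameters):

import time

def wait_for(func, timeout, step=1.0):
    """Poll func() every `step` seconds; return its result once truthy,
    or None if `timeout` seconds elapse first."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        result = func()
        if result:
            return result
        time.sleep(step)
    return None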
def run(test, params, env):
    """
    Test for PCI device passthrough to libvirt guest.

    a). NIC:
        1. Get params.
        2. Get the pci device for specific net_name.
        3. Attach pci device to guest.
        4. Start guest and set the ip to all the physical functions.
        5. Ping to server_ip from each physical function
           to verify the new network device.
    b). STORAGE:
        1. Get params.
        2. Get the pci device for specific storage_dev_name.
        3. Store the result of 'fdisk -l' on guest.
        4. Attach pci device to guest.
        5. Start guest and get the result of 'fdisk -l' on guest.
        6. Compare the result of 'fdisk -l' before and after
           attaching storage pci device to guest.
    """
    # get the params from params
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    sriov = ('yes' == params.get("libvirt_pci_SRIOV", 'no'))
    device_type = params.get("libvirt_pci_device_type", "NIC")
    pci_dev = None
    device_name = None
    pci_address = None
    bus_info = []
    if device_type == "NIC":
        pci_dev = params.get("libvirt_pci_net_dev_label")
        device_name = params.get("libvirt_pci_net_dev_name", "None")
    else:
        pci_dev = params.get("libvirt_pci_storage_dev_label")

    net_ip = params.get("libvirt_pci_net_ip", "ENTER.YOUR.IP")
    server_ip = params.get("libvirt_pci_server_ip",
                           "ENTER.YOUR.SERVER.IP")
    netmask = params.get("libvirt_pci_net_mask", "ENTER.YOUR.Mask")

    # Check the parameters from configuration file.
    if (pci_dev.count("ENTER")):
        test.cancel("Please enter your device name for test.")
    if (device_type == "NIC" and (net_ip.count("ENTER") or
                                  server_ip.count("ENTER") or
                                  netmask.count("ENTER"))):
        test.cancel("Please enter the ips and netmask for NIC test in config file")
    fdisk_list_before = None
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    if device_type == "NIC":
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        output = session.cmd_output("lspci -nn")
        nic_list_before = output.splitlines()
        if sriov:
            # Set the parameter max_vfs of the igb module to 7, so the
            # virtual function pci devices can be used as network devices.

            # command 'modprobe -r igb' unloads the igb module
            # command '&& modprobe igb max_vfs=7' loads it again
            #          with max_vfs=7
            # command '|| echo 'FAIL' > output_file' flags that
            #          modprobe igb with max_vfs=7 failed.
            # command '|| modprobe igb' handles the error that occurred
            #          when loading igb again. If command 2 failed,
            #          this command is executed to recover the network.
            output_file = os.path.join(test.tmpdir, "output")
            if os.path.exists(output_file):
                os.remove(output_file)
            mod_cmd = ("modprobe -r igb && modprobe igb max_vfs=7 || "
                       "echo 'FAIL' > %s && modprobe igb &" % output_file)
            result = process.run(mod_cmd, ignore_status=True, shell=True)
            if os.path.exists(output_file):
                test.error("Failed to modprobe igb with max_vfs=7.")
            # Get the virtual function pci device which was generated above.
            pci_xml = NodedevXML.new_from_dumpxml(pci_dev)
            virt_functions = pci_xml.cap.virt_functions
            if not virt_functions:
                test.error("Init virtual function failed.")
            pci_address = virt_functions[0]
            pci_dev = utils_test.libvirt.pci_label_from_address(pci_address,
                                                                radix=16)
            # Find the network interface (ethX) that is using this pci device.
            distro_details = distro.detect()
            if distro_details.name == 'Ubuntu':
                network_service = service.Factory.create_service("networking")
            else:
                network_service = service.Factory.create_service("network")
            network_service.restart()
            result = virsh.nodedev_list("net")
            nodedev_nets = result.stdout.strip().splitlines()
            device = None
            for nodedev in nodedev_nets:
                netxml = NodedevXML.new_from_dumpxml(nodedev)
                if netxml.parent == pci_dev:
                    device = nodedev
                    break
            if not device:
                test.error("No network interface is using "
                           "Virtual Function PCI device %s." %
                           pci_dev)
            pci_xml = NodedevXML.new_from_dumpxml(pci_dev)
            pci_address = pci_xml.cap.get_address_dict()
            vmxml.add_hostdev(pci_address)
        else:
            pci_id = pci_dev.replace("_", ".").strip("pci.").replace(".", ":", 2)
            obj = PciAssignable()
            # get all functions id's
            pci_ids = obj.get_same_group_devs(pci_id)
            pci_devs = []
            for val in pci_ids:
                temp = val.replace(":", "_")
                pci_devs.extend(["pci_"+temp])
            pci_id = re.sub('[:.]', '_', pci_id)
            for val in pci_devs:
                val = val.replace(".", "_")
                pci_xml = NodedevXML.new_from_dumpxml(val)
                pci_address = pci_xml.cap.get_address_dict()
                vmxml.add_hostdev(pci_address)

    elif device_type == "STORAGE":
        # Store the result of "fdisk -l" in guest.
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"")
        fdisk_list_before = output.splitlines()

        pci_xml = NodedevXML.new_from_dumpxml(pci_dev)
        pci_address = pci_xml.cap.get_address_dict()
        vmxml.add_hostdev(pci_address)
    try:
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        if device_type == "NIC":
            output = session.cmd_output("lspci -nn")
            nic_list_after = output.splitlines()
            if nic_list_after == nic_list_before:
                test.fail("Passthrough adapter not found in guest.")
            else:
                logging.debug("Adapter passed through to guest successfully")
            if sriov:
                try:
                    output = session.cmd_output("lspci -nn | grep %s" % device_name)
                    nic_id = str(output).split(' ', 1)[0]
                    nic_name = str(utils_misc.get_interface_from_pci_id(nic_id, session))
                    session.cmd("ip addr flush dev %s" % nic_name)
                    session.cmd("ip addr add %s/%s dev %s"
                                % (net_ip, netmask, nic_name))
                    session.cmd("ip link set %s up" % nic_name)
                    session.cmd("ping -I %s %s -c 5" % (nic_name, server_ip))
                except aexpect.ShellError as detail:
                    test.error("Succeeded in setting the ip on the guest, "
                               "but failed to ping the server ip from the "
                               "guest. %s\n" % detail)
            else:
                output = session.cmd_output("lspci -nn | grep %s" % device_name)
                nic_list = output.splitlines()
                for val in range(len(nic_list)):
                    bus_info.append(str(nic_list[val]).split(' ', 1)[0])
                    nic_list[val] = str(nic_list[val]).split(' ', 1)[0][:-2]
                # check all functions get same iommu group
                if len(set(nic_list)) != 1:
                    test.fail("Multifunction Device passthroughed but "
                              "functions are in different iommu group")
                # ping to server from each function
                bus_info.sort()
                for val in bus_info:
                    nic_name = str(utils_misc.get_interface_from_pci_id(val, session))
                    try:
                        session.cmd("ip addr flush dev %s" % nic_name)
                        session.cmd("ip addr add %s/%s dev %s"
                                    % (net_ip, netmask, nic_name))
                        session.cmd("ip link set %s up" % nic_name)
                        session.cmd("ping -I %s %s -c 5" % (nic_name, server_ip))
                    except aexpect.ShellError as detail:
                        test.error("Succeeded in setting the ip on the guest, "
                                   "but failed to ping the server ip from "
                                   "the guest. %s\n" % detail)
        elif device_type == "STORAGE":
            # Get the result of "fdisk -l" in guest, and compare the result
            # with fdisk_list_before.
            output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"")
            fdisk_list_after = output.splitlines()
            if fdisk_list_after == fdisk_list_before:
                test.fail("Didn't find the disk attached to guest.")
    finally:
        backup_xml.sync()
Example #33
def run_svirt_attach_disk(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Create an image to attach to the VM.
    (3).Attach the disk.
    (4).Start the VM and check the result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_attach_disk_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_attach_disk_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_attach_disk_vm_sec_model", "selinux")
    sec_label = params.get("svirt_attach_disk_vm_sec_label", None)
    sec_relabel = params.get("svirt_attach_disk_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label,
                'relabel': sec_relabel}
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    # Get variables about image.
    img_label = params.get('svirt_attach_disk_disk_label')
    img_name = "svirt_disk"
    # Default label for the other disks.
    # To ensure VM is able to access other disks.
    default_label = params.get('svirt_attach_disk_disk_default_label', None)

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the default label to other disks of vm.
    disks = vm.get_disk_devices()
    for disk in disks.values():
        utils_selinux.set_context_of_file(filename=disk['source'],
                                          context=default_label)
    # Init a QemuImg instance.
    params['image_name'] = img_name
    tmp_dir = data_dir.get_tmp_dir()
    image = qemu_storage.QemuImg(params, tmp_dir, img_name)
    # Create an image.
    img_path, result = image.create(params)
    # Set the context of the image.
    utils_selinux.set_context_of_file(filename=img_path, context=img_label)
    # Set the context of the VM.
    vmxml.set_seclabel(sec_dict)
    vmxml.sync()

    # Do the attach action.
    try:
        virsh.attach_disk(vm_name, source=img_path, target="vdf",
                          extra="--persistent", ignore_status=False)
    except error.CmdError:
        raise error.TestFail("Attach disk %s to vdf on VM %s failed."
                             % (img_path, vm.name))

    # Check result.
    try:
        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with set seclabel can access the image with the
            # set context.
            if status_error:
                raise error.TestFail('Test succeeded in negative case.')
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with set seclabel can not access the image with the
            # set context.
            if not status_error:
                raise error.TestFail("Test failed in positive case. "
                                     "Error: %s" % e)
    finally:
        # clean up
        try:
            virsh.detach_disk(vm_name, target="vdf", extra="--persistent",
                              ignore_status=False)
        except error.CmdError:
            raise error.TestFail("Detach disk 'vdf' from VM %s failed."
                                 % vm.name)
        image.remove()
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
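virsh.attach_disk and virsh.detach_disk above wrap the virsh CLI. A rough subprocess-based equivalent of the attach/detach pair (the domain name and image path are hypothetical):

import subprocess

def virsh(*args):
    # check=True raises CalledProcessError on a nonzero exit status,
    # analogous to ignore_status=False above.
    return subprocess.run(("virsh",) + args, check=True,
                          capture_output=True, text=True)

virsh("attach-disk", "demo-vm", "/tmp/svirt_disk.img", "vdf", "--persistent")
try:
    pass  # start the VM and run the checks here
finally:
    virsh("detach-disk", "demo-vm", "vdf", "--persistent")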
def run(test, params, env):
    """
    Test DAC setting in both domain xml and qemu.conf.

    (1) Init variables for test.
    (2) Set VM xml and qemu.conf with proper DAC label, also set
        monitor socket parent dir with proper ownership and mode.
    (3) Start VM and check the context.
    """

    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("vm_sec_type", "dynamic")
    vm_sec_model = params.get("vm_sec_model", "dac")
    vm_sec_label = params.get("vm_sec_label", None)
    vm_sec_relabel = params.get("vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': vm_sec_model,
                'relabel': vm_sec_relabel}
    if vm_sec_label:
        sec_dict['label'] = vm_sec_label
    set_qemu_conf = "yes" == params.get("set_qemu_conf", "no")
    # Get per-img seclabel variables
    disk_type = params.get("disk_type")
    disk_target = params.get('disk_target')
    disk_src_protocol = params.get("disk_source_protocol")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)
    invalid_label = 'yes' == params.get("invalid_label", "no")
    relabel = params.get("per_img_sec_relabel")
    sec_label = params.get("per_img_sec_label")
    per_sec_model = params.get("per_sec_model", 'dac')
    per_img_dict = {'sec_model': per_sec_model, 'relabel': relabel,
                    'sec_label': sec_label}
    params.update(per_img_dict)
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user", 'qemu')
    qemu_group = params.get("qemu_group", 'qemu')
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    if backup_sestatus == "disabled":
        test.cancel("SELinux is in Disabled "
                    "mode. it must be in Enforcing "
                    "mode to run this test")
    utils_selinux.set_status(host_sestatus)

    qemu_sock_mod = False
    qemu_sock_path = '/var/lib/libvirt/qemu/'
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        if set_qemu_conf:
            # Set qemu.conf for user and group
            if qemu_user:
                qemu_conf.user = qemu_user
            if qemu_group:
                qemu_conf.group = qemu_group
            if dynamic_ownership:
                qemu_conf.dynamic_ownership = 1
            else:
                qemu_conf.dynamic_ownership = 0
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()
            st = os.stat(qemu_sock_path)
            if not bool(st.st_mode & stat.S_IWGRP):
                # chmod g+w
                os.chmod(qemu_sock_path, st.st_mode | stat.S_IWGRP)
                qemu_sock_mod = True

        # Set the context of the VM.
        logging.debug("sec_dict is %s" % sec_dict)
        vmxml.set_seclabel([sec_dict])
        vmxml.sync()

        # Get per-image seclabel in id string
        if sec_label:
            per_img_usr, per_img_grp = sec_label.split(':')
            sec_label_id = format_user_group_str(per_img_usr, per_img_grp)

        # Start VM to check the qemu process and image.
        try:
            # Set per-img sec context and start vm
            utlv.set_vm_disk(vm, params)
            # Start VM successfully.
            if status_error:
                if invalid_label:
                    # invalid label should fail, more info in bug 1165485
                    logging.debug("The guest failed to start as expected,"
                                  "details see bug: bugzilla.redhat.com/show_bug.cgi"
                                  "?id=1165485")
                else:
                    test.fail("Test succeeded in negative case.")

            # Get vm process label when VM is running.
            vm_pid = vm.get_pid()
            pid_stat = os.stat("/proc/%d" % vm_pid)
            vm_process_uid = pid_stat.st_uid
            vm_process_gid = pid_stat.st_gid
            vm_context = "%s:%s" % (vm_process_uid, vm_process_gid)
            logging.debug("vm process label is: %s", vm_context)

            # Get vm image label when VM is running
            if disk_type != "network":
                disks = vm.get_blk_devices()
                if libvirt_version.version_compare(3, 1, 0) and disk_type == "block":
                    output = to_text(process.system_output(
                        "nsenter -t %d -m -- ls -l %s" % (vm_pid, disks[disk_target]['source'])))
                    owner, group = output.strip().split()[2:4]
                    disk_context = format_user_group_str(owner, group)
                else:
                    stat_re = os.stat(disks[disk_target]['source'])
                    disk_context = "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
                logging.debug("The disk dac label after vm start is: %s",
                              disk_context)
                if sec_label and relabel == 'yes':
                    if disk_context != sec_label_id:
                        test.fail("The disk label is not equal to "
                                  "'%s'." % sec_label_id)

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                test.fail("Test failed in positive case."
                          "error: %s" % e)
    finally:
        # clean up
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if qemu_sock_mod:
            st = os.stat(qemu_sock_path)
            os.chmod(qemu_sock_path, st.st_mode ^ stat.S_IWGRP)
        if set_qemu_conf:
            qemu_conf.restore()
            libvirtd.restart()
        utils_selinux.set_status(backup_sestatus)
        if disk_src_protocol == 'iscsi':
            utlv.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src_protocol == 'gluster':
            utlv.setup_or_cleanup_gluster(False, brick_path=brick_path, **params)
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            utlv.setup_or_cleanup_nfs(is_setup=False,
                                      restore_selinux=backup_sestatus)
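format_user_group_str is called above but not defined in this snippet. A plausible reconstruction, assuming it normalizes symbolic user/group names to a numeric "uid:gid" string so labels compare uniformly:

import grp
import pwd

def format_user_group_str(user, group):
    # Accept either numeric ids or names like "qemu".
    uid = user if str(user).isdigit() else pwd.getpwnam(user).pw_uid
    gid = group if str(group).isdigit() else grp.getgrnam(group).gr_gid
    return "%s:%s" % (uid, gid)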
Example #35
def run(test, params, env):
    """
    Test for PCI device passthrough to libvirt guest.

    a). NIC:
        1. Get params.
        2. Get the pci device for specific net_name.
        3. Attach pci device to guest.
        4. Start guest and set the ip of the guest.
        5. Ping the server_ip from the guest
           to verify the new network device.
    b). STORAGE:
        1. Get params.
        2. Get the pci device for specific storage_dev_name.
        3. Store the result of 'fdisk -l' on guest.
        4. Attach pci device to guest.
        5. Start guest and get the result of 'fdisk -l' on guest.
        6. Compare the result of 'fdisk -l' before and after
           attaching storage pci device to guest.
    """
    # get the params from params
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    device_type = params.get("libvirt_pci_device_type", "NIC")

    net_name = params.get("libvirt_pci_net_name", "eth0")
    server_ip = params.get("libvirt_pci_server_ip")

    storage_dev_name = params.get("libvirt_pci_storage_dev_name", "/dev/sdb")
    fdisk_list_before = None

    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    pci_address = None
    if device_type == "NIC":
        # Find the pci device for given network device.
        result = virsh.nodedev_list("net")
        nodedev_nets = result.stdout.strip().splitlines()
        device = None
        for nodedev in nodedev_nets:
            netxml = NodedevXML.new_from_dumpxml(nodedev)
            if netxml.cap.interface == net_name:
                device = nodedev
                break
        if not device:
            raise error.TestError("There is no network device named %s." %
                                  net_name)
        pci_dev = netxml.parent
        pci_xml = NodedevXML.new_from_dumpxml(pci_dev)
        pci_address = pci_xml.cap.get_address_dict()

    elif device_type == "STORAGE":
        # Store the result of "fdisk -l" in guest.
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"")
        fdisk_list_before = output.splitlines()

        result = virsh.nodedev_list("storage")
        nodedev_storages = result.stdout.strip().splitlines()
        for nodedev in nodedev_storages:
            storage_xml = NodedevXML.new_from_dumpxml(nodedev)
            if storage_xml.cap.block == storage_dev_name:
                break
        else:
            # The loop completed without a break, so no device matched.
            raise error.TestError("There is no block device named %s." %
                                  storage_dev_name)
        pci_xml = NodedevXML.new_from_dumpxml(storage_xml.parent)
        pci_address = pci_xml.cap.get_address_dict()

    vmxml.add_hostdev(pci_address)

    try:
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        if device_type == "NIC":
            try:
                session.cmd("ping -c 4 %s" % server_ip)
            except aexpect.ShellError as detail:
                raise error.TestFail("Succeeded in setting the ip on the "
                                     "guest, but failed to ping the server "
                                     "ip from the guest.\nDetail: %s." % detail)
        elif device_type == "STORAGE":
            # Get the result of "fdisk -l" in guest, and compare the result with
            # fdisk_list_before.
            output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"")
            fdisk_list_after = output.splitlines()
            if fdisk_list_after == fdisk_list_before:
                raise error.TestFail("Didn't find the disk attached to guest.")
Example #36
def run(test, params, env):
    """
    Domain CPU management testing.

    1. Prepare a domain for testing, install qemu-guest-ga if needed.
    2. Plug vcpu for the domain.
    3. Checking:
      3.1. Virsh vcpucount.
      3.2. Virsh vcpuinfo.
      3.3. Current vcpu number in domain xml.
      3.4. Virsh vcpupin and vcpupin in domain xml.
      3.5. The vcpu number in domain.
      3.6. Virsh cpu-stats.
    4. Repeat step 3 to check again.
    5. Control domain(save, managedsave, s3, s4, migrate, etc.).
    6. Repeat step 3 to check again.
    7. Recover domain(restore, wakeup, etc.).
    8. Repeat step 3 to check again.
    9. Unplug vcpu for the domain.
    10. Repeat step 3 to check again.
    11. Repeat step 5 to control domain(As BZ#1088216 not fix, skip
        save/managedsave/migrate related actions).
    12. Repeat step 3 to check again.
    13. Repeat step 7 to recover domain.
    14. Repeat step 3 to check again.
    15. Recover test environment.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_operation = params.get("vm_operation", "null")
    vcpu_max_num = params.get("vcpu_max_num")
    vcpu_current_num = params.get("vcpu_current_num")
    vcpu_plug = "yes" == params.get("vcpu_plug", "no")
    vcpu_plug_num = params.get("vcpu_plug_num")
    vcpu_unplug = "yes" == params.get("vcpu_unplug", "no")
    vcpu_unplug_num = params.get("vcpu_unplug_num")
    setvcpu_option = params.get("setvcpu_option", "")
    agent_channel = "yes" == params.get("agent_channel", "yes")
    install_qemuga = "yes" == params.get("install_qemuga", "no")
    start_qemuga = "yes" == params.get("start_qemuga", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    setvcpu_readonly = "yes" == params.get("setvcpu_readonly", "no")
    status_error = "yes" == params.get("status_error", "no")
    pin_before_plug = "yes" == params.get("pin_before_plug", "no")
    pin_after_plug = "yes" == params.get("pin_after_plug", "no")
    pin_before_unplug = "yes" == params.get("pin_before_unplug", "no")
    pin_after_unplug = "yes" == params.get("pin_after_unplug", "no")
    pin_vcpu = params.get("pin_vcpu")
    pin_cpu_list = params.get("pin_cpu_list", "x")
    check_after_plug_fail = "yes" == params.get("check_after_plug_fail", "no")

    # Init expect vcpu count values
    expect_vcpu_num = [
        vcpu_max_num, vcpu_max_num, vcpu_current_num, vcpu_current_num,
        vcpu_current_num
    ]
    if check_after_plug_fail:
        expect_vcpu_num_bk = list(expect_vcpu_num)
    # Init expect vcpu pin values
    expect_vcpupin = {}

    # Init cpu-list for vcpupin
    host_cpu_count = utils.count_cpus()
    if (int(host_cpu_count) < 2) and (not pin_cpu_list == "x"):
        raise error.TestNAError("We need more cpus on host in this case for"
                                " the cpu-list=%s. But current number of cpu"
                                " on host is %s." %
                                (pin_cpu_list, host_cpu_count))

    cpus_list = utils.cpu_online_map()
    logging.info("Active cpus in host are %s", cpus_list)

    cpu_seq_str = ""
    for i in range(len(cpus_list) - 1):
        if int(cpus_list[i]) + 1 == int(cpus_list[i + 1]):
            cpu_seq_str = "%s-%s" % (cpus_list[i], cpus_list[i + 1])
            break

    if pin_cpu_list == "x":
        pin_cpu_list = cpus_list[-1]
    if pin_cpu_list == "x-y":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str
        else:
            pin_cpu_list = "%s-%s" % (cpus_list[0], cpus_list[0])
    elif pin_cpu_list == "x,y":
        pin_cpu_list = "%s,%s" % (cpus_list[0], cpus_list[1])
    elif pin_cpu_list == "x-y,^z":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str + ",^%s" % cpu_seq_str.split('-')[1]
        else:
            pin_cpu_list = "%s,%s,^%s" % (cpus_list[0], cpus_list[1],
                                          cpus_list[0])
    else:
        # Just use the value from the cfg
        pass

    need_mkswap = False
    # Back up domain XML
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    try:
        # Customize domain vcpu number
        if vm.is_alive():
            vm.destroy()
        if agent_channel:
            vmxml.set_agent_channel()
        else:
            vmxml.remove_agent_channels()
        vmxml.sync()

        vmxml.set_vm_vcpus(vm_name, int(vcpu_max_num), int(vcpu_current_num))
        # Do not apply S3/S4 on power
        if 'power' not in cpu_util.get_cpu_arch():
            vmxml.set_pm_suspend(vm_name, "yes", "yes")
        vm.start()

        # Create swap partition/file if necessary
        if vm_operation == "s4":
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition")
            vm.create_swap_partition()

        # Prepare qemu guest agent
        if install_qemuga:
            vm.prepare_guest_agent(prepare_xml=False, start=start_qemuga)
            vm.setenforce(0)
        else:
            # Remove qemu-guest-agent for negative test
            vm.remove_package('qemu-guest-agent')

        # Run test
        check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin)
        # plug vcpu
        if vcpu_plug:
            # Pin vcpu
            if pin_before_plug:
                result = virsh.vcpupin(vm_name,
                                       pin_vcpu,
                                       pin_cpu_list,
                                       ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                expect_vcpupin = {pin_vcpu: pin_cpu_list}

            result = virsh.setvcpus(vm_name,
                                    vcpu_plug_num,
                                    setvcpu_option,
                                    readonly=setvcpu_readonly,
                                    ignore_status=True,
                                    debug=True)
            check_setvcpus_result(result, status_error)

            if setvcpu_option == "--config":
                expect_vcpu_num[2] = vcpu_plug_num
            elif setvcpu_option == "--guest":
                # setvcpus '--guest' only affects the vcpu number in the guest
                expect_vcpu_num[4] = vcpu_plug_num
            else:
                expect_vcpu_num[3] = vcpu_plug_num
                expect_vcpu_num[4] = vcpu_plug_num
                if not status_error:
                    if not online_new_vcpu(vm, vcpu_plug_num):
                        raise error.TestFail("Fail to enable new added cpu")

            # Pin vcpu
            if pin_after_plug:
                result = virsh.vcpupin(vm_name,
                                       pin_vcpu,
                                       pin_cpu_list,
                                       ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                expect_vcpupin = {pin_vcpu: pin_cpu_list}

            if status_error and check_after_plug_fail:
                check_vcpu_number(vm, expect_vcpu_num_bk, {}, setvcpu_option)

            if not status_error:
                if restart_libvirtd:
                    utils_libvirtd.libvirtd_restart()

                # Check vcpu number and related commands
                check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                  setvcpu_option)

                # Control domain
                manipulate_domain(vm_name, vm_operation)

                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)

                # Recover domain
                manipulate_domain(vm_name, vm_operation, recover=True)

                # Resuming the domain from S4 may take a long time (QEMU
                # bug), so wait up to 10 minutes and skip the remaining
                # test steps if the domain does not resume successfully
                try:
                    vm.wait_for_login(timeout=600)
                except Exception as e:
                    raise error.TestWarn("Skipping remaining test steps as "
                                         "the domain did not resume within "
                                         "10 mins: %s" % e)
                # For vcpu hotplug/unplug without the '--config' flag, after
                # suspending the domain to disk (shut off) and restarting it,
                # the current live vcpu number recovers to the original value
                if vm_operation == 's4':
                    if setvcpu_option.count("--config"):
                        expect_vcpu_num[3] = vcpu_plug_num
                        expect_vcpu_num[4] = vcpu_plug_num
                    elif setvcpu_option.count("--guest"):
                        expect_vcpu_num[4] = vcpu_plug_num
                    else:
                        expect_vcpu_num[3] = vcpu_current_num
                        expect_vcpu_num[4] = vcpu_current_num
                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)

        # Unplug vcpu
        if vcpu_unplug:
            # Pin vcpu
            if pin_before_unplug:
                result = virsh.vcpupin(vm_name,
                                       pin_vcpu,
                                       pin_cpu_list,
                                       ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                # As the vcpu will be unplugged later, set expect_vcpupin to empty
                expect_vcpupin = {}

            result = virsh.setvcpus(vm_name,
                                    vcpu_unplug_num,
                                    setvcpu_option,
                                    readonly=setvcpu_readonly,
                                    ignore_status=True,
                                    debug=True)

            try:
                check_setvcpus_result(result, status_error)
            except error.TestNAError:
                raise error.TestWarn("Skip unplug vcpu as it is not supported")

            if setvcpu_option == "--config":
                expect_vcpu_num[2] = vcpu_unplug_num
            elif setvcpu_option == "--guest":
                # setvcpus '--guest' only affects the vcpu number in the guest
                expect_vcpu_num[4] = vcpu_unplug_num
            else:
                expect_vcpu_num[3] = vcpu_unplug_num
                expect_vcpu_num[4] = vcpu_unplug_num

            # Pin vcpu
            if pin_after_unplug:
                result = virsh.vcpupin(vm_name,
                                       pin_vcpu,
                                       pin_cpu_list,
                                       ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                expect_vcpupin = {pin_vcpu: pin_cpu_list}

            if not status_error:
                if restart_libvirtd:
                    utils_libvirtd.libvirtd_restart()

                # Check vcpu number and related commands
                check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                  setvcpu_option)

                # Control domain
                manipulate_domain(vm_name, vm_operation)

                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)

                # Recover domain
                manipulate_domain(vm_name, vm_operation, recover=True)

                # Resuming the domain from S4 may take a long time (QEMU
                # bug), so wait up to 10 minutes and skip the remaining
                # test steps if the domain does not resume successfully
                try:
                    vm.wait_for_login(timeout=600)
                except Exception as e:
                    raise error.TestWarn("Skipping remaining test steps as "
                                         "the domain did not resume within "
                                         "10 mins: %s" % e)
                # For vcpu hotplug/unplug without the '--config' flag, after
                # suspending the domain to disk (shut off) and restarting it,
                # the current live vcpu number recovers to the original value
                if vm_operation == 's4':
                    if setvcpu_option.count("--config"):
                        expect_vcpu_num[3] = vcpu_unplug_num
                        expect_vcpu_num[4] = vcpu_unplug_num
                    elif setvcpu_option.count("--guest"):
                        expect_vcpu_num[4] = vcpu_unplug_num
                    else:
                        expect_vcpu_num[3] = vcpu_current_num
                        expect_vcpu_num[4] = vcpu_current_num
                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)
Example #37
def run(test, params, env):
    """
    Test for PCI device passthrough to libvirt guest.

    a). NIC:
        1. Get params.
        2. Get the pci device for specific net_name.
        3. Attach Physical Functions/Virtual Functions to a single guest.
        4. Start guest and set the ip to all the functions.
        5. Ping to server_ip from each function
           to verify the new network device.
    b). STORAGE:
        1. Get params.
        2. Get the pci device for specific storage_dev_name.
        3. Store the result of 'fdisk -l' on guest.
        4. Attach pci device to guest.
        5. Start guest and get the result of 'fdisk -l' on guest.
        6. Compare the result of 'fdisk -l' before and after
           attaching storage pci device to guest.
    """

    # get the params from params
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    sriov = ('yes' == params.get("libvirt_pci_SRIOV", 'no'))
    device_type = params.get("libvirt_pci_device_type", "NIC")
    vm_vfs = int(params.get("number_vfs", 2))
    pci_dev = None
    pci_address = None
    bus_info = []
    if device_type == "NIC":
        pf_filter = params.get("pf_filter", "0000:01:00.0")
        vf_filter = params.get("vf_filter", "Virtual Function")
    else:
        pci_dev = params.get("libvirt_pci_storage_dev_label")

    net_ip = params.get("libvirt_pci_net_ip", "ENTER.YOUR.IP")
    server_ip = params.get("libvirt_pci_server_ip", "ENTER.YOUR.SERVER.IP")
    netmask = params.get("libvirt_pci_net_mask", "ENTER.YOUR.Mask")

    # Check the parameters from configuration file.
    if (device_type == "NIC"):
        if (pf_filter.count("ENTER")):
            test.cancel("Please enter your NIC Adapter details for test.")
        if (net_ip.count("ENTER") or server_ip.count("ENTER")
                or netmask.count("ENTER")):
            test.cancel("Please enter the ips and netmask for NIC "
                        "test in config file")
    elif (pci_dev.count("ENTER")):
        test.cancel("Please enter your Storage Adapter details for test.")
    fdisk_list_before = None
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    if device_type == "NIC":
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        nic_list_before = vm.get_pci_devices()
        obj = PciAssignable(pf_filter_re=pf_filter, vf_filter_re=vf_filter)
        # get all functions id's
        pci_ids = obj.get_same_group_devs(pf_filter)
        pci_devs = []
        for val in pci_ids:
            temp = val.replace(":", "_")
            pci_devs.extend(["pci_" + temp])
        if sriov:
            # The SR-IOV setup of the VFs should be done by the test_setup
            # PciAssignable class.

            for pf in pci_ids:
                obj.set_vf(pf, vm_vfs)
                cont = obj.get_controller_type()
                if cont == "Infiniband controller":
                    obj.set_linkvf_ib()
            for val in pci_devs:
                val = val.replace(".", "_")
                # Get the virtual functions of the pci devices
                # which was generated above.
                pci_xml = NodedevXML.new_from_dumpxml(val)
                virt_functions = pci_xml.cap.virt_functions
                if not virt_functions:
                    test.fail("No Virtual Functions found.")
                for val in virt_functions:
                    pci_dev = utils_test.libvirt.pci_label_from_address(
                        val, radix=16)
                    pci_xml = NodedevXML.new_from_dumpxml(pci_dev)
                    pci_address = pci_xml.cap.get_address_dict()
                    vmxml.add_hostdev(pci_address)
        else:
            for val in pci_devs:
                val = val.replace(".", "_")
                pci_xml = NodedevXML.new_from_dumpxml(val)
                pci_address = pci_xml.cap.get_address_dict()
                vmxml.add_hostdev(pci_address)

    elif device_type == "STORAGE":
        # Store the result of "fdisk -l" in guest.
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"")
        fdisk_list_before = output.splitlines()

        pci_xml = NodedevXML.new_from_dumpxml(pci_dev)
        pci_address = pci_xml.cap.get_address_dict()
        vmxml.add_hostdev(pci_address)
    try:
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        # The Network configuration is generic irrespective of PF or SRIOV VF
        if device_type == "NIC":
            nic_list_after = vm.get_pci_devices()
            net_ip = netaddr.IPAddress(net_ip)
            if sorted(nic_list_after) == sorted(nic_list_before):
                test.fail("Passthrough Adapter not found in guest.")
            else:
                logging.debug("Adapter passthroughed to guest successfully")
            nic_list = list(
                set(nic_list_after).difference(set(nic_list_before)))
            for val in range(len(nic_list)):
                bus_info.append(str(nic_list[val]).split(' ', 1)[0])
                nic_list[val] = str(nic_list[val]).split(' ', 1)[0][:-2]
            bus_info.sort()
            if not sriov:
                # check all functions get same iommu group
                if len(set(nic_list)) != 1:
                    test.fail("Multifunction Device passthroughed but "
                              "functions are in different iommu group")
            # ping to server from each function
            for val in bus_info:
                nic_name = str(
                    utils_misc.get_interface_from_pci_id(val, session))
                session.cmd("ip addr flush dev %s" % nic_name)
                session.cmd("ip addr add %s/%s dev %s" %
                            (net_ip, netmask, nic_name))
                session.cmd("ip link set %s up" % nic_name)
                # Pinging by interface name is unreliable here,
                # hence the IP address is used instead
                s_ping, o_ping = utils_test.ping(server_ip,
                                                 count=5,
                                                 interface=net_ip,
                                                 timeout=30,
                                                 session=session)
                logging.info(o_ping)
                if s_ping != 0:
                    err_msg = "Ping test fails, error info: '%s'"
                    test.fail(err_msg % o_ping)
                # Each interface should have unique IP
                net_ip = net_ip + 1

        elif device_type == "STORAGE":
            # Get the result of "fdisk -l" in guest, and
            # compare the result with fdisk_list_before.
            output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"")
            fdisk_list_after = output.splitlines()
            if fdisk_list_after == fdisk_list_before:
                test.fail("Didn't find the disk attached to guest.")
    finally:
        backup_xml.sync()
        # For SR-IOV, VFs should be cleaned up in post-processing.
        if sriov:
            if obj.get_vfs_count() != 0:
                for pci_pf in pci_ids:
                    obj.set_vf(pci_pf, vf_no="0")
Example #38
def run(test, params, env):
    """
    Test the input virtual devices

    1. prepare a guest with different input devices
    2. check whether the guest can be started
    3. check the qemu cmd line
    """
    def check_dumpxml():
        """
        Check whether the added devices are shown in the guest xml
        """
        pattern = "<input bus=\"%s\" type=\"%s\">" % (bus_type, input_type)
        if with_packed:
            pattern = "<driver packed=\"%s\"" % (driver_packed)
        logging.debug('Searching for %s in vm xml', pattern)
        xml_after_adding_device = VMXML.new_from_dumpxml(vm_name)
        logging.debug('xml_after_adding_device:\n%s', xml_after_adding_device)
        if pattern not in str(xml_after_adding_device):
            test.fail("Can not find the %s input device xml "
                      "in the guest xml file." % input_type)

    def check_qemu_cmd_line():
        """
        Check whether the added devices are shown in the qemu cmd line
        """
        # if the tested input device is a keyboard or mouse with ps2 bus,
        # there is no keyboard or mouse in qemu cmd line
        if bus_type == "ps2" and input_type in ["keyboard", "mouse"]:
            return
        with open('/proc/%s/cmdline' % vm.get_pid(), 'r') as cmdline_file:
            cmdline = cmdline_file.read()
        if bus_type == "usb" and input_type == "keyboard":
            pattern = r"-device.*%s-kbd" % bus_type
        elif input_type == "passthrough":
            pattern = r"-device.*%s-input-host-pci" % bus_type
        else:
            pattern = r"-device.*%s-%s" % (bus_type, input_type)
        if not re.search(pattern, cmdline):
            test.fail("Can not find the %s input device "
                      "in qemu cmd line." % input_type)
        if with_packed:
            pattern = r"packed.*%s" % driver_packed
            if not re.search(pattern, cmdline):
                test.fail("Can not find the packed driver " "in qemu cmd line")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    machine_type = params.get('machine_type', '')
    status_error = params.get("status_error", "no") == "yes"
    with_packed = params.get("with_packed", "no") == "yes"
    driver_packed = params.get("driver_packed", "on")
    bus_type = params.get("bus_type")
    input_type = params.get("input_type")

    check_preconditions(bus_type, input_type, with_packed, test)

    vm = env.get_vm(vm_name)
    vm_xml = VMXML.new_from_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    if vm.is_alive():
        vm.destroy()

    try:
        # ps2 keyboard and ps2 mouse are the defaults; no need to re-add the
        # xml, unless the machine type is pseries
        if not (bus_type == "ps2" and input_type in ["keyboard", "mouse"]
                and machine_type != 'pseries'):
            vm_xml.remove_all_device_by_type('input')
            input_dev = Input(type_name=input_type)
            input_dev.input_bus = bus_type
            if input_type == "passthrough":
                kbd_dev_name = glob.glob('/dev/input/by-path/*kbd')
                if not kbd_dev_name:
                    test.cancel("There is no keyboard device on this host.")
                logging.debug(
                    "keyboard %s is going to be passthrough "
                    "to the host.", kbd_dev_name[0])
                input_dev.source_evdev = kbd_dev_name[0]
            if with_packed:
                input_dev.driver_packed = driver_packed
            vm_xml.add_device(input_dev)
            try:
                vm_xml.sync()
            except Exception as error:
                if not status_error:
                    test.fail(
                        "Failed to define the guest after adding the %s input "
                        "device xml. Details: %s " % (input_type, error))
                logging.debug(
                    "This is the expected failure in negative cases.")
                return

        res = virsh.start(vm_name)
        if res.exit_status:
            if not status_error:
                test.fail("Failed to start vm after adding the %s input "
                          "device xml. Details: %s " %
                          (input_type, res.stderr))
            logging.debug("This is the expected failure in negative cases.")
            return
        if status_error:
            test.fail(
                "Expected failure in negative case, but vm started successfully.")

        logging.debug("VM started successfully in positive cases.")
        check_dumpxml()
        check_qemu_cmd_line()
    finally:
        if vm.is_alive():
            virsh.destroy(vm_name)
        vm_xml_backup.sync()
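check_qemu_cmd_line above reads /proc/<pid>/cmdline, where arguments are NUL-separated. A sketch that normalizes the separators before matching (the pid and pattern below are illustrative):

import re

def qemu_cmdline_has(pid, pattern):
    with open("/proc/%d/cmdline" % pid) as cmdline_file:
        # Arguments in /proc are joined by NUL bytes; use spaces instead.
        cmdline = cmdline_file.read().replace("\0", " ")
    return re.search(pattern, cmdline) is not None

# e.g. qemu_cmdline_has(12345, r"-device\s+\S*virtio-tablet")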
def run(test, params, env):
    """
    Test for PCI device passthrough to libvirt guest.

    a). NIC:
        1. Get params.
        2. Get the pci device for specific net_name.
        3. Attach pci device to guest.
        4. Start guest and set the ip to all the physical functions.
        5. Ping to server_ip from each physical function
           to verify the new network device.
    b). STORAGE:
        1. Get params.
        2. Get the pci device for specific storage_dev_name.
        3. Store the result of 'fdisk -l' on guest.
        4. Attach pci device to guest.
        5. Start guest and get the result of 'fdisk -l' on guest.
        6. Compare the result of 'fdisk -l' before and after
           attaching storage pci device to guest.
    """

    # get the params from params
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    sriov = ('yes' == params.get("libvirt_pci_SRIOV", 'no'))
    device_type = params.get("libvirt_pci_device_type", "NIC")
    pci_dev = None
    device_name = None
    pci_address = None
    bus_info = []
    if device_type == "NIC":
        pci_dev = params.get("libvirt_pci_net_dev_label")
        device_name = params.get("libvirt_pci_net_dev_name", "None")
    else:
        pci_dev = params.get("libvirt_pci_storage_dev_label")

    net_ip = params.get("libvirt_pci_net_ip", "ENTER.YOUR.IP")
    server_ip = params.get("libvirt_pci_server_ip",
                           "ENTER.YOUR.SERVER.IP")
    netmask = params.get("libvirt_pci_net_mask", "ENTER.YOUR.Mask")

    # Check the parameters from configuration file.
    if (pci_dev.count("ENTER")):
        test.cancel("Please enter your device name for test.")
    if (device_type == "NIC" and (net_ip.count("ENTER") or
                                  server_ip.count("ENTER") or
                                  netmask.count("ENTER"))):
        test.cancel("Please enter the ips and netmask for NIC "
                    "test in config file")
    fdisk_list_before = None
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    if device_type == "NIC":
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        output = session.cmd_output("lspci -nn")
        nic_list_before = output.splitlines()
        if sriov:
            # The SR-IOV setup of the VFs should be done by test_setup
            # based on the driver options.
            # Using PciAssignable to set up the VFs is generic and
            # eliminates the need to hardcode the driver and the number
            # of VFs to be created.

            sriov_setup = PciAssignable(
                driver=params.get("driver"),
                driver_option=params.get("driver_option"),
                host_set_flag=params.get("host_set_flag", 1),
                vf_filter_re=params.get("vf_filter_re"),
                pf_filter_re=params.get("pf_filter_re"),
                pa_type=params.get("pci_assignable"))

            # For Infiniband controllers, we have to set the link
            # for the VFs before passthrough.
            cont = sriov_setup.get_controller_type()
            if cont == "Infiniband controller":
                sriov_setup.set_linkvf_ib()

            # Based on the PF device specified, all the VFs
            # belonging to the same iommu group will be
            # passed through to the guest.
            pci_id = pci_dev.replace("_", ".").strip("pci.").replace(".", ":", 2)
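            # e.g. (illustration, assumed label): a nodedev label such as
            # "pci_0000_01_00_0" becomes the lspci-style id "0000:01:00.0"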
            pci_ids = sriov_setup.get_same_group_devs(pci_id)
            pci_devs = []
            for val in pci_ids:
                temp = val.replace(":", "_")
                pci_devs.extend(["pci_"+temp])
            pci_id = re.sub('[:.]', '_', pci_id)
            for val in pci_devs:
                val = val.replace(".", "_")
                # Get the virtual functions of the pci devices
                # which were generated above.
                pci_xml = NodedevXML.new_from_dumpxml(val)
                virt_functions = pci_xml.cap.virt_functions
                if not virt_functions:
                    test.fail("No Virtual Functions found.")
                for val in virt_functions:
                    pci_dev = utils_test.libvirt.pci_label_from_address(val,
                                                                        radix=16)
                    pci_xml = NodedevXML.new_from_dumpxml(pci_dev)
                    pci_address = pci_xml.cap.get_address_dict()
                    vmxml.add_hostdev(pci_address)
        else:
            pci_id = pci_dev.replace("_", ".").strip("pci.").replace(".", ":", 2)
            obj = PciAssignable()
            # Get the ids of all functions in the same group
            pci_ids = obj.get_same_group_devs(pci_id)
            pci_devs = []
            for val in pci_ids:
                temp = val.replace(":", "_")
                pci_devs.extend(["pci_"+temp])
            pci_id = re.sub('[:.]', '_', pci_id)
            for val in pci_devs:
                val = val.replace(".", "_")
                pci_xml = NodedevXML.new_from_dumpxml(val)
                pci_address = pci_xml.cap.get_address_dict()
                vmxml.add_hostdev(pci_address)

    elif device_type == "STORAGE":
        # Store the result of "fdisk -l" in guest.
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"")
        fdisk_list_before = output.splitlines()

        pci_xml = NodedevXML.new_from_dumpxml(pci_dev)
        pci_address = pci_xml.cap.get_address_dict()
        vmxml.add_hostdev(pci_address)
    try:
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        # The Network configuration is generic irrespective of PF or SRIOV VF
        if device_type == "NIC":
            output = session.cmd_output("lspci -nn")
            nic_list_after = output.splitlines()
            net_ip = netaddr.IPAddress(net_ip)
            if nic_list_after == nic_list_before:
                test.fail("passthrough Adapter not found in guest.")
            else:
                logging.debug("Adapter passthorughed to guest successfully")
            output = session.cmd_output("lspci -nn | grep %s" % device_name)
            nic_list = output.splitlines()
            for val in range(len(nic_list)):
                bus_info.append(str(nic_list[val]).split(' ', 1)[0])
                nic_list[val] = str(nic_list[val]).split(' ', 1)[0][:-2]
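            # e.g. (illustration): an lspci line "3b:00.0 Ethernet ..." gives
            # a bus_info entry "3b:00.0" and a nic_list entry "3b:00"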
            bus_info.sort()
            if not sriov:
                # Check that all functions are in the same iommu group
                if len(set(nic_list)) != 1:
                    test.fail("Multifunction device passed through but "
                              "functions are in different iommu groups")
            # Ping the server from each function
            for val in bus_info:
                nic_name = str(utils_misc.get_interface_from_pci_id(val, session))
                session.cmd("ip addr flush dev %s" % nic_name)
                session.cmd("ip addr add %s/%s dev %s"
                            % (net_ip, netmask, nic_name))
                session.cmd("ip link set %s up" % nic_name)
                # Pinging by interface name is unreliable here,
                # hence the IP address is used instead
                s_ping, o_ping = utils_test.ping(server_ip, count=5,
                                                 interface=net_ip, timeout=30,
                                                 session=session)
                logging.info(o_ping)
                if s_ping != 0:
                    err_msg = "Ping test fails, error info: '%s'"
                    test.fail(err_msg % o_ping)
                # Each interface should have unique IP
                net_ip = net_ip + 1
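                # netaddr.IPAddress supports integer arithmetic, so the
                # line above yields the next address, e.g. 192.168.1.10
                # becomes 192.168.1.11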

        elif device_type == "STORAGE":
            # Get the result of "fdisk -l" in guest, and
            # compare the result with fdisk_list_before.
            output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"")
            fdisk_list_after = output.splitlines()
            if fdisk_list_after == fdisk_list_before:
                test.fail("Didn't find the disk attached to guest.")
    finally:
        backup_xml.sync()
        # For SR-IOV, VFs should be cleaned up in the post-processing.
        if sriov:
            sriov_setup.release_devs()
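
# Note (illustration, not part of the original example): the hostdev element
# that VMXML.add_hostdev(pci_address) produces for a PCI address is roughly:
#
#   <hostdev mode="subsystem" type="pci" managed="yes">
#     <source>
#       <address domain="0x0000" bus="0x01" slot="0x00" function="0x0"/>
#     </source>
#   </hostdev>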
Example #40
0
def run(test, params, env):
    """
    Test the tpm virtual devices
    1. prepare a guest with different tpm devices
    2. check whether the guest can be started
    3. check the xml and qemu cmd line, even swtpm for vtpm
    4. check tpm usage in guest os
    """
    # Tpm passthrough supported since libvirt 1.0.5.
    if not libvirt_version.version_compare(1, 0, 5):
        test.cancel("Tpm device is not supported "
                    "on current libvirt version.")
    # Tpm passthrough supported since qemu 2.12.0-49.
    if not utils_misc.compare_qemu_version(2, 12, 0, is_rhev=False):
        test.cancel("Tpm device is not supported "
                    "on current qemu version.")

    tpm_model = params.get("tpm_model")
    backend_type = params.get("backend_type")
    backend_version = params.get("backend_version")
    device_path = params.get("device_path")
    tpm_num = int(params.get("tpm_num", 1))
    # After first start of vm with vtpm, do operations, check it still works
    vm_operate = params.get("vm_operate")
    # Sub-operation(e.g.domrename) under vm_operate(e.g.restart)
    vm_oprt = params.get("vm_oprt")
    secret_uuid = params.get("secret_uuid")
    secret_value = params.get("secret_value")
    # Change encryption state: from plain to encrypted, or reverse.
    encrypt_change = params.get("encrypt_change")
    prepare_secret = ("yes" == params.get("prepare_secret", "no"))
    remove_dev = ("yes" == params.get("remove_dev", "no"))
    multi_vms = ("yes" == params.get("multi_vms", "no"))
    # Remove swtpm state file
    rm_statefile = ("yes" == params.get("rm_statefile", "no"))
    test_suite = ("yes" == params.get("test_suite", "no"))
    restart_libvirtd = ("yes" == params.get("restart_libvirtd", "no"))
    no_backend = ("yes" == params.get("no_backend", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    err_msg = params.get("xml_errmsg", "")
    loader = params.get("loader", "")
    nvram = params.get("nvram", "")
    uefi_disk_url = params.get("uefi_disk_url", "")
    download_file_path = os.path.join(data_dir.get_tmp_dir(), "uefi_disk.qcow2")

    # Check tpm chip on host for passthrough testing
    if backend_type == "passthrough":
        dmesg_info = process.getoutput("dmesg|grep tpm -wi", shell=True)
        logging.debug("dmesg info about tpm:\n %s", dmesg_info)
        dmesg_error = re.search("No TPM chip found|TPM is disabled", dmesg_info)
        if dmesg_error:
            test.cancel(dmesg_error.group())
        else:
            # Try to check host tpm chip version
            tpm_v = None
            if re.search("2.0 TPM", dmesg_info):
                tpm_v = "2.0"
                if not utils_package.package_install("tpm2-tools"):
                    # package_install() returns True on success
                    test.error("Failed to install tpm2-tools on host")
            else:
                if re.search("1.2 TPM", dmesg_info):
                    tpm_v = "1.2"
                # If "1.2 TPM" or no version info in dmesg, try to test a tpm1.2 at first
                if not utils_package.package_install("tpm-tools"):
                    test.error("Failed to install tpm-tools on host")
    # Check host env for vtpm testing
    elif backend_type == "emulator":
        if not utils_misc.compare_qemu_version(4, 0, 0, is_rhev=False):
            test.cancel("vtpm(emulator backend) is not supported "
                        "on current qemu version.")
        # Install swtpm pkgs on host for vtpm emulation
        if not utils_package.package_install("swtpm*"):
            test.error("Failed to install swtpm swtpm-tools on host")

    def replace_os_disk(vm_xml, vm_name, nvram):
        """
        Replace os(nvram) and disk(uefi) for x86 vtpm test

        :param vm_xml: current vm's xml
        :param vm_name: current vm name
        :param nvram: nvram file path of vm
        """
        # Add loader, nvram in <os>
        nvram = nvram.replace("<VM_NAME>", vm_name)
        dict_os_attrs = {"loader_readonly": "yes",
                         "secure": "yes",
                         "loader_type": "pflash",
                         "loader": loader,
                         "nvram": nvram}
        vm_xml.set_os_attrs(**dict_os_attrs)
        logging.debug("Set smm=on in VMFeaturesXML")
        # Add smm in <features>
        features_xml = vm_xml.features
        features_xml.smm = "on"
        vm_xml.features = features_xml
        vm_xml.sync()
        # Replace disk with an uefi image
        if not utils_package.package_install("wget"):
            test.error("Failed to install wget on host")
        if uefi_disk_url.count("EXAMPLE"):
            test.error("Please provide the URL %s" % uefi_disk_url)
        else:
            download_cmd = ("wget %s -O %s" % (uefi_disk_url, download_file_path))
            process.system(download_cmd, verbose=False, shell=True)
        vm = env.get_vm(vm_name)
        uefi_disk = {'disk_source_name': download_file_path}
        libvirt.set_vm_disk(vm, uefi_disk)
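
    # Note (illustration, paths assumed): after replace_os_disk() the <os>
    # section of the guest xml contains roughly:
    #   <loader readonly="yes" secure="yes" type="pflash">...OVMF code path...</loader>
    #   <nvram>/var/lib/libvirt/qemu/nvram/<vm_name>_VARS.fd</nvram>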

    vm_names = params.get("vms").split()
    vm_name = vm_names[0]
    vm = env.get_vm(vm_name)
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    os_xml = getattr(vm_xml, "os")
    host_arch = platform.machine()
    if backend_type == "emulator" and host_arch == 'x86_64':
        if not utils_package.package_install("OVMF"):
            test.error("Failed to install OVMF or edk2-ovmf pkgs on host")
        if os_xml.xmltreefile.find('nvram') is None:
            replace_os_disk(vm_xml, vm_name, nvram)
            vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    if vm.is_alive():
        vm.destroy()

    vm2 = None
    if multi_vms:
        if len(vm_names) > 1:
            vm2_name = vm_names[1]
            vm2 = env.get_vm(vm2_name)
            vm2_xml = VMXML.new_from_inactive_dumpxml(vm2_name)
            vm2_xml_backup = vm2_xml.copy()
        else:
            # Clone additional vms if needed
            try:
                utils_path.find_command("virt-clone")
            except utils_path.CmdNotFoundError:
                if not utils_package.package_install(["virt-install"]):
                    test.cancel("Failed to install virt-install on host")
            vm2_name = "vm2_" + utils_misc.generate_random_string(5)
            ret_clone = utils_libguestfs.virt_clone_cmd(vm_name, vm2_name,
                                                        True, timeout=360, debug=True)
            if ret_clone.exit_status:
                test.error("Need more than one domains, but error occured when virt-clone.")
            vm2 = vm.clone(vm2_name)
            vm2_xml = VMXML.new_from_inactive_dumpxml(vm2_name)
        if vm2.is_alive():
            vm2.destroy()

    service_mgr = service.ServiceManager()

    def check_dumpxml(vm_name):
        """
        Check whether the added devices are shown in the guest xml

        :param vm_name: current vm name
        """
        logging.info("------Checking guest dumpxml------")
        if tpm_model:
            pattern = '<tpm model="%s">' % tpm_model
        else:
            # The default tpm model is "tpm-tis"
            pattern = '<tpm model="tpm-tis">'
        # Check tpm model
        xml_after_adding_device = VMXML.new_from_dumpxml(vm_name)
        logging.debug("xml after add tpm dev is %s", xml_after_adding_device)
        if pattern not in astring.to_text(xml_after_adding_device):
            test.fail("Can not find the %s tpm device xml "
                      "in the guest xml file." % tpm_model)
        # Check backend type
        pattern = '<backend type="%s"' % backend_type
        if pattern not in astring.to_text(xml_after_adding_device):
            test.fail("Can not find the %s backend type xml for tpm dev "
                      "in the guest xml file." % backend_type)
        # Check backend version
        if backend_version:
            check_ver = backend_version if backend_version != 'none' else '2.0'
            pattern = '"emulator" version="%s"' % check_ver
            if pattern not in astring.to_text(xml_after_adding_device):
                test.fail("Can not find the %s backend version xml for tpm dev "
                          "in the guest xml file." % check_ver)
        # Check device path
        if backend_type == "passthrough":
            pattern = '<device path="/dev/tpm0"'
            if pattern not in astring.to_text(xml_after_adding_device):
                test.fail("Can not find the %s device path xml for tpm dev "
                          "in the guest xml file." % device_path)
        # Check encryption secret
        if prepare_secret:
            pattern = '<encryption secret="%s" />' % encryption_uuid
            if pattern not in astring.to_text(xml_after_adding_device):
                test.fail("Can not find the %s secret uuid xml for tpm dev "
                          "in the guest xml file." % encryption_uuid)
        logging.info('------PASS on guest dumpxml check------')

    def check_qemu_cmd_line(vm, vm_name, domid):
        """
        Check whether the added devices are shown in the qemu cmd line

        :param vm: current vm
        :param vm_name: current vm name
        :param domid: domain id for checking vtpm socket file
        """
        logging.info("------Checking qemu cmd line------")
        if not vm.get_pid():
            test.fail('VM pid file missing.')
        with open('/proc/%s/cmdline' % vm.get_pid()) as cmdline_file:
            cmdline = cmdline_file.read()
            logging.debug("Qemu cmd line info:\n %s", cmdline)
        # Check tpm model
        pattern_list = ["-device.%s" % tpm_model]
        # Check backend type
        if backend_type == "passthrough":
            dev_num = re.search(r"\d+", device_path).group()
            backend_segment = "id=tpm-tpm%s" % dev_num
        else:
            # emulator backend
            backend_segment = "id=tpm-tpm0,chardev=chrtpm"
        pattern_list.append("-tpmdev.%s,%s" % (backend_type, backend_segment))
        # Check chardev socket for vtpm
        if backend_type == "emulator":
            pattern_list.append("-chardev.socket,id=chrtpm,"
                                "path=.*/run/libvirt/qemu/swtpm/%s-%s-swtpm.sock" % (domid, vm_name))
        for pattern in pattern_list:
            if not re.search(pattern, cmdline):
                if not remove_dev:
                    test.fail("Can not find the %s for tpm device "
                              "in qemu cmd line." % pattern)
            elif remove_dev:
                test.fail("%s still exists after remove vtpm and restart" % pattern)
        logging.info("------PASS on qemu cmd line check------")

    def check_swtpm(domid, domuuid, vm_name):
        """
        Check swtpm cmdline and files for vtpm.

        :param domid: domain id for checking vtpm files
        :param domuuid: domain uuid for checking vtpm state file
        :param vm_name: current vm name
        """
        logging.info("------Checking swtpm cmdline and files------")
        # Check swtpm cmdline
        swtpm_pid = utils_misc.get_pid("%s-swtpm.pid" % vm_name)
        if not swtpm_pid:
            if not remove_dev:
                test.fail('swtpm pid file missing.')
            else:
                return
        elif remove_dev:
            test.fail('swtpm pid file still exists after remove vtpm and restart')
        with open('/proc/%s/cmdline' % swtpm_pid) as cmdline_file:
            cmdline = cmdline_file.read()
            logging.debug("Swtpm cmd line info:\n %s", cmdline)
        pattern_list = ["--daemon", "--ctrl", "--tpmstate", "--log", "--tpm2", "--pid"]
        if prepare_secret:
            pattern_list.extend(["--key", "--migration-key"])
        for pattern in pattern_list:
            if not re.search(pattern, cmdline):
                test.fail("Can not find the %s for tpm device "
                          "in swtpm cmd line." % pattern)
        # Check swtpm files
        file_list = ["/var/run/libvirt/qemu/swtpm/%s-%s-swtpm.sock" % (domid, vm_name)]
        file_list.append("/var/lib/libvirt/swtpm/%s/tpm2" % domuuid)
        file_list.append("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm_name)
        file_list.append("/var/run/libvirt/qemu/swtpm/%s-%s-swtpm.pid" % (domid, vm_name))
        for swtpm_file in file_list:
            if not os.path.exists(swtpm_file):
                test.fail("Swtpm file: %s does not exist" % swtpm_file)
        logging.info("------PASS on Swtpm cmdline and files check------")

    def get_tpm2_tools_cmd(session=None):
        """
        Get tpm2-tools pkg version and return corresponding getrandom cmd

        :session: guest console session
        :return: tpm2_getrandom cmd usage
        """
        cmd = 'rpm -q tpm2-tools'
        get_v_tools = session.cmd(cmd) if session else process.run(cmd).stdout_text
        v_tools_list = get_v_tools.strip().split('-')
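        # e.g. (illustration): 'tpm2-tools-4.1.1-1.el8.x86_64'.split('-')
        # gives ['tpm2', 'tools', '4.1.1', '1.el8.x86_64'], so index 2
        # holds the version string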
        if session:
            logging.debug("The tpm2-tools version is %s", v_tools_list[2])
        v_tools = int(v_tools_list[2].split('.')[0])
        return "tpm2_getrandom 8" if v_tools < 4 else "tpm2_getrandom -T device:/dev/tpm0 8 --hex"

    def get_host_tpm_bef(tpm_v):
        """
        Test host tpm function and identify its real version before passthrough
        Since dmesg info sometimes doesn't include tpm messages, we need to
        use tpm-tools or tpm2-tools to try the function.

        :param tpm_v: host tpm version get from dmesg info
        :return: host tpm version
        """
        logging.info("------Checking host tpm device before passthrough------")
        # Try tcsd tool for suspected tpm1.2 chip on host
        tpm_real_v = tpm_v
        if tpm_v != "2.0":
            if not service_mgr.start('tcsd'):
                # service_mgr.start() returns True on success
                if tpm_v == "1.2":
                    test.fail("Host tcsd.serivce start failed")
                else:
                    # Means tpm_v got nothing from dmesg, log failure here and
                    # go to next 'if' to try tpm2.0 tools.
                    logging.info("Host tcsd.serivce start failed")
            else:
                tpm_real_v = "1.2"
                logging.info("Host tpm version info:")
                result = process.run("tpm_version", ignore_status=False)
                logging.debug("[host]# tpm_version\n %s", result.stdout)
                time.sleep(2)
                service_mgr.stop('tcsd')
        if tpm_v != "1.2":
            # Try tpm2.0 tools
            if not utils_package.package_install("tpm2-tools"):
                test.error("Failed to install tpm2-tools on host")
            tpm2_getrandom_cmd = get_tpm2_tools_cmd()
            if process.run(tpm2_getrandom_cmd, ignore_status=True).exit_status:
                test.cancel("Both tcsd and tpm2-tools can not work, "
                            "pls check your host tpm version and test env.")
            else:
                tpm_real_v = "2.0"
        logging.info("------PASS on host tpm device check------")
        return tpm_real_v

    def test_host_tpm_aft(tpm_real_v):
        """
        Test host tpm function after passthrough

        :param tpm_real_v: host tpm real version identified from testing
        """
        logging.info("------Checking host tpm device after passthrough------")
        if tpm_real_v == "1.2":
            if service_mgr.start('tcsd'):
                time.sleep(2)
                service_mgr.stop('tcsd')
                test.fail("Host tpm should not work after passthrough to guest.")
            else:
                logging.info("Expected failure: Tpm is being used by guest.")
        elif tpm_real_v == "2.0":
            tpm2_getrandom_cmd = get_tpm2_tools_cmd()
            if not process.run(tpm2_getrandom_cmd, ignore_status=True).exit_status:
                test.fail("Host tpm should not work after passthrough to guest.")
            else:
                logging.info("Expected failure: Tpm is being used by guest.")
        logging.info("------PASS on host tpm device check------")

    def test_guest_tpm(expect_version, session, expect_fail):
        """
        Test tpm function in guest

        :param expect_version: expected guest tpm version (the host version,
                               or the emulator-specified one)
        :param session: Guest session to be tested
        :param expect_fail: whether the guest tpm is expected to fail to work
        """
        logging.info("------Checking guest tpm device work------")
        if expect_version == "1.2":
            # Install tpm-tools and test by tcsd method
            if not utils_package.package_install(["tpm-tools"], session, 360):
                test.error("Failed to install tpm-tools package in guest")
            else:
                status, output = session.cmd_status_output("systemctl start tcsd")
                logging.debug("Command output: %s", output)
                if status:
                    if expect_fail:
                        test.cancel("tpm-crb passthrough only works with host tpm2.0, "
                                    "but your host tpm version is 1.2")
                    else:
                        test.fail("Failed to start tcsd.service in guest")
                else:
                    dev_output = session.cmd_output("ls /dev/|grep tpm")
                    logging.debug("Command output: %s", dev_output)
                    status, output = session.cmd_status_output("tpm_version")
                    logging.debug("Command output: %s", output)
                    if status:
                        test.fail("Guest tpm can not work")
        else:
            # If expect_version is tpm2.0, install and test by tpm2-tools
            if not utils_package.package_install(["tpm2-tools"], session, 360):
                test.error("Failed to install tpm2-tools package in guest")
            else:
                tpm2_getrandom_cmd = get_tpm2_tools_cmd(session)
                status1, output1 = session.cmd_status_output("ls /dev/|grep tpm")
                logging.debug("Command output: %s", output1)
                status2, output2 = session.cmd_status_output(tpm2_getrandom_cmd)
                logging.debug("Command output: %s", output2)
                if status1 or status2:
                    if not expect_fail:
                        test.fail("Guest tpm can not work")
                    else:
                        d_status, d_output = session.cmd_status_output("date")
                        if d_status:
                            test.fail("Guest OS doesn't work well")
                        logging.debug("Command output: %s", d_output)
                elif expect_fail:
                    test.fail("Expect fail but guest tpm still works")
        logging.info("------PASS on guest tpm device work check------")

    def run_test_suite_in_guest(session):
        """
        Run kernel test suite for guest tpm.

        :param session: Guest session to be tested
        """
        logging.info("------Checking kernel test suite for guest tpm------")
        boot_info = session.cmd('uname -r').strip().split('.')
        kernel_version = '.'.join(boot_info[:2])
        # Download test suite per current guest kernel version
        parent_path = "https://cdn.kernel.org/pub/linux/kernel"
        # Compare (major, minor) numerically; float('5.10') < 5.3 would mis-sort
        if (int(boot_info[0]), int(boot_info[1])) < (5, 3):
            major_version = "5"
            file_version = "5.3"
        else:
            major_version = boot_info[0]
            file_version = kernel_version
        src_url = "%s/v%s.x/linux-%s.tar.xz" % (parent_path, major_version, file_version)
        download_cmd = "wget %s -O %s" % (src_url, "/root/linux.tar.xz")
        output = session.cmd_output(download_cmd, timeout=480)
        logging.debug("Command output: %s", output)
        # Install necessary pkgs to build test suite
        if not utils_package.package_install(["tar", "make", "gcc", "rsync", "python2"], session, 360):
            test.fail("Failed to install specified pkgs in guest OS.")
        # Unzip the downloaded test suite
        status, output = session.cmd_status_output("tar xvJf /root/linux.tar.xz -C /root")
        if status:
            test.fail("Uzip failed: %s" % output)
        # Run the test suite with python2, which it supports
        test_path = "/root/linux-%s/tools/testing/selftests" % file_version
        sed_cmd = "sed -i 's/python -m unittest/python2 -m unittest/g' %s/tpm2/test_*.sh" % test_path
        output = session.cmd_output(sed_cmd)
        logging.debug("Command output: %s", output)
        # Build and run the .sh files of the test suite
        status, output = session.cmd_status_output("make -C %s TARGETS=tpm2 run_tests" % test_path, timeout=360)
        logging.debug("Command output: %s", output)
        if status:
            test.fail("Failed to run test suite in guest OS.")
        for test_sh in ["test_smoke.sh", "test_space.sh"]:
            pattern = "ok .* selftests: tpm2: %s" % test_sh
            if not re.search(pattern, output) or ("not ok" in output):
                test.fail("test suite check failed.")
        logging.info("------PASS on kernel test suite check------")

    def reuse_by_vm2(tpm_dev):
        """
        Try to add the same tpm to a second guest while it is being used
        by the first one.

        :param tpm_dev: tpm device to be added into guest xml
        """
        logging.info("------Trying to add same tpm to a second domain------")
        vm2_xml.remove_all_device_by_type('tpm')
        vm2_xml.add_device(tpm_dev)
        vm2_xml.sync()
        ret = virsh.start(vm2_name, ignore_status=True, debug=True)
        if backend_type == "passthrough":
            if ret.exit_status:
                logging.info("Expected failure when try to passthrough a tpm"
                             " that being used by another guest")
                return
            test.fail("Reuse a passthroughed tpm should not succeed.")
        elif ret.exit_status:
            # emulator backend
            test.fail("Vtpm for each guest should not interfere with each other")

    try:
        tpm_real_v = None
        sec_uuids = []
        new_name = ""
        virsh_dargs = {"debug": True, "ignore_status": False}
        vm_xml.remove_all_device_by_type('tpm')
        tpm_dev = Tpm()
        if tpm_model:
            tpm_dev.tpm_model = tpm_model
        if not no_backend:
            backend = tpm_dev.Backend()
            if backend_type != 'none':
                backend.backend_type = backend_type
                if backend_type == "passthrough":
                    tpm_real_v = get_host_tpm_bef(tpm_v)
                    logging.debug("The host tpm real version is %s", tpm_real_v)
                    if device_path:
                        backend.device_path = device_path
                if backend_type == "emulator":
                    if backend_version != 'none':
                        backend.backend_version = backend_version
                    if prepare_secret:
                        auth_sec_dict = {"sec_ephemeral": "no",
                                         "sec_private": "yes",
                                         "sec_desc": "sample vTPM secret",
                                         "sec_usage": "vtpm",
                                         "sec_name": "VTPM_example"}
                        encryption_uuid = libvirt.create_secret(auth_sec_dict)
                        if secret_value != 'none':
                            virsh.secret_set_value(encryption_uuid, "open sesame", encode=True, debug=True)
                        sec_uuids.append(encryption_uuid)
                        if encrypt_change != 'encrpt':
                            # plain_to_encrypt will not add encryption on first start
                            if secret_uuid == 'invalid':
                                encryption_uuid = encryption_uuid[:-1]
                            backend.encryption_secret = encryption_uuid
                        if secret_uuid == "change":
                            auth_sec_dict["sec_desc"] = "sample2 vTPM secret"
                            auth_sec_dict["sec_name"] = "VTPM_example2"
                            new_encryption_uuid = libvirt.create_secret(auth_sec_dict)
                            virsh.secret_set_value(new_encryption_uuid, "open sesame", encode=True, debug=True)
                            sec_uuids.append(new_encryption_uuid)
                    if secret_uuid == 'nonexist':
                        backend.encryption_secret = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"
            tpm_dev.backend = backend
        logging.debug("tpm dev xml to add is:\n %s", tpm_dev)
        for num in range(tpm_num):
            vm_xml.add_device(tpm_dev, True)
        ret = virsh.define(vm_xml.xml, ignore_status=True, debug=True)
        expected_match = ""
        if not err_msg:
            expected_match = "Domain .*%s.* defined from %s" % (vm_name, vm_xml.xml)
        libvirt.check_result(ret, err_msg, "", False, expected_match)
        if err_msg:
            # Stop test when get expected failure
            return
        if vm_operate != "restart":
            check_dumpxml(vm_name)
        # For the default model, no need to start the guest to test
        if tpm_model:
            expect_fail = False
            try:
                vm.start()
            except VMStartError as detail:
                if secret_value == 'none' or secret_uuid == 'nonexist':
                    logging.debug("Expected failure: %s", detail)
                    return
                else:
                    test.fail(detail)
            domuuid = vm.get_uuid()
            if vm_operate or restart_libvirtd:
                # Make sure the OS works before the vm operation or libvirtd restart
                session = vm.wait_for_login()
                test_guest_tpm("2.0", session, False)
                session.close()
                if restart_libvirtd:
                    utils_libvirtd.libvirtd_restart()
                swtpm_statedir = "/var/lib/libvirt/swtpm/%s" % domuuid
                if vm_operate == "resume":
                    virsh.suspend(vm_name, **virsh_dargs)
                    time.sleep(3)
                    virsh.resume(vm_name, **virsh_dargs)
                elif vm_operate == "snapshot":
                    virsh.snapshot_create_as(vm_name, "sp1 --memspec file=/tmp/testvm_sp1", **virsh_dargs)
                elif vm_operate in ["restart", "create"]:
                    vm.destroy()
                    if vm_operate == "create":
                        virsh.undefine(vm_name, options="--nvram", **virsh_dargs)
                        if os.path.exists(swtpm_statedir):
                            test.fail("Swtpm state dir: %s still exist after vm undefine" % swtpm_statedir)
                        virsh.create(vm_xml.xml, **virsh_dargs)
                    else:
                        if vm_oprt == "domrename":
                            new_name = "vm_" + utils_misc.generate_random_string(5)
                            virsh.domrename(vm_name, new_name, **virsh_dargs)
                            new_vm = libvirt_vm.VM(new_name, vm.params, vm.root_dir, vm.address_cache)
                            vm = new_vm
                            vm_name = new_name
                        elif secret_value == 'change':
                            logging.info("Changing secret value...")
                            virsh.secret_set_value(encryption_uuid, "new sesame", encode=True, debug=True)
                        elif not restart_libvirtd:
                            # remove_dev or do other vm operations during restart
                            vm_xml.remove_all_device_by_type('tpm')
                            if secret_uuid == "change" or encrypt_change:
                                # Change secret uuid, or change encryption state:
                                # from plain to encrypted, or the reverse
                                if encrypt_change == 'plain':
                                    # Change from encrypted state to plain: redefine a tpm dev without encryption
                                    tpm_dev = Tpm()
                                    tpm_dev.tpm_model = tpm_model
                                    backend = tpm_dev.Backend()
                                    backend.backend_type = backend_type
                                    backend.backend_version = backend_version
                                else:
                                    # Use a new secret's uuid
                                    if secret_uuid == "change":
                                        encryption_uuid = new_encryption_uuid
                                    backend.encryption_secret = encryption_uuid
                                tpm_dev.backend = backend
                                logging.debug("The new tpm dev xml to add for restart vm is:\n %s", tpm_dev)
                                vm_xml.add_device(tpm_dev, True)
                            if encrypt_change in ['encrpt', 'plain']:
                                # Avoid the undefine done by sync() removing the state file
                                vm_xml.define()
                            else:
                                vm_xml.sync()
                        if rm_statefile:
                            swtpm_statefile = "%s/tpm2/tpm2-00.permall" % swtpm_statedir
                            logging.debug("Removing state file: %s", swtpm_statefile)
                            os.remove(swtpm_statefile)
                        ret = virsh.start(vm_name, ignore_status=True, debug=True)
                        libvirt.check_exit_status(ret, status_error)
                        if status_error and ret.exit_status != 0:
                            return
                    if not remove_dev:
                        check_dumpxml(vm_name)
                elif vm_operate == 'managedsave':
                    virsh.managedsave(vm_name, **virsh_dargs)
                    time.sleep(5)
                    if secret_value == 'change':
                        logging.info("Changing secret value...")
                        virsh.secret_set_value(encryption_uuid, "new sesame", encode=True, debug=True)
                        if rm_statefile:
                            swtpm_statefile = "%s/tpm2/tpm2-00.permall" % swtpm_statedir
                            logging.debug("Removing state file: %s", swtpm_statefile)
                            os.remove(swtpm_statefile)
                    ret = virsh.start(vm_name, ignore_status=True, debug=True)
                    libvirt.check_exit_status(ret, status_error)
                    if status_error and ret.exit_status != 0:
                        return
            domid = vm.get_id()
            check_qemu_cmd_line(vm, vm_name, domid)
            if backend_type == "passthrough":
                if tpm_real_v == "1.2" and tpm_model == "tpm-crb":
                    expect_fail = True
                expect_version = tpm_real_v
                test_host_tpm_aft(tpm_real_v)
            else:
                # emulator backend
                if remove_dev:
                    expect_fail = True
                expect_version = backend_version
                check_swtpm(domid, domuuid, vm_name)
            session = vm.wait_for_login()
            if test_suite:
                run_test_suite_in_guest(session)
            else:
                test_guest_tpm(expect_version, session, expect_fail)
            session.close()
            if multi_vms:
                reuse_by_vm2(tpm_dev)
                if backend_type != "passthrough":
                    # emulator backend
                    check_dumpxml(vm2_name)
                    domid = vm2.get_id()
                    domuuid = vm2.get_uuid()
                    check_qemu_cmd_line(vm2, vm2_name, domid)
                    check_swtpm(domid, domuuid, vm2_name)
                    session = vm2.wait_for_login()
                    test_guest_tpm(backend_version, session, expect_fail)
                    session.close()

    finally:
        # Remove renamed domain if it exists
        if new_name:
            virsh.remove_domain(new_name, "--nvram", debug=True)
        if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % new_name):
            os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % new_name)
        # Remove snapshot if exists
        if vm_operate == "snapshot":
            snapshot_lists = virsh.snapshot_list(vm_name)
            if len(snapshot_lists) > 0:
                libvirt.clean_up_snapshots(vm_name, snapshot_lists)
                for snap in snapshot_lists:
                    virsh.snapshot_delete(vm_name, snap, "--metadata")
                if os.path.exists("/tmp/testvm_sp1"):
                    os.remove("/tmp/testvm_sp1")
        # Clear guest os
        if test_suite:
            session = vm.wait_for_login()
            logging.info("Removing dir /root/linux-*")
            output = session.cmd_output("rm -rf /root/linux-*")
            logging.debug("Command output:\n %s", output)
            session.close()
        if vm_operate == "create":
            vm.define(vm_xml.xml)
        vm_xml_backup.sync(options="--nvram --managed-save")
        # Remove swtpm log file in case of impact on later runs
        if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm.name):
            os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm.name)
        for sec_uuid in set(sec_uuids):
            virsh.secret_undefine(sec_uuid, ignore_status=True, debug=True)
        if vm2:
            if len(vm_names) > 1:
                vm2_xml_backup.sync(options="--nvram")
            else:
                virsh.remove_domain(vm2_name, "--nvram --remove-all-storage", debug=True)
            if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm2.name):
                os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm2.name)
Example #41
0
def run(test, params, env):
    """
    Test virsh {at|de}tach-interface command.

    1) Prepare test environment and its parameters
    2) Operate virsh on one or more devices
    3) Check functionality of each device
    4) Check functionality of mmconfig option
    5) Restore domain
    6) Handle results
    """

    dev_obj = params.get("vadu_dev_objs")
    # Skip chardev hotplug on rhel6 host as it is not supported
    if "Serial" in dev_obj:
        if not libvirt_version.version_compare(1, 1, 0):
            raise error.TestNAError("You libvirt version not supported"
                                    " attach/detach Serial devices")

    logging.info("Preparing initial VM state")
    # Prepare test environment and its parameters
    test_params = TestParams(params, env, test)
    if test_params.start_vm:
        # Make sure VM is working
        test_params.main_vm.verify_alive()
        test_params.main_vm.wait_for_login().close()
    else:  # VM not supposed to be started
        if test_params.main_vm.is_alive():
            test_params.main_vm.destroy(gracefully=True)
    # Capture backup of original XML early in test
    test_params.vmxml = VMXML.new_from_inactive_dumpxml(
        test_params.main_vm.name)
    # All devices should share same access state
    test_params.virsh = virsh.Virsh(ignore_status=True)
    logging.info("Creating %d test device instances", len(test_params.devs))
    # Create test objects from cfg. class names via subclasses above
    test_devices = [globals()[class_name](test_params)  # instantiate
                    for class_name in test_params.devs]  # vadu_dev_objs
    operational_results = []
    preboot_results = []
    pstboot_results = []
    try:
        operational_action(test_params, test_devices, operational_results)
        # Fail early if attach-device return value is not expected
        analyze_results(test_params=test_params,
                        operational_results=operational_results)

        #  Can't do functional testing with a cold VM, only test hot-attach
        preboot_action(test_params, test_devices, preboot_results)

        logging.info("Preparing test VM state for post-boot functional testing")
        if test_params.start_vm:
            # Hard-reboot required
            test_params.main_vm.destroy(gracefully=True,
                                        free_mac_addresses=False)
        try:
            test_params.main_vm.start()
        except virt_vm.VMStartError:
            raise error.TestFail('VM Failed to start for some reason!')
        # Signal devices reboot is finished
        for test_device in test_devices:
            test_device.booted = True
        test_params.main_vm.wait_for_login().close()
        postboot_action(test_params, test_devices, pstboot_results)
        analyze_results(test_params=test_params,
                        preboot_results=preboot_results,
                        pstboot_results=pstboot_results)
    finally:
        logging.info("Restoring VM from backup, then checking results")
        test_params.main_vm.destroy(gracefully=False,
                                    free_mac_addresses=False)
        test_params.vmxml.undefine()
        test_params.vmxml.restore()  # Recover the original XML
        test_params.vmxml.define()
        if not test_params.start_vm:
            # Test began with the VM not started, so shut it down again.
            test_params.main_vm.destroy(gracefully=True)
        # Device cleanup can raise multiple exceptions, do it last:
        logging.info("Cleaning up test devices")
        test_params.cleanup(test_devices)
Example #42
0
                pci_br_kwargs['index'] = new_index
                pci_br_kwargs['address'] = pci_br_kwargs['address'] % (new_bus)
                logging.debug('pci_br_kwargs: %s', pci_br_kwargs)

                pcie_br = create_pci_device(pci_model, pci_model_name, **pci_br_kwargs)
                vmxml.add_device(pcie_br)
                result_to_check = virsh.define(vmxml.xml, debug=True)

            # Test define & start an VM with/without pcie-to-pci-bridge
            if case.startswith('vm_with_pcie_br'):
                if case.endswith('1_br'):
                    pass
                elif case.endswith('multi_br'):
                    # Add $pcie_br_count pcie-root-ports to the vm
                    for i in range(pcie_br_count):
                        temp_xml = VMXML.new_from_inactive_dumpxml(vm_name)
                        port = create_pci_device('pcie-root-port', 'pcie-root-port')
                        temp_xml.add_device(port)
                        temp_xml.sync()

                    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
                    pci_root_ports = [dev for dev in vmxml.get_devices('controller')
                                      if dev.type == 'pci' and
                                      dev.model == 'pcie-root-port']
                    indexes = sorted([hex(int(dev.index)) for dev in pci_root_ports])
                    logging.debug('indexes: %s', indexes)

                    # Add $pcie_br_count pcie-to-pci-bridges to the vm,
                    # each on a separate pcie-root-port
                    for i in range(pcie_br_count):
                        new_kwargs = {

def run(test, params, env):
    """
    Test DAC in save/restore domain to nfs pool.

    (1). Init variables for test.
    (2). Create nfs pool.
    (3). Start VM and check result.
    (4). Save domain to the nfs pool.
    (5). Restore domain from the nfs file.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("dac_nfs_save_restore_host_selinux",
                               "enforcing")
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user")
    qemu_group = params.get("qemu_group")
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")
    # Get variables about pool vol
    virt_use_nfs = params.get("virt_use_nfs", "off")
    nfs_server_dir = params.get("nfs_server_dir", "nfs-server")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    export_options = params.get("export_options", "rw,async,no_root_squash")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format")
    bk_file_name = params.get("bk_file_name")
    # Get pool file variables
    pre_file = "yes" == params.get("pre_file", "yes")
    pre_file_name = params.get("pre_file_name", "dac_nfs_file")
    file_tup = ("file_user", "file_group", "file_mode")
    file_val = []
    for i in file_tup:
        try:
            file_val.append(int(params.get(i)))
        except ValueError:
            raise error.TestNAError("%s value '%s' is not a number." %
                                    (i, params.get(i)))
    file_user, file_group, file_mode = file_val
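    # Note (values assumed for illustration): the cfg supplies plain
    # integers, e.g. file_mode=420, which is 0o644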

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    if vm.is_alive():
        vm.destroy()

    # Back up the DAC ownership labels of domain disks
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        f = os.open(disk_path, 0)
        stat_re = os.fstat(f)
        backup_labels_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                       stat_re.st_gid)
        os.close(f)

    # Backup selinux status of host.
    backup_sestatus = utils_selinux.get_status()

    pvt = None
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # Chown the domain disks to avoid failures on local disks
        for disk in disks.values():
            disk_path = disk['source']
            if qemu_user == "root":
                os.chown(disk_path, 0, 0)
            elif qemu_user == "qemu":
                os.chown(disk_path, 107, 107)
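                # 107:107 is assumed to be the qemu user/group id here,
                # as is typical on RHEL/Fedora hosts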

        # Set selinux of host.
        utils_selinux.set_status(host_sestatus)

        # Set qemu.conf options
        qemu_conf.user = qemu_user
        qemu_conf.group = qemu_user
        if dynamic_ownership:
            qemu_conf.dynamic_ownership = 1
        else:
            qemu_conf.dynamic_ownership = 0
        logging.debug("the qemu.conf content is: %s" % qemu_conf)
        libvirtd.restart()

        # Create dst pool for save/restore
        logging.debug("export_options is: %s" % export_options)
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(pool_name,
                     pool_type,
                     pool_target,
                     emulated_image,
                     image_size="1G",
                     pre_disk_vol=["20M"],
                     export_options=export_options)

        # Set virt_use_nfs
        result = utils.run("setsebool virt_use_nfs %s" % virt_use_nfs)
        if result.exit_status:
            raise error.TestNAError("Failed to set virt_use_nfs value")

        # Create a file on nfs server dir.
        tmp_dir = data_dir.get_tmp_dir()
        nfs_path = os.path.join(tmp_dir, nfs_server_dir)
        server_file_path = os.path.join(nfs_path, pre_file_name)
        if pre_file and not os.path.exists(server_file_path):
            open(server_file_path, 'a').close()
        if not pre_file and os.path.exists(server_file_path):
            raise error.TestNAError("File %s already exist in pool %s" %
                                    (server_file_path, pool_name))

        # Get nfs mount file path
        mnt_path = os.path.join(tmp_dir, pool_target)
        mnt_file_path = os.path.join(mnt_path, pre_file_name)

        # Change img ownership and mode on nfs server dir
        if pre_file:
            os.chown(server_file_path, file_user, file_group)
            os.chmod(server_file_path, file_mode)

        # Start VM.
        try:
            vm.start()
            # Start VM successfully.
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            raise error.TestFail("Domain failed to start, error: %s" % e)

        label_before = check_ownership(server_file_path)
        if label_before:
            logging.debug("file ownership on nfs server before save: %s" %
                          label_before)

        # Save domain to nfs pool file
        save_re = virsh.save(vm_name, mnt_file_path, debug=True)
        if save_re.exit_status:
            if not status_error:
                raise error.TestFail("Failed to save domain to nfs pool file.")
        else:
            if status_error:
                raise error.TestFail("Save domain to nfs pool file succeeded, "
                                     "expected Fail.")

        label_after = check_ownership(server_file_path)
        if label_after:
            logging.debug("file ownership on nfs server after save: %s" %
                          label_after)

        # Restore domain from the nfs pool file
        if not save_re.exit_status:
            restore_re = virsh.restore(mnt_file_path, debug=True)
            if restore_re.exit_status:
                if not status_error:
                    raise error.TestFail("Failed to restore domain from nfs "
                                         "pool file.")
            else:
                if status_error:
                    raise error.TestFail("Restore domain from nfs pool file "
                                         "succeeded, expected Fail.")

            label_after_rs = check_ownership(server_file_path)
            if label_after_rs:
                logging.debug(
                    "file ownership on nfs server after restore: %s" %
                    label_after_rs)
Example #44
0
def run(test, params, env):
    """
    Test for USB device passthrough to libvirt guest.
        1. Get params.
        2. Store the result of 'lsusb' on guest.
        3. Passthrough USB device to guest.
        4. Start guest and get the result of 'lsusb' on guest.
        5. Compare the result of 'lsusb' before and after
           passthrough of USB device to guest.
    """
    def return_usbs_in_guest():
        """
        This function returns the usb device IDs found on the guest
        as a newline-separated string
        """
        timeout = float(params.get("login_timeout", 240))
        session = vm.wait_for_login(timeout=timeout)
        if (session.cmd_status("lsusb")):
            session.close()
            raise error.TestNAError("SKIP:lsusb command has errored out,"
                                    "please fix any issues with lsusb command"
                                    " on guest")
        usb_list = session.cmd_output("lsusb|awk '{print $6}'")
        session.close()
        return usb_list

    def return_usbs_on_host():
        """
        This function returns the usb devices found
        on host system as a list variable
        """
        existing_usb = utils.run("lsusb",
                                 timeout=10,
                                 ignore_status='True',
                                 verbose=False)
        if existing_usb.exit_status != 0:
            raise error.TestNAError("SKIP: lsusb command has errored out,"
                                    "please fix any issues with lsusb command"
                                    " on host")
        return existing_usb.stdout.strip().splitlines()

    device_type = params.get("usb_dev_label", 'all')
    # Check the parameters from the configuration file.
    # pass_usbs will capture the details of the devices
    # on which the passthrough test will take place
    vm = env.get_vm(params["main_vm"])
    if not vm.is_alive():
        vm.start()
    lsusb_op_bef = return_usbs_in_guest()
    existing_usbs = return_usbs_on_host()
    if device_type == 'all':
        pass_usbs = []
        for usb in existing_usbs:
            if ((usb.split())[5]) in lsusb_op_bef:
                logging.info(
                    "%s already in use within guest, so skipping this "
                    "usb device from passthrough test", ((usb.split())[5]))
                continue
            pass_usbs.append(usb)
        if pass_usbs == []:
            raise error.TestNAError("No USB device found.")
    elif re.match(r'\w{4}:\w{4}', device_type):
        pass_usbs = []
        if not ([usb for usb in existing_usbs if device_type in usb]):
            raise error.TestNAError("Device passed is not a USB device")
        for usb in existing_usbs:
            if (((usb.split())[5]) == device_type):
                if ((usb.split())[5]) in lsusb_op_bef:
                    logging.info("%s inuse within guest,skipping this device",
                                 usb)
                else:
                    pass_usbs.append(usb)
    elif 'ENTER' in device_type:
        raise error.TestNAError("Please enter your device name for test.")
    else:
        raise error.TestNAError("Please enter proper value for device name")
    if pass_usbs == []:
        raise error.TestNAError(
            "No usb devices available or already in use within guest")
    logging.info("Passthrough will occur for following USB devices : %s",
                 pass_usbs)
    pass_reported = []
    fail_reported = []
    failures_reported = 0
    for usb_dev in pass_usbs:
        logging.info("Passthrough started for usb device %s", usb_dev)
        # Take backup of guest xml
        vmxml = VMXML.new_from_inactive_dumpxml(params.get("main_vm"))
        backup_xml = vmxml.copy()
        logging.info("USB devices within guest before passthrough: %s",
                     lsusb_op_bef)
        # Edit guest xml to add hostdev entries for different ports
        usb_address = {}
        usb_address['bus'] = '0x' + (usb_dev.split())[1]
        usb_address['device'] = '0x' + ((usb_dev.split())[3]).strip(':')
        usb_address['vendor_id'] = '0x' + \
            (((usb_dev.split())[5]).split(":"))[0]
        usb_address['product_id'] = '0x' + \
            (((usb_dev.split())[5]).split(":"))[1]
        vmxml.add_hostdev(usb_address, 'subsystem', 'usb', 'yes')
        # Start the guest after passthrough and compare pci/modules/device
        # details with those from before passthrough
        try:
            vmxml.sync()
            # Starting VM since we ran sync in previous step. Else we get
            # VMDeadError
            vm.start()
            lsusb_op_aft = return_usbs_in_guest()
            logging.info("USB devices within guest after passthrough: %s",
                         lsusb_op_aft)
            if lsusb_op_bef == lsusb_op_aft:
                failures_reported = 1
                logging.info("Passthrough failed for USB device %s", usb_dev)
                fail_reported.append(usb_dev)
            else:
                logging.info("Passthrough passed for USB device %s", usb_dev)
                pass_reported.append(usb_dev)
        finally:
            backup_xml.sync()
    logging.info("Summary of USB device passthrough test: ")
    logging.info("Passthrough failed for USB devices %s", fail_reported)
    logging.info("Passthrough passed for USB devices %s", pass_reported)
    if failures_reported:
        raise error.TestFail("USB device passthrough failed for one or more"
                             "devices, see above output for more details")

def run(test, params, env):
    """
    Test for PCI device passthrough to libvirt guest.

    a). NIC:
        1. Get params.
        2. Get the pci device for specific net_name.
        3. Attach pci device to guest.
        4. Start guest and set the ip of guest.
        5. Ping the server_ip from guest
           to verify the new network device.
    b). STORAGE:
        1. Get params.
        2. Get the pci device for specific storage_dev_name.
        3. Store the result of 'fdisk -l' on guest.
        4. Attach pci device to guest.
        5. Start guest and get the result of 'fdisk -l' on guest.
        6. Compare the result of 'fdisk -l' before and after
           attaching storage pci device to guest.
    """
    # get the params from params
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    sriov = ('yes' == params.get("libvirt_pci_SRIOV", 'no'))
    device_type = params.get("libvirt_pci_device_type", "NIC")

    net_name = params.get("libvirt_pci_net_name", "eth0")
    server_ip = params.get("libvirt_pci_server_ip")

    storage_dev_name = params.get("libvirt_pci_storage_dev_name", "/dev/sdb")
    fdisk_list_before = None

    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    pci_address = None
    if device_type == "NIC":
        # Find the pci device for given network device.
        result = virsh.nodedev_list(cap="net")
        nodedev_nets = result.stdout.strip().splitlines()
        device = None
        for nodedev in nodedev_nets:
            netxml = NodedevXML.new_from_dumpxml(nodedev)
            if netxml.cap.interface == net_name:
                device = nodedev
                break
        if not device:
            raise error.TestNAError("There is no network device name of %s." %
                                    net_name)
        pci_dev = netxml.parent

        if sriov:
            # Set the max_vfs parameter of the igb module to 7 so that a
            # virtual function PCI device can be used as the network device:
            #   'modprobe -r igb' unloads the igb module;
            #   'modprobe igb max_vfs=7' reloads it with max_vfs=7;
            #   "echo 'FAIL' > output_file" flags that reloading igb with
            #     max_vfs=7 failed;
            #   the final 'modprobe igb' recovers the network if the
            #     reload failed.
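            # Note: the shell chains '&&' and '||' left-to-right with equal
            # precedence, so the "echo 'FAIL'" fallback (and the recovery
            # 'modprobe igb') run only if the reload chain before '||' failed.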
            output_file = os.path.join(test.tmpdir, "output")
            if os.path.exists(output_file):
                os.remove(output_file)
            mod_cmd = ("modprobe -r igb && modprobe igb max_vfs=7 ||"
                       "echo 'FAIL' > %s && modprobe igb &" % output_file)
            result = utils.run(mod_cmd, ignore_status=True)
            if os.path.exists(output_file):
                raise error.TestError("Failed to modprobe igb with max_vfs=7.")
            # Get the virtual function pci device which was generated above.
            pci_xml = NodedevXML.new_from_dumpxml(pci_dev)
            virt_functions = pci_xml.cap.virt_functions
            if not virt_functions:
                raise error.TestError("Init virtual function failed.")
            pci_address = virt_functions[0]
            pci_dev = utils_test.libvirt.pci_label_from_address(pci_address,
                                                                radix=16)
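            # pci_label_from_address turns the address dict into a node
            # device name, e.g. (hypothetical values) domain 0x0000, bus
            # 0x03, slot 0x00, function 0x1 would map to 'pci_0000_03_00_1'.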

            # Find the network name (ethX) that is using this pci device.
            network_service = service.Factory.create_service("network")
            network_service.restart()
            result = virsh.nodedev_list("net")
            nodedev_nets = result.stdout.strip().splitlines()
            device = None
            for nodedev in nodedev_nets:
                netxml = NodedevXML.new_from_dumpxml(nodedev)
                if netxml.parent == pci_dev:
                    device = nodedev
                    break
            if not device:
                raise error.TestNAError("There is no network name is using "
                                        "Virtual Function PCI device %s." %
                                        pci_dev)

        pci_xml = NodedevXML.new_from_dumpxml(pci_dev)
        pci_address = pci_xml.cap.get_address_dict()

    elif device_type == "STORAGE":
        # Store the result of "fdisk -l" in guest.
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"")
        fdisk_list_before = output.splitlines()

        result = virsh.nodedev_list(cap="storage")
        nodedev_storages = result.stdout.strip().splitlines()
        device = None
        for nodedev in nodedev_storages:
            storage_xml = NodedevXML.new_from_dumpxml(nodedev)
            if storage_xml.cap.block == storage_dev_name:
                device = nodedev
                break
        if not device:
            raise error.TestNAError("There is no block device name of %s." %
                                    storage_dev_name)
        pci_xml = NodedevXML.new_from_dumpxml(storage_xml.parent)

        # In some cases, the parent of target storage device might not be
        # a PCI device, but is of type 'scsi' for example.
        # SKIP these tests with a proper message.
        if pci_xml.cap_type != 'pci':
            raise error.TestNAError("The parent node device of the storage "
                                    "device need to be a PCI device. "
                                    "But parent of %s is a %s device." %
                                    (storage_dev_name, pci_xml.cap_type))
        pci_address = pci_xml.cap.get_address_dict()

    vmxml.add_hostdev(pci_address)

    try:
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        if device_type == "NIC":
            try:
                session.cmd("ping -c 4 %s" % server_ip)
            except aexpect.ShellError as detail:
                raise error.TestFail("Succeeded to set ip on guest, but "
                                     "failed to ping server ip from guest.\n"
                                     "Detail: %s." % detail)
        elif device_type == "STORAGE":
            # Get the result of "fdisk -l" in guest, and compare the result with
            # fdisk_list_before.
            output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"")
            fdisk_list_after = output.splitlines()
            if fdisk_list_after == fdisk_list_before:
                raise error.TestFail("Didn't find the disk attached to guest.")
    finally:
        # clean up: restore the original guest XML
        backup_xml.sync()
Example #46
0
def run(test, params, env):
    """
    Verify hotplug feature for char device
    """

    vm_name = params.get("main_vm", "vm1")
    status_error = "yes" == params.get("status_error", "no")
    char_dev = params.get("char_dev", "file")
    hotplug_type = params.get("hotplug_type", "qmp")
    dup_charid = "yes" == params.get("dup_charid", "no")
    dup_devid = "yes" == params.get("dup_devid", "no")
    diff_devid = "yes" == params.get("diff_devid", "no")

    tmp_dir = os.path.join(test.tmpdir, "hotplug_serial")
    if not os.path.exists(tmp_dir):
        os.mkdir(tmp_dir)
    os.chmod(tmp_dir, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
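    # S_IRWXU | S_IRWXG | S_IRWXO is mode 0777; presumably opened up so
    # the qemu process can create and access chardev backends in tmp_dir.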

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    # add controller for each char device
    devices = vm_xml.get_devices()
    controllers = vm_xml.get_devices(device_type="controller")
    for dev in controllers:
        if dev.type == "virtio-serial":
            devices.remove(dev)
    controller = Controller("controller")
    controller.type = "virtio-serial"
    controller.index = 0
    devices.append(controller)
    vm_xml.set_devices(devices)
    vm_xml.sync()

    # start and login vm
    vm = env.get_vm(vm_name)
    vm.start()
    session = vm.wait_for_login()

    def prepare_channel_xml(to_file, char_type, id=0):
        mode = ''
        if char_type == "file":
            channel_type = char_type
            channel_path = os.path.join(tmp_dir, char_type)
        elif char_type == "socket":
            channel_type = 'unix'
            channel_path = os.path.join(tmp_dir, char_type)
            mode = 'bind'
        elif char_type == "pty":
            channel_type = char_type
            channel_path = ("/dev/pts/%s" % id)
        params = {'channel_type_name': channel_type,
                  'source_path': channel_path,
                  'source_mode': mode,
                  'target_type': 'virtio',
                  'target_name': char_type}
        channel_xml = utlv.create_channel_xml(params, alias=True, address=True)
        shutil.copyfile(channel_xml.xml, to_file)
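        # For the 'socket' case the generated channel corresponds roughly
        # to (a sketch of the expected libvirt XML):
        #   <channel type='unix'>
        #     <source mode='bind' path='.../hotplug_serial/socket'/>
        #     <target type='virtio' name='socket'/>
        #   </channel>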

    def hotplug_device(type, char_dev, id=0):
        tmp_file = os.path.join(tmp_dir, char_dev)
        if type == "qmp":
            char_add_opt = "chardev-add "
            dev_add_opt = "device_add virtserialport,chardev="
            if char_dev == "file":
                char_add_opt += "file,path=%s,id=file" % tmp_file
                dev_add_opt += "file,name=file,bus=virtio-serial0.0,id=file"
            elif char_dev == "socket":
                char_add_opt += "socket,path=%s,server,nowait,id=socket" % tmp_file
                dev_add_opt += "socket,name=socket,bus=virtio-serial0.0,id=socket"
            elif char_dev == "pty":
                char_add_opt += ("pty,path=/dev/pts/%s,id=pty" % id)
                dev_add_opt += "pty,name=pty,bus=virtio-serial0.0,id=pty"
            result = virsh.qemu_monitor_command(vm_name, char_add_opt, "--hmp")
            if result.exit_status:
                test.error('Failed to add chardev %s to %s. Result:\n %s'
                           % (char_dev, vm_name, result))
            result = virsh.qemu_monitor_command(vm_name, dev_add_opt, "--hmp")
            if result.exit_status:
                test.error('Failed to add device %s to %s. Result:\n %s'
                           % (char_dev, vm_name, result))
        elif type == "attach":
            xml_file = os.path.join(tmp_dir, "xml_%s" % char_dev)
            if char_dev in ["file", "socket"]:
                prepare_channel_xml(xml_file, char_dev)
            elif char_dev == "pty":
                prepare_channel_xml(xml_file, char_dev, id)
            result = virsh.attach_device(vm_name, xml_file)
            # serial device was introduced by the following commit,
            # http://libvirt.org/git/?
            # p=libvirt.git;a=commit;h=b63ea467617e3cbee4282ab2e5e780b4119cef3d
            if "unknown device type" in result.stderr:
                test.cancel('Failed to attach %s to %s. Result:\n %s'
                            % (char_dev, vm_name, result))
        return result

    def dup_hotplug(type, char_dev, id, dup_charid=False, dup_devid=False, diff_devid=False):
        tmp_file = os.path.join(tmp_dir, char_dev)
        if type == "qmp":
            char_add_opt = "chardev-add "
            dev_add_opt = "device_add virtserialport,chardev="
            if char_dev == "file":
                if dup_charid:
                    char_add_opt += "file,path=%s,id=file" % tmp_file
                if dup_devid:
                    dev_add_opt += "file,name=file,bus=virtio-serial0.0,id=file"
                if diff_devid:
                    dev_add_opt += "file,name=file,bus=virtio-serial0.0,id=file1"
            elif char_dev == "socket":
                if dup_charid:
                    char_add_opt += "socket,path=%s,server,nowait,id=socket" % tmp_file
                if dup_devid:
                    dev_add_opt += "socket,name=socket,bus=virtio-serial0.0,id=socket"
                if diff_devid:
                    dev_add_opt += "socket,name=socket,bus=virtio-serial0.0,id=socket1"
            elif char_dev == "pty":
                if dup_charid:
                    char_add_opt += "pty,path=/dev/pts/%s,id=pty" % id
                if dup_devid:
                    dev_add_opt += "pty,name=pty,bus=virtio-serial0.0,id=pty"
                if diff_devid:
                    dev_add_opt += "pty,name=pty,bus=virtio-serial0.0,id=pty1"
            if dup_charid:
                result = virsh.qemu_monitor_command(vm_name, char_add_opt, "--hmp")
            if dup_devid or diff_devid:
                result = virsh.qemu_monitor_command(vm_name, dev_add_opt, "--hmp")
        elif type == "attach":
            if dup_devid:
                result = hotplug_device(type, char_dev, id)
        return result

    def confirm_hotplug_result(char_dev, id=0):
        tmp_file = os.path.join(tmp_dir, char_dev)
        serial_file = os.path.join("/dev/virtio-ports", char_dev)
        result = virsh.qemu_monitor_command(vm_name, "info qtree", "--hmp")
        h_o = result.stdout.strip()
        if not h_o.count("name = \"%s\"" % char_dev):
            test.fail("Cann't find device(%s) from:\n%s" % (char_dev, h_o))
        if char_dev == "file":
            session.cmd("echo test > %s" % serial_file)
            with open(tmp_file, "r") as f:
                r_o = f.read()
        elif char_dev == "socket":
            session.cmd("echo test > /tmp/file")
            sock = socket.socket(socket.AF_UNIX)
            sock.connect(tmp_file)
            session.cmd("dd if=/tmp/file of=%s" % serial_file)
            r_o = sock.recv(1024).decode()
        elif char_dev == "pty":
            session.cmd("echo test > /tmp/file")
            session.cmd("dd if=/tmp/file of=%s &" % serial_file)
            dev_file = "/dev/pts/%s" % id
            if not os.path.exists(dev_file):
                test.fail("%s doesn't exist." % dev_file)
            # shell=True with a list argv would drop dev_file, so run
            # the command directly
            p = subprocess.Popen(["/usr/bin/cat", dev_file],
                                 universal_newlines=True,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            session.cmd("echo test >> /tmp/file &")
            while True:
                r_o = p.stdout.readline()
                if r_o or p.poll():
                    break
                time.sleep(0.2)
            p.kill()
        if not r_o.count("test"):
            err_info = "%s device file doesn't match 'test':%s" % (char_dev, r_o)
            test.fail(err_info)

    def unhotplug_serial_device(type, char_dev):
        if type == "qmp":
            del_dev_opt = "device_del %s" % char_dev
            del_char_opt = "chardev-remove %s" % char_dev
            result = virsh.qemu_monitor_command(vm_name, del_dev_opt, "--hmp")
            if result.exit_status:
                test.error('Failed to del device %s from %s.Result:\n%s'
                           % (char_dev, vm_name, result))
            result = virsh.qemu_monitor_command(vm_name, del_char_opt, "--hmp")
        elif type == "attach":
            xml_file = os.path.join(tmp_dir, "xml_%s" % char_dev)
            result = virsh.detach_device(vm_name, xml_file)

    def confirm_unhotplug_result(char_dev):
        serial_file = os.path.join("/dev/virtio-ports", char_dev)
        result = virsh.qemu_monitor_command(vm_name, "info qtree", "--hmp")
        uh_o = result.stdout.strip()
        if uh_o.count("chardev = \"%s\"" % char_dev):
            test.fail("Still can get serial device(%s) from: '%s'"
                      % (char_dev, uh_o))
        if os.path.exists(serial_file):
            test.fail("File '%s' still exists after unhotplug" % serial_file)

    # run test case
    try:
        if char_dev in ['file', 'socket']:
            # if char_dev is file or socket, it doesn't need pts index
            pts_id = 0
        else:
            pts_id = str(utils_misc.aton(utils_misc.get_dev_pts_max_id()) + 1)
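            # the next free pty index is taken as the current maximum
            # /dev/pts entry plus one; the check below errors out if
            # that index is unexpectedly already in use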
            if os.path.exists("/dev/pts/%s" % pts_id):
                test.error('invalid pts index(%s) provided.' % pts_id)
        if status_error:
            hotplug_device(hotplug_type, char_dev, pts_id)
            ret = dup_hotplug(hotplug_type, char_dev, pts_id, dup_charid, dup_devid, diff_devid)
            dup_o = ret.stdout.strip()
            if hotplug_type == "qmp":
                # although the command failed, ret.exit_status is still 0,
                # so match on the error output instead
                err_o1 = "Duplicate ID"
                err_o2 = "Parsing chardev args failed"
                err_o3 = "Property 'virtserialport.chardev' can't"
                if (err_o1 not in dup_o) and (err_o2 not in dup_o) and (err_o3 not in dup_o):
                    test.fail("Expected failure, but it ran successfully:\n%s" % ret)
            else:
                if "chardev already exists" not in dup_o:
                    logging.info("Expected failure, but it ran successfully:\n%s" % ret)
        else:
            if char_dev != "all":
                #1.hotplug serial device
                hotplug_device(hotplug_type, char_dev, pts_id)

                #2.confirm hotplug result
                confirm_hotplug_result(char_dev, pts_id)

                #3.unhotplug serial device
                unhotplug_serial_device(hotplug_type, char_dev)

                #4.confirm unhotplug result
                confirm_unhotplug_result(char_dev)
            else:
                #1.hotplug serial device
                hotplug_device(hotplug_type, "file")
                hotplug_device(hotplug_type, "socket")
                hotplug_device(hotplug_type, "pty", pts_id)

                #2.confirm hotplug result
                confirm_hotplug_result("file")
                confirm_hotplug_result("socket")
                confirm_hotplug_result("pty", pts_id)

                #3.unhotplug serial device
                unhotplug_serial_device(hotplug_type, "file")
                unhotplug_serial_device(hotplug_type, "socket")
                unhotplug_serial_device(hotplug_type, "pty")

                #4.confirm unhotplug result
                confirm_unhotplug_result("file")
                confirm_unhotplug_result("socket")
                confirm_unhotplug_result("pty")
    finally:
        vm_xml_backup.sync()
        if os.path.exists(tmp_dir):
            shutil.rmtree(tmp_dir)
Example #47
0
def run(test, params, env):
    """
    Test svirt with VM save and restore.

    (1).Init variables for test.
    (2).Label the VM and disk with proper label.
    (3).Save VM and check the context.
    (4).Restore VM and check the context.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_save_restore_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_save_restore_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_save_restore_vm_sec_model", "selinux")
    sec_label = params.get("svirt_save_restore_vm_sec_label", None)
    sec_relabel = params.get("svirt_save_restore_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label,
                'relabel': sec_relabel}
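    # sec_dict corresponds to libvirt's seclabel element, roughly
    # (a sketch of the expected XML):
    #   <seclabel type='dynamic' model='selinux' relabel='yes'/>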
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about the image.
    img_label = params.get('svirt_save_restore_disk_label')
    # Label the disks of VM with img_label.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in list(disks.values()):
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        utils_selinux.set_context_of_file(filename=disk_path,
                                          context=img_label)
    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the context of the VM.
    vmxml.set_seclabel([sec_dict])
    vmxml.sync()

    # Init a path to save VM.
    save_path = os.path.join(data_dir.get_tmp_dir(), "svirt_save")
    try:
        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            vm.save_to_file(path=save_path)
            vm.restore_from_file(path=save_path)
            # Save and restore VM successfully.
            if status_error:
                test.fail("Test succeeded in negative case.")
        except virt_vm.VMError as e:
            if not status_error:
                error_msg = "Test failed in positive case.\n error: %s\n" % e
                if str(e).count("getfd"):
                    error_msg += ("More info pleass refer to"
                                  " https://bugzilla.redhat.com/show_bug.cgi?id=976632")
                test.fail(error_msg)
    finally:
        # clean up
        for path, label in list(backup_labels_of_disks.items()):
            utils_selinux.set_context_of_file(filename=path, context=label)
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
Example #48
0
def run(test, params, env):
    """
    Test DAC setting in both domain xml and qemu.conf.

    (1) Init variables for test.
    (2) Set VM xml and qemu.conf with proper DAC label, also set image and
        monitor socket parent dir with proper ownership and mode.
    (3) Start VM and check the context.
    (4) Destroy VM and check the context.
    """

    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("dac_start_destroy_host_selinux", "enforcing")
    qemu_group_user = "******" == params.get("qemu_group_user", "no")
    # Get variables about seclabel for VM.
    sec_type = params.get("dac_start_destroy_vm_sec_type", "dynamic")
    sec_model = params.get("dac_start_destroy_vm_sec_model", "dac")
    sec_label = params.get("dac_start_destroy_vm_sec_label", None)
    sec_relabel = params.get("dac_start_destroy_vm_sec_relabel", "yes")
    security_default_confined = params.get("security_default_confined", None)
    set_process_name = params.get("set_process_name", None)
    sec_dict = {'type': sec_type, 'model': sec_model, 'relabel': sec_relabel}
    if sec_label:
        sec_dict['label'] = sec_label
    set_sec_label = "yes" == params.get("set_sec_label", "no")
    set_qemu_conf = "yes" == params.get("set_qemu_conf", "no")
    qemu_no_usr_grp = "yes" == params.get("qemu_no_usr_grp", "no")
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user", None)
    qemu_group = params.get("qemu_group", None)
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get varialbles about image.
    img_label = params.get('dac_start_destroy_disk_label')
    # Label the disks of VM with img_label.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    qemu_disk_mod = False
    for disk in list(disks.values()):
        disk_path = disk['source']
        f = os.open(disk_path, 0)
        stat_re = os.fstat(f)
        backup_labels_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                       stat_re.st_gid)
        label_list = img_label.split(":")
        os.chown(disk_path, int(label_list[0]), int(label_list[1]))
        os.close(f)
        st = os.stat(disk_path)
        if not bool(st.st_mode & stat.S_IWGRP):
            # add group write permission to the disk (chmod g+w)
            os.chmod(disk_path, st.st_mode | stat.S_IWGRP)
            qemu_disk_mod = True

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    if backup_sestatus == "disabled":
        test.cancel("SELinux is in Disabled "
                    "mode. it must be in Enforcing "
                    "mode to run this test")
    utils_selinux.set_status(host_sestatus)

    def _create_user():
        """
        Create a "vdsm_fake" in 'qemu' group for test
        """
        logging.debug("create a user 'vdsm_fake' in 'qemu' group")
        cmd = "useradd vdsm_fake -G qemu -s /sbin/nologin"
        process.run(cmd, ignore_status=False, shell=True)

    create_qemu_user = False
    qemu_sock_mod = False
    qemu_sock_path = '/var/lib/libvirt/qemu/'
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # Check qemu_group_user
        if qemu_group_user:
            if set_qemu_conf:
                if "EXAMPLE" in qemu_user:
                    if not check_qemu_grp_user("vdsm_fake", test):
                        _create_user()
                        create_qemu_user = True
                    qemu_user = "******"
                    qemu_group = "qemu"
            if set_sec_label:
                if sec_label:
                    if "EXAMPLE" in sec_label:
                        if not check_qemu_grp_user("vdsm_fake", test):
                            _create_user()
                            create_qemu_user = True
                        sec_label = "vdsm_fake:qemu"
                        sec_dict['label'] = sec_label
            st = os.stat(qemu_sock_path)
            if not bool(st.st_mode & stat.S_IWGRP):
                # chmod g+w
                os.chmod(qemu_sock_path, st.st_mode | stat.S_IWGRP)
                qemu_sock_mod = True

        if set_qemu_conf:
            # Transform qemu user and group to "uid:gid"
            qemu_user = qemu_user.replace("+", "")
            qemu_group = qemu_group.replace("+", "")
            qemu_conf_label_trans = format_user_group_str(
                qemu_user, qemu_group)
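
            # format_user_group_str is defined elsewhere in this suite; it
            # is assumed to resolve the names to numeric ids, roughly via
            #   pwd.getpwnam(user).pw_uid and grp.getgrnam(group).gr_gid,
            # e.g. "qemu:qemu" -> "107:107" on a typical host.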

            # Set qemu.conf for user and group
            if qemu_user:
                qemu_conf.user = qemu_user
            if qemu_group:
                qemu_conf.group = qemu_group
            if dynamic_ownership:
                qemu_conf.dynamic_ownership = 1
            else:
                qemu_conf.dynamic_ownership = 0
            if security_default_confined:
                qemu_conf.security_default_confined = security_default_confined
            if set_process_name:
                qemu_conf.set_process_name = set_process_name
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()

        if set_sec_label:
            # Transform seclabel to "uid:gid"
            if sec_label:
                sec_label = sec_label.replace("+", "")
                if ":" in sec_label:
                    user, group = sec_label.split(":")
                    sec_label_trans = format_user_group_str(user, group)

            # Set the context of the VM.
            logging.debug("sec_dict is %s" % sec_dict)
            vmxml.set_seclabel([sec_dict])
            vmxml.sync()
            logging.debug("updated domain xml is: %s" % vmxml.xmltreefile)

        # Start VM to check the qemu process and image.
        try:
            vm.start()
            # Start VM successfully.
            # VM with seclabel can access the image with the context.
            if status_error:
                test.fail("Test succeeded in negative case.")

            # Get vm process label when VM is running.
            vm_pid = vm.get_pid()
            pid_stat = os.stat("/proc/%d" % vm_pid)
            vm_process_uid = pid_stat.st_uid
            vm_process_gid = pid_stat.st_gid
            vm_context = "%s:%s" % (vm_process_uid, vm_process_gid)

            # Get vm image label when VM is running
            f = os.open(list(disks.values())[0]['source'], 0)
            stat_re = os.fstat(f)
            disk_context = "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
            os.close(f)

            # Check vm process and image DAC label after vm start
            if set_sec_label and sec_label:
                if ":" in sec_label:
                    if vm_context != sec_label_trans:
                        test.fail("Label of VM processs is not "
                                  "expected after starting.\nDetail:"
                                  "vm_context=%s, sec_label_trans=%s" %
                                  (vm_context, sec_label_trans))
                    if sec_relabel == "yes":
                        if dynamic_ownership:
                            if disk_context != sec_label_trans:
                                test.fail("Label of disk is not " +
                                          "expected" +
                                          " after VM starting.\n" +
                                          "Detail: disk_context" +
                                          "=%s" % disk_context +
                                          ", sec_label_trans=%s." %
                                          sec_label_trans)
            elif (set_qemu_conf and not security_default_confined
                  and not qemu_no_usr_grp):
                if vm_context != qemu_conf_label_trans:
                    test.fail("Label of VM processs is not expected"
                              " after starting.\nDetail: vm_context="
                              "%s, qemu_conf_label_trans=%s" %
                              (vm_context, qemu_conf_label_trans))
                if disk_context != qemu_conf_label_trans:
                    if dynamic_ownership:
                        test.fail("Label of disk is not expected " +
                                  "after VM starting.\nDetail: di" +
                                  "sk_context=%s, " % disk_context +
                                  "qemu_conf_label_trans=%s." %
                                  qemu_conf_label_trans)

            # check vm started with -name $vm_name,process=qemu:$vm_name
            if set_process_name:
                if libvirt_version.version_compare(1, 3, 5):
                    chk_str = "-name guest=%s,process=qemu:%s" % (vm_name,
                                                                  vm_name)
                else:
                    chk_str = "-name %s,process=qemu:%s" % (vm_name, vm_name)
                cmd = "ps -p %s -o command=" % vm_pid
                result = process.run(cmd, shell=True)
                if chk_str in result.stdout_text:
                    logging.debug("%s found in vm process command: %s" %
                                  (chk_str, result.stdout_text))
                else:
                    test.fail("%s not in vm process command: %s" %
                              (chk_str, result.stdout_text))

            # Check the label of disk after VM being destroyed.
            vm.destroy()
            f = os.open(list(disks.values())[0]['source'], 0)
            stat_re = os.fstat(f)
            img_label_after = "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
            os.close(f)
            if set_sec_label and sec_relabel == "yes":
                # As dynamic_ownership as 1 on non-share fs, current domain
                # image will restore to 0:0 when sec_relabel enabled.
                if dynamic_ownership:
                    if not img_label_after == "0:0":
                        test.fail("Label of disk is img_label_after"
                                  ":%s" % img_label_after + ", it "
                                  "did not restore to 0:0 in VM "
                                  "shuting down.")
            elif set_qemu_conf and not set_sec_label:
                # As dynamic_ownership as 1 on non-share fs, current domain
                # image will restore to 0:0 when only set qemu.conf.
                if dynamic_ownership:
                    if not img_label_after == "0:0":
                        test.fail("Label of disk is img_label_after"
                                  ":%s" % img_label_after + ", it "
                                  "did not restore to 0:0 in VM "
                                  "shuting down.")
                else:
                    if (not img_label_after == img_label):
                        test.fail("Bug: Label of disk is changed\n"
                                  "Detail: img_label_after=%s, "
                                  "img_label=%s.\n" %
                                  (img_label_after, img_label))
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with seclabel can not access the image with the context.
            if not status_error:
                err_msg = "Domain start failed as expected, check "
                err_msg += "more in https://bugzilla.redhat.com/show_bug"
                err_msg += ".cgi?id=856951"
                if set_sec_label:
                    if sec_label:
                        if sec_relabel == "yes" and sec_label_trans == "0:0":
                            if set_qemu_conf and not qemu_no_usr_grp:
                                if qemu_conf_label_trans == "107:107":
                                    logging.debug(err_msg)
                        elif sec_relabel == "no" and sec_label_trans == "0:0":
                            if not set_qemu_conf:
                                logging.debug(err_msg)
                else:
                    test.fail("Test failed in positive case." "error: %s" % e)
    finally:
        # clean up
        for path, label in list(backup_labels_of_disks.items()):
            label_list = label.split(":")
            os.chown(path, int(label_list[0]), int(label_list[1]))
            if qemu_disk_mod:
                st = os.stat(path)
                os.chmod(path, st.st_mode ^ stat.S_IWGRP)
        if set_sec_label:
            backup_xml.sync()
        if qemu_sock_mod:
            st = os.stat(qemu_sock_path)
            os.chmod(qemu_sock_path, st.st_mode ^ stat.S_IWGRP)
        if set_qemu_conf:
            qemu_conf.restore()
            libvirtd.restart()
        if create_qemu_user:
            cmd = "userdel -r vdsm_fake"
            output = process.run(cmd, ignore_status=True, shell=True)
        utils_selinux.set_status(backup_sestatus)
Example #49
0
def run(test, params, env):
    """
    Test DAC setting in both domain xml and qemu.conf.

    (1) Init variables for test.
    (2) Set VM xml and qemu.conf with proper DAC label, also set image and
        monitor socket parent dir with proper ownership and mode.
    (3) Start VM and check the context.
    (4) Destroy VM and check the context.
    """

    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("dac_start_destroy_host_selinux", "enforcing")
    qemu_group_user = "******" == params.get("qemu_group_user", "no")
    # Get variables about seclabel for VM.
    sec_type = params.get("dac_start_destroy_vm_sec_type", "dynamic")
    sec_model = params.get("dac_start_destroy_vm_sec_model", "dac")
    sec_label = params.get("dac_start_destroy_vm_sec_label", None)
    sec_relabel = params.get("dac_start_destroy_vm_sec_relabel", "yes")
    security_default_confined = params.get("security_default_confined", None)
    set_process_name = params.get("set_process_name", None)
    sec_dict = {'type': sec_type, 'model': sec_model, 'relabel': sec_relabel}
    if sec_label:
        sec_dict['label'] = sec_label
    set_sec_label = "yes" == params.get("set_sec_label", "no")
    set_qemu_conf = "yes" == params.get("set_qemu_conf", "no")
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user", None)
    qemu_group = params.get("qemu_group", None)
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about the image.
    img_label = params.get('dac_start_destroy_disk_label')
    # Label the disks of VM with img_label.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    qemu_disk_mod = False
    for disk in disks.values():
        disk_path = disk['source']
        f = os.open(disk_path, 0)
        stat_re = os.fstat(f)
        backup_labels_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                       stat_re.st_gid)
        label_list = img_label.split(":")
        os.chown(disk_path, int(label_list[0]), int(label_list[1]))
        os.close(f)
        st = os.stat(disk_path)
        if not bool(st.st_mode & stat.S_IWGRP):
            # add group write permission to the disk (chmod g+w)
            os.chmod(disk_path, st.st_mode | stat.S_IWGRP)
            qemu_disk_mod = True

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)

    def _create_user():
        """
        Create a "vdsm_fake" in 'qemu' group for test
        """
        logging.debug("create a user 'vdsm_fake' in 'qemu' group")
        cmd = "useradd vdsm_fake -G qemu -s /sbin/nologin"
        utils.run(cmd, ignore_status=False)

    create_qemu_user = False
    qemu_sock_mod = False
    qemu_sock_path = '/var/lib/libvirt/qemu/'
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # Check qemu_group_user
        if qemu_group_user:
            if set_qemu_conf:
                if "EXAMPLE" in qemu_user:
                    if not check_qemu_grp_user("vdsm_fake"):
                        _create_user()
                        create_qemu_user = True
                    qemu_user = "******"
                    qemu_group = "qemu"
            if set_sec_label:
                if sec_label:
                    if "EXAMPLE" in sec_label:
                        if not check_qemu_grp_user("vdsm_fake"):
                            _create_user()
                            create_qemu_user = True
                        sec_label = "vdsm_fake:qemu"
                        sec_dict['label'] = sec_label
            st = os.stat(qemu_sock_path)
            if not bool(st.st_mode & stat.S_IWGRP):
                # chmod g+w
                os.chmod(qemu_sock_path, st.st_mode | stat.S_IWGRP)
                qemu_sock_mod = True

        if set_qemu_conf:
            # Transform qemu user and group to "uid:gid"
            qemu_user = qemu_user.replace("+", "")
            qemu_group = qemu_group.replace("+", "")
            qemu_conf_label_trans = format_user_group_str(qemu_user, qemu_group)

            # Set qemu.conf for user and group
            if qemu_user:
                qemu_conf.user = qemu_user
            if qemu_group:
                qemu_conf.group = qemu_group
            if dynamic_ownership:
                qemu_conf.dynamic_ownership = 1
            else:
                qemu_conf.dynamic_ownership = 0
            if security_default_confined:
                qemu_conf.security_default_confined = security_default_confined
            if set_process_name:
                qemu_conf.set_process_name = set_process_name
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()

        if set_sec_label:
            # Transform seclabel to "uid:gid"
            if sec_label:
                sec_label = sec_label.replace("+", "")
                if ":" in sec_label:
                    user, group = sec_label.split(":")
                    sec_label_trans = format_user_group_str(user, group)

            # Set the context of the VM.
            logging.debug("sec_dict is %s" % sec_dict)
            vmxml.set_seclabel([sec_dict])
            vmxml.sync()
            logging.debug("updated domain xml is: %s" % vmxml.xmltreefile)

        # Start VM to check the qemu process and image.
        try:
            vm.start()
            # Start VM successfully.
            # VM with seclabel can access the image with the context.
            if status_error:
                raise error.TestFail("Test succeeded in negative case.")

            # Get vm process label when VM is running.
            vm_pid = vm.get_pid()
            pid_stat = os.stat("/proc/%d" % vm_pid)
            vm_process_uid = pid_stat.st_uid
            vm_process_gid = pid_stat.st_gid
            vm_context = "%s:%s" % (vm_process_uid, vm_process_gid)

            # Get vm image label when VM is running
            f = os.open(disks.values()[0]['source'], 0)
            stat_re = os.fstat(f)
            disk_context = "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
            os.close(f)

            # Check vm process and image DAC label after vm start
            if set_sec_label and sec_label:
                if ":" in sec_label:
                    if vm_context != sec_label_trans:
                        raise error.TestFail("Label of VM processs is not "
                                             "expected after starting.\nDetail:"
                                             "vm_context=%s, sec_label_trans=%s"
                                             % (vm_context, sec_label_trans))
                    if sec_relabel == "yes":
                        if dynamic_ownership:
                            if disk_context != sec_label_trans:
                                raise error.TestFail("Label of disk is not " +
                                                     "expected" +
                                                     " after VM starting.\n" +
                                                     "Detail: disk_context" +
                                                     "=%s" % disk_context +
                                                     ", sec_label_trans=%s."
                                                     % sec_label_trans)
            elif set_qemu_conf and not security_default_confined:
                if vm_context != qemu_conf_label_trans:
                    raise error.TestFail("Label of VM processs is not expected"
                                         " after starting.\nDetail: vm_context="
                                         "%s, qemu_conf_label_trans=%s"
                                         % (vm_context, qemu_conf_label_trans))
                if disk_context != qemu_conf_label_trans:
                    if dynamic_ownership:
                        raise error.TestFail("Label of disk is not expected " +
                                             "after VM starting.\nDetail: di" +
                                             "sk_context=%s, " % disk_context +
                                             "qemu_conf_label_trans=%s." %
                                             qemu_conf_label_trans)

            # check vm started with -name $vm_name,process=qemu:$vm_name
            if set_process_name:
                chk_str = "-name %s,process=qemu:%s" % (vm_name, vm_name)
                cmd = "ps -p %s -o command=" % vm_pid
                result = utils.run(cmd)
                if chk_str in result.stdout:
                    logging.debug("%s found in vm process command: %s" %
                                  (chk_str, result.stdout))
                else:
                    raise error.TestFail("%s not in vm process command: %s" %
                                         (chk_str, result.stdout))

            # Check the label of disk after VM being destroyed.
            vm.destroy()
            f = os.open(disks.values()[0]['source'], 0)
            stat_re = os.fstat(f)
            img_label_after = "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
            os.close(f)
            if set_sec_label and sec_relabel == "yes":
                # As dynamic_ownership as 1 on non-share fs, current domain
                # image will restore to 0:0 when sec_relabel enabled.
                if dynamic_ownership:
                    if not img_label_after == "0:0":
                        raise error.TestFail("Label of disk is img_label_after"
                                             ":%s" % img_label_after + ", it "
                                             "did not restore to 0:0 in VM "
                                             "shuting down.")
            elif set_qemu_conf and not set_sec_label:
                # As dynamic_ownership as 1 on non-share fs, current domain
                # image will restore to 0:0 when only set qemu.conf.
                if dynamic_ownership:
                    if not img_label_after == "0:0":
                        raise error.TestFail("Label of disk is img_label_after"
                                             ":%s" % img_label_after + ", it "
                                             "did not restore to 0:0 in VM "
                                             "shuting down.")
                else:
                    if (not img_label_after == img_label):
                        raise error.TestFail("Bug: Label of disk is changed\n"
                                             "Detail: img_label_after=%s, "
                                             "img_label=%s.\n"
                                             % (img_label_after, img_label))
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with seclabel can not access the image with the context.
            if not status_error:
                err_msg = "Domain start fail not due to  DAC setting, check "
                err_msg += "more in https://bugzilla.redhat.com/show_bug"
                err_msg += ".cgi?id=856951"
                if set_sec_label:
                    if sec_label:
                        if sec_relabel == "yes" and sec_label_trans == "0:0":
                            if set_qemu_conf:
                                if qemu_conf_label_trans == "107:107":
                                    raise error.TestNAError(err_msg)
                        elif sec_relabel == "no" and sec_label_trans == "0:0":
                            if not set_qemu_conf:
                                raise error.TestNAError(err_msg)
                else:
                    raise error.TestFail("Test failed in positive case."
                                         "error: %s" % e)
    finally:
        # clean up
        for path, label in backup_labels_of_disks.items():
            label_list = label.split(":")
            os.chown(path, int(label_list[0]), int(label_list[1]))
            if qemu_disk_mod:
                st = os.stat(path)
                os.chmod(path, st.st_mode ^ stat.S_IWGRP)
        if set_sec_label:
            backup_xml.sync()
        if qemu_sock_mod:
            st = os.stat(qemu_sock_path)
            os.chmod(qemu_sock_path, st.st_mode ^ stat.S_IWGRP)
        if set_qemu_conf:
            qemu_conf.restore()
            libvirtd.restart()
        if create_qemu_user:
            cmd = "userdel -r vdsm_fake"
            output = utils.run(cmd, ignore_status=True)
        utils_selinux.set_status(backup_sestatus)
Example #50
0
def run_libvirt_network_bandwidth(test, params, env):
    """
    Test for network bandwidth in libvirt.

    1. Preparation:
        * Init variables from params.
        * Keep a backup for vmxml and networkxml.
        * Build a file with dd command.
    2. Edit vmxml and networkxml to control the bandwidth.
    3. Verify the bandwidth with scp.
    4. Clean up.
    """
    # get the params from params
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)

    inbound_average = params.get("LNB_inbound_average", "512")
    inbound_peak = params.get("LNB_inbound_peak", "512")
    inbound_burst = params.get("LNB_inbound_burst", "32")

    outbound_average = params.get("LNB_outbound_average", "512")
    outbound_peak = params.get("LNB_outbound_peak", "512")
    outbound_burst = params.get("LNB_outbound_burst", "32")

    config_type = params.get("LNB_config_type", "network")

    bandwidth_tolerance = float(params.get("LNB_bandwidth_tolerance", "20")) / 100

    file_size = params.get("LNB_verify_file_size", "10")

    nic1_params = params.object_params('nic1')
    nettype = params.get('nettype')
    netdst = params.get('netdst')

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    # This test assume that VM is using default network.
    # Check the interfaces of VM to make sure default network
    # is used by VM.
    interfaces = vm_xml.get_devices(device_type="interface")
    # interface which is using default network.
    default_interface = None
    for interface in interfaces:
        if interface.source == {nettype: netdst}:
            default_interface = interface
            break
    if not default_interface:
        raise error.TestNAError("VM is not using default network,"
                                "skip this test.")

    bandwidth_inbound = {'average': inbound_average,
                         'peak': inbound_peak,
                         'burst': inbound_burst}
    bandwidth_outbound = {'average': outbound_average,
                          'peak': outbound_peak,
                          'burst': outbound_burst}
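    # These dicts map onto libvirt's bandwidth element, roughly
    # (a sketch of the expected XML):
    #   <bandwidth>
    #     <inbound average='512' peak='512' burst='32'/>
    #     <outbound average='512' peak='512' burst='32'/>
    #   </bandwidth>
    # where average/peak are in KiB/s and burst is in KiB.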

    network_xml = NetworkXML.new_from_net_dumpxml("default")
    network_xml_backup = network_xml.copy()

    tmp_dir = data_dir.get_tmp_dir()
    file_path = os.path.join(tmp_dir, "scp_file")
    # Create a file to transfer for the bandwidth check.
    cmd = "dd if=/dev/zero of=%s bs=1M count=%s" % (file_path, file_size)
    utils.run(cmd)
    try:
        if config_type == "network":
            network_xml.bandwidth_inbound = bandwidth_inbound
            network_xml.bandwidth_outbound = bandwidth_outbound
            network_xml.sync()
        elif config_type == "interface":
            devices = vm_xml.devices
            for index in range(len(devices)):
                if not (devices[index].device_tag ==
                        default_interface.device_tag):
                    continue
                if devices[index].mac_address == default_interface.mac_address:
                    default_interface.bandwidth_inbound = bandwidth_inbound
                    default_interface.bandwidth_outbound = bandwidth_outbound
                    devices[index] = default_interface
                    break
            vm_xml.devices = devices
            vm_xml.sync()
        elif config_type == "portgroup":
            # Add a portgroup into default network
            portgroup_name = "test_portgroup"
            portgroup = PortgroupXML()
            portgroup.name = portgroup_name
            portgroup.bandwidth_inbound = bandwidth_inbound
            portgroup.bandwidth_outbound = bandwidth_outbound
            network_xml.portgroup = portgroup
            network_xml.sync()
            # Using the portgroup in VM.
            devices = vm_xml.devices
            for index in range(len(devices)):
                if not (devices[index].device_tag ==
                        default_interface.device_tag):
                    continue
                if devices[index].mac_address == default_interface.mac_address:
                    default_interface.portgroup = portgroup_name
                    devices[index] = default_interface
                    break
            vm_xml.devices = devices
            vm_xml.sync()
        else:
            raise error.TestNAError("Unsupported parameter config_type=%s." %
                                    config_type)

        # SCP to check the network bandwidth.
        if vm.is_alive():
            vm.destroy()
        vm.start()
        vm.wait_for_login()
        time_before = time.time()
        vm.copy_files_to(host_path=file_path, guest_path="/root")
        time_after = time.time()

        speed_expected = int(inbound_average)
        # transferred KiB / elapsed seconds -> KiB/s, the same unit as
        # the 'average' bandwidth setting
        speed_actual = int(file_size) * 1024 / (time_after - time_before)
        if not (abs(speed_actual - speed_expected) <=
                speed_expected * bandwidth_tolerance):
            raise error.TestFail("Speed from host to guest is %s.\n"
                                 "But the average of bandwidth.inbound is %s.\n"
                                 % (speed_actual, speed_expected))
        time_before = time.time()
        vm.copy_files_from(host_path=file_path, guest_path="/root/scp_file")
        time_after = time.time()

        speed_expected = int(outbound_average)
        speed_actual = int(file_size) * 1024 / (time_after - time_before)
        if not (abs(speed_actual - speed_expected) <=
                speed_expected * bandwidth_tolerance):
            raise error.TestFail("Speed from guest to host is %s.\n"
                                 "But the average of bandwidth.outbound is %s\n"
                                 % (speed_actual, speed_expected))

    finally:
        if os.path.exists(file_path):
            os.remove(file_path)
        network_xml_backup.sync()
        vm_xml_backup.sync()
Example #51
0
def run(test, params, env):
    # Get variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    img_type = ('yes' == params.get("libvirt_scsi_img_type", "no"))
    cdrom_type = ('yes' == params.get("libvirt_scsi_cdrom_type", "no"))
    partition_type = ('yes' == params.get("libvirt_scsi_partition_type", "no"))
    partition = params.get("libvirt_scsi_partition",
                           "ENTER.YOUR.AVAILABLE.PARTITION")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    # Init a VM instance and a VMXML instance.
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    # Keep a backup of xml to restore it in cleanup.
    backup_xml = vmxml.copy()
    # Add a scsi controller if there is not.
    controller_devices = vmxml.get_devices("controller")
    scsi_controllers = []
    for device in controller_devices:
        if device.type == "scsi":
            scsi_controllers.append(device)

    if not scsi_controllers:
        scsi_controller = Controller("controller")
        scsi_controller.type = "scsi"
        scsi_controller.index = "0"
        scsi_controller.model = "virtio-scsi"
        vmxml.add_device(scsi_controller)

    # Add disk with bus of scsi into vmxml.
    if partition_type:
        if partition.count("ENTER.YOUR"):
            raise error.TestNAError("Partition for partition test "
                                    "is not configured.")
        partition_disk = Disk(type_name="block")
        partition_disk.device = "disk"
        partition_disk.target = {'dev': "vdg",
                                 'bus': "scsi"}
        partition_disk.source = partition_disk.new_disk_source(
            **{'attrs': {'dev': partition}})
        vmxml.add_device(partition_disk)
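        # The resulting disk element corresponds roughly to (sketch,
        # with a hypothetical source partition):
        #   <disk type='block' device='disk'>
        #     <source dev='/dev/sdb1'/>
        #     <target dev='vdg' bus='scsi'/>
        #   </disk>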
    if img_type:
        # Init a QemuImg instance.
        img_name = "libvirt_scsi"
        params['image_name'] = img_name
        image = qemu_storage.QemuImg(params, data_dir.get_tmp_dir(), img_name)
        # Create a image.
        img_path, _ = image.create(params)
        img_disk = Disk(type_name="file")
        img_disk.device = "disk"
        img_disk.source = img_disk.new_disk_source(
            **{'attrs': {'file': img_path}})
        img_disk.target = {'dev': "vde",
                           'bus': "scsi"}
        vmxml.add_device(img_disk)
    if cdrom_type:
        # Init a CdromDisk instance.
        cdrom_path = os.path.join(data_dir.get_tmp_dir(), "libvirt_scsi")
        try:
            cdrom = CdromDisk(cdrom_path, data_dir.get_tmp_dir())
            cdrom.close()
        except process.CmdError as detail:
            raise error.TestNAError("Failed to create cdrom disk: %s" % detail)

        cdrom_disk = Disk(type_name="file")
        cdrom_disk.device = "cdrom"
        cdrom_disk.target = {'dev': "vdf",
                             'bus': "scsi"}
        cdrom_disk.source = cdrom_disk.new_disk_source(
            **{'attrs': {'file': cdrom_path}})
        vmxml.add_device(cdrom_disk)
Example #52
0
def run(test, params, env):
    """
    Test the stdio_handler parameter in qemu.conf, which selects how
    stdout/stderr output from QEMU processes is handled.

    1) Change stdio_handler in qemu.conf;
    2) Restart libvirtd daemon;
    3) Check if libvirtd successfully started;
    4) Check if virtlogd.socket is running;
    5) Configure pty serial and console;
    6) Check if VM log file exists and has correct permission and owner;
    7) Check if VM log file is opened by virtlogd;
    8) Check if VM start log is written into VM log file correctly;
    9) Check if QEMU uses the pipe provided by the virtlogd daemon for logging;
    10) Check if VM shutdown log is written into VM log file correctly;
    11) Check if pipe file can be closed gracefully after VM shutdown;
    12) Check if VM restart log can be appended to the end of previous log file;
    """
    def clean_up_vm_log_file(vm_name, guest_log_file):
        """
        Clean up VM log file.

        :params vm_name: guest name
        :params guest_log_file: the path of VM log file
        """
        # Delete VM log file if exists.
        if os.path.exists(guest_log_file):
            os.remove(guest_log_file)

    def configure(cmd, guest_log_file=None, errorMsg=None):
        """
        Configure qemu log.

        :param cmd: execute command string.
        :param guest_log_file: the path of VM log file.
        :param errorMsg: error message if failed
        :return: pipe node.
        """
        # If guest_log_file is not None, check whether the VM log file exists.
        if guest_log_file and not os.path.exists(guest_log_file):
            test.error("Expected VM log file %s does not exist" % guest_log_file)
        # If errorMsg is not None, check command running result.
        elif errorMsg:
            if process.run(cmd, ignore_status=True, shell=True).exit_status:
                test.error(errorMsg)
        # Get pipe node.
        else:
            result = process.run(cmd,
                                 timeout=90,
                                 ignore_status=True,
                                 shell=True)
            ret, output = result.exit_status, result.stdout_text
            if ret:
                test.fail("Failed to get pipe node")
            else:
                return output

    def configure_serial_console(vm_name, dev_type, guest_log_file=None):
        """
        Configure serial console.

        :params vm_name: guest name
        :params dev_type: device type
        :params guest_log_file: the path of VM log file
        """
        guest_xml = VMXML.new_from_inactive_dumpxml(vm_name)
        guest_xml.remove_all_device_by_type('serial')
        guest_xml.remove_all_device_by_type('console')

        serial = Serial(dev_type)
        serial.target_port = '0'

        console = Console(dev_type)
        console.target_port = '0'
        console.target_type = 'serial'

        if dev_type == "file" and guest_log_file is not None:
            serial.sources = console.sources = [{
                'path': guest_log_file,
                'append': 'off'
            }]
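        # append='off' above makes libvirt truncate guest_log_file on each
        # VM start; append='on' would keep appending across boots.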
        guest_xml.add_device(serial)
        guest_xml.add_device(console)
        guest_xml.sync()

    def check_vm_log_file_permission_and_owner(vm_name, guest_log_file):
        """
        Check VM log file permission and owner.

        :params vm_name: guest name
        :params guest_log_file: the path of VM log file
        """
        # Check VM log file permission.
        logging.info("guest log file: %s", guest_log_file)
        if not os.path.exists(guest_log_file):
            test.error("Expected VM log file: %s not exists" % guest_log_file)
        permission = oct(stat.S_IMODE(os.lstat(guest_log_file).st_mode))
        if permission != '0600' and permission != '0o600':
            test.fail(
                "VM log file: %s expect to get permission:0600, got %s ." %
                (guest_log_file, permission))
        # Check VM log file owner.
        owner = getpwuid(os.lstat(guest_log_file).st_uid).pw_name
        if owner != 'root':
            test.fail("VM log file: %s expect to get owner:root, got %s ." %
                      (guest_log_file, owner))

    def check_info_in_vm_log_file(vm_name,
                                  guest_log_file,
                                  cmd=None,
                                  matchedMsg=None):
        """
        Check if log information is written into log file correctly.

        :params vm_name: guest name
        :params guest_log_file: the path of VM log file
        :params cmd: execute command string
        :params matchedMsg: match message
        """
        # Check that the VM log file exists.
        if not os.path.exists(guest_log_file):
            test.fail("Expected VM log file %s does not exist" % guest_log_file)

        if cmd is None:
            cmd = ("grep -nr '%s' %s" % (matchedMsg, guest_log_file))
        else:
            cmd = (cmd + " %s |grep '%s'" % (guest_log_file, matchedMsg))
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            test.fail("Failed to get VM started log from VM log file: %s." %
                      guest_log_file)

    def check_pipe_closed(pipe_node):
        """
        Check pipe used by QEMU is closed gracefully after VM shutdown.
        """
        # Check pipe node can not be listed after VM shutdown.
        cmd = ("lsof  -w |grep pipe|grep virtlogd|grep %s" % pipe_node)
        if not process.run(cmd, timeout=90, ignore_status=True,
                           shell=True).exit_status:
            test.fail("pipe node: %s is not closed in virtlogd gracefully." %
                      pipe_node)

        cmd = ("lsof  -w |grep pipe|grep qemu-kvm|grep %s" % pipe_node)
        if not process.run(cmd, timeout=90, ignore_status=True,
                           shell=True).exit_status:
            test.fail("pipe node: %s is not closed in qemu gracefully." %
                      pipe_node)

    def check_service_status(service_name, service_start=False):
        """
        Check service status and return service PID

        :param service_name: service name
        :param service_start: whether to start service or not
        :return: service PID
        """
        # Check service status
        cmd = ("systemctl status %s | grep 'Active: active'" % service_name)
        ret = process.run(cmd, ignore_status=True, shell=True, verbose=True)
        if ret.exit_status:
            # If the service isn't active and 'service_start' is set, start it.
            if service_start:
                ret = process.run("systemctl start %s" % service_name,
                                  shell=True)
                if ret.exit_status:
                    test.fail("%s start failed." % service_name)
            # If the service isn't active and 'service_start' is not set, fail.
            else:
                test.fail("%s is not active." % service_name)
        cmd = ("systemctl status %s | grep 'Main PID:'" % service_name)
        ret = process.run(cmd, ignore_status=True, shell=True, verbose=True)
        if ret.exit_status:
            test.fail("Get %s status failed." % service_name)
        return ret.stdout_text.split()[2]

    def reload_and_check_virtlogd():
        """
        Reload and check virtlogd status
        """
        virtlogd_pid = check_service_status("virtlogd", service_start=True)
        logging.info("virtlogd PID: %s", virtlogd_pid)
        ret = process.run("systemctl reload virtlogd", shell=True)
        if ret.exit_status:
            test.fail("virtlogd reload failed.")
        reload_virtlogd_pid = check_service_status("virtlogd",
                                                   service_start=True)
        logging.info("After reload, virtlogd PID: %s", reload_virtlogd_pid)
        if virtlogd_pid != reload_virtlogd_pid:
            test.fail("After reload, virtlogd PID changed.")

    def configure_spice(vm_name):
        """
        Configure spice

        :params vm_name: guest name
        """
        vm_spice_xml = VMXML.new_from_inactive_dumpxml(vm_name)
        vm_spice_xml.remove_all_device_by_type('graphics')

        graphic = Graphics(type_name='spice')
        graphic.autoport = "yes"
        graphic.port = "-1"
        graphic.tlsPort = "-1"
        vm_spice_xml.add_device(graphic)
        vm_spice_xml.sync()

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    expected_result = params.get("expected_result", "virtlogd_disabled")
    stdio_handler = params.get("stdio_handler", "not_set")
    start_vm = "yes" == params.get("start_vm", "yes")
    reload_virtlogd = "yes" == params.get("reload_virtlogd", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    with_spice = "yes" == params.get("with_spice", "no")
    with_console_log = "yes" == params.get("with_console_log", "no")

    vm = env.get_vm(vm_name)
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    if with_console_log:
        guest_log_file = os.path.join(QEMU_LOG_PATH,
                                      "%s-console.log" % vm_name)
    else:
        guest_log_file = os.path.join(QEMU_LOG_PATH, "%s.log" % vm_name)

    config = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        if stdio_handler != 'not_set':
            config['stdio_handler'] = "'%s'" % stdio_handler
        if restart_libvirtd:
            virtlogd_pid = check_service_status("virtlogd", service_start=True)
            logging.info("virtlogd pid: %s", virtlogd_pid)
            check_service_status("libvirtd", service_start=True)

        # Restart libvirtd to make change valid.
        if not libvirtd.restart():
            if expected_result != 'unbootable':
                test.fail('Libvirtd is expected to be started '
                          'with stdio_handler=%s' % stdio_handler)
            return
        if expected_result == 'unbootable':
            test.fail('Libvirtd is not expected to be started '
                      'with stdio_handler=%s' % stdio_handler)

        if not start_vm:
            if reload_virtlogd:
                reload_and_check_virtlogd()
        else:
            # Stop all VMs if VMs are already started.
            for tmp_vm in env.get_all_vms():
                if tmp_vm.is_alive():
                    tmp_vm.destroy(gracefully=False)

            # Sleep a few seconds to let the VM sync underlying data
            time.sleep(3)

            # Remove VM previous log file.
            clean_up_vm_log_file(vm_name, guest_log_file)

            # Check if virtlogd socket is running.
            cmd = ("systemctl status virtlogd.socket|grep 'Active: active'")
            configure(cmd, errorMsg="virtlogd.socket is not running")

            # Configure spice
            if with_spice:
                configure_spice(vm_name)

            # Configure serial console.
            if with_console_log:
                configure_serial_console(vm_name, 'file', guest_log_file)
            else:
                configure_serial_console(vm_name, 'pty')

            logging.info("final vm:")
            logging.info(VMXML.new_from_inactive_dumpxml(vm_name))

            # Start VM.
            try:
                vm.start()
            except virt_vm.VMStartError as detail:
                test.fail("VM failed to start." "Error: %s" % str(detail))
            # Wait for write log to console log file
            if with_console_log:
                vm.wait_for_login()

            # Check VM log file has right permission and owner.
            check_vm_log_file_permission_and_owner(vm_name, guest_log_file)
            utils_package.package_install(['lsof'])
            # Check VM log file is opened by virtlogd.
            cmd = ("lsof -w %s|grep 'virtlogd'" % guest_log_file)
            errorMessage = "VM log file: %s is not opened by:virtlogd." % guest_log_file
            configure(cmd, guest_log_file, errorMessage)

            # Check VM started log is written into log file correctly.
            if not with_console_log:
                check_info_in_vm_log_file(
                    vm_name,
                    guest_log_file,
                    matchedMsg="char device redirected to /dev/pts")

            # Get pipe node opened by virtlogd for VM log file.
            pipe_node_field = "$9"
            # On newer releases (libvirt >= 4.3.0), the pipe node number is
            # the 8th field in the lsof output.
            if libvirt_version.version_compare(4, 3, 0):
                pipe_node_field = "$8"
            cmd = (
                "lsof  -w |grep pipe|grep virtlogd|tail -n 1|awk '{print %s}'"
                % pipe_node_field)
            pipe_node = configure(cmd)

            if restart_libvirtd:
                libvirtd.restart()
                new_virtlogd_pid = check_service_status("virtlogd",
                                                        service_start=True)
                logging.info("After libvirtd restart, virtlogd PID: %s",
                             new_virtlogd_pid)
                new_pipe_node = configure(cmd)
                logging.info("After libvirtd restart, pipe node: %s",
                             new_pipe_node)
                if pipe_node != new_pipe_node and new_pipe_node != new_virtlogd_pid:
                    test.fail("After libvirtd restart, pipe node changed.")

            if with_spice or with_console_log:
                reload_and_check_virtlogd()

            # Check if qemu-kvm use pipe node provided by virtlogd.
            cmd = ("lsof  -w |grep pipe|grep qemu-kvm|grep %s" % pipe_node)
            errorMessage = ("Can not find matched pipe node: %s "
                            "from pipe list used by qemu-kvm." % pipe_node)
            configure(cmd, errorMsg=errorMessage)

            # Shutdown VM.
            if not vm.shutdown():
                vm.destroy(gracefully=True)

            # Check qemu log works well
            if with_spice:
                check_info_in_vm_log_file(
                    vm_name,
                    guest_log_file,
                    matchedMsg="qemu-kvm: terminating on signal 15 from pid")

            # Check VM shutdown log is written into log file correctly.
            if with_console_log:
                check_info_in_vm_log_file(vm_name,
                                          guest_log_file,
                                          matchedMsg="Powering off")
            else:
                check_info_in_vm_log_file(vm_name,
                                          guest_log_file,
                                          matchedMsg="shutting down")

            # Check pipe is closed gracefully after VM shutdown.
            check_pipe_closed(pipe_node)

            # Start VM again.
            try:
                vm.start()
            except virt_vm.VMStartError as detail:
                test.fail("VM failed to start." "Error: %s" % str(detail))
            # Check the new VM start log is appended to the end of the VM log file.
            if not with_console_log:
                check_info_in_vm_log_file(
                    vm_name,
                    guest_log_file,
                    cmd="tail -n 5",
                    matchedMsg="char device redirected to /dev/pts")

    finally:
        config.restore()
        libvirtd.restart()
        vm_xml_backup.sync()
Example #53
def run(test, params, env):
    """
    Verify hotplug feature for char device
    """

    vm_name = params.get("main_vm", "vm1")
    status_error = "yes" == params.get("status_error", "no")
    char_dev = params.get("char_dev", "file")
    hotplug_type = params.get("hotplug_type", "qmp")
    dup_charid = "yes" == params.get("dup_charid", "no")
    dup_devid = "yes" == params.get("dup_devid", "no")
    diff_devid = "yes" == params.get("diff_devid", "no")

    tmp_dir = os.path.join(test.tmpdir, "hotplug_serial")
    if not os.path.exists(tmp_dir):
        os.mkdir(tmp_dir)
    os.chmod(tmp_dir, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    # add controller for each char device
    devices = vm_xml.get_devices()
    controllers = vm_xml.get_devices(device_type="controller")
    for dev in controllers:
        if dev.type == "virtio-serial":
            devices.remove(dev)
    controller = Controller("controller")
    controller.type = "virtio-serial"
    controller.index = 0
    devices.append(controller)
    vm_xml.set_devices(devices)
    vm_xml.sync()

    # start and login vm
    vm = env.get_vm(vm_name)
    vm.start()
    session = vm.wait_for_login()

    def prepare_channel_xml(to_file, char_type, id=0):
        params = {}
        mode = ''
        if char_type == "file":
            channel_type = char_type
            channel_path = os.path.join(tmp_dir, char_type)
        elif char_type == "socket":
            channel_type = 'unix'
            channel_path = os.path.join(tmp_dir, char_type)
            mode = 'bind'
        elif char_type == "pty":
            channel_type = char_type
            channel_path = ("/dev/pts/%s" % id)
        params = {'channel_type_name': channel_type,
                  'source_path': channel_path,
                  'source_mode': mode,
                  'target_type': 'virtio',
                  'target_name': char_type}
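        # For the socket case the generated XML is roughly:
        #   <channel type='unix'>
        #     <source mode='bind' path='.../socket'/>
        #     <target type='virtio' name='socket'/>
        #   </channel>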
        channel_xml = utlv.create_channel_xml(params, alias=True, address=True)
        shutil.copyfile(channel_xml.xml, to_file)

    def hotplug_device(type, char_dev, id=0):
        tmp_file = os.path.join(tmp_dir, char_dev)
        if type == "qmp":
            char_add_opt = "chardev-add "
            dev_add_opt = "device_add virtserialport,chardev="
            if char_dev == "file":
                char_add_opt += "file,path=%s,id=file" % tmp_file
                dev_add_opt += "file,name=file,bus=virtio-serial0.0,id=file"
            elif char_dev == "socket":
                char_add_opt += "socket,path=%s,server,nowait,id=socket" % tmp_file
                dev_add_opt += "socket,name=socket,bus=virtio-serial0.0,id=socket"
            elif char_dev == "pty":
                char_add_opt += ("pty,path=/dev/pts/%s,id=pty" % id)
                dev_add_opt += "pty,name=pty,bus=virtio-serial0.0,id=pty"
            result = virsh.qemu_monitor_command(vm_name, char_add_opt, "--hmp")
            if result.exit_status:
                raise error.TestError('Failed to add chardev %s to %s. Result:\n %s'
                                      % (char_dev, vm_name, result))
            result = virsh.qemu_monitor_command(vm_name, dev_add_opt, "--hmp")
            if result.exit_status:
                raise error.TestError('Failed to add device %s to %s. Result:\n %s'
                                      % (char_dev, vm_name, result))
        elif type == "attach":
            xml_file = os.path.join(tmp_dir, "xml_%s" % char_dev)
            if char_dev in ["file", "socket"]:
                prepare_channel_xml(xml_file, char_dev)
            elif char_dev == "pty":
                prepare_channel_xml(xml_file, char_dev, id)
            result = virsh.attach_device(vm_name, xml_file)
            # The serial device type was introduced by the following commit:
            # http://libvirt.org/git/?p=libvirt.git;a=commit;h=b63ea467617e3cbee4282ab2e5e780b4119cef3d
            if "unknown device type" in result.stderr:
                raise error.TestNAError('Failed to attach %s to %s. Result:\n %s'
                                        % (char_dev, vm_name, result))
        return result

    def dup_hotplug(type, char_dev, id, dup_charid=False, dup_devid=False, diff_devid=False):
        tmp_file = os.path.join(tmp_dir, char_dev)
        if type == "qmp":
            char_add_opt = "chardev-add "
            dev_add_opt = "device_add virtserialport,chardev="
            if char_dev == "file":
                if dup_charid:
                    char_add_opt += "file,path=%s,id=file" % tmp_file
                if dup_devid:
                    dev_add_opt += "file,name=file,bus=virtio-serial0.0,id=file"
                if diff_devid:
                    dev_add_opt += "file,name=file,bus=virtio-serial0.0,id=file1"
            elif char_dev == "socket":
                if dup_charid:
                    char_add_opt += "socket,path=%s,server,nowait,id=socket" % tmp_file
                if dup_devid:
                    dev_add_opt += "socket,name=socket,bus=virtio-serial0.0,id=socket"
                if diff_devid:
                    dev_add_opt += "socket,name=socket,bus=virtio-serial0.0,id=socket1"
            elif char_dev == "pty":
                if dup_charid:
                    char_add_opt += "pty,path=/dev/pts/%s,id=pty" % id
                if dup_devid:
                    dev_add_opt += "pty,name=pty,bus=virtio-serial0.0,id=pty"
                if diff_devid:
                    dev_add_opt += "pty,name=pty,bus=virtio-serial0.0,id=pty1"
            if dup_charid:
                result = virsh.qemu_monitor_command(vm_name, char_add_opt, "--hmp")
            if dup_devid or diff_devid:
                result = virsh.qemu_monitor_command(vm_name, dev_add_opt, "--hmp")
        elif type == "attach":
            if dup_devid:
                result = hotplug_device(type, char_dev, id)
        return result

    def confirm_hotplug_result(char_dev, id=0):
        tmp_file = os.path.join(tmp_dir, char_dev)
        serial_file = os.path.join("/dev/virtio-ports", char_dev)
        result = virsh.qemu_monitor_command(vm_name, "info qtree", "--hmp")
        h_o = result.stdout.strip()
        if not h_o.count("name = \"%s\"" % char_dev):
            raise error.TestFail("Cann't find device(%s) from:\n%s" % (char_dev, h_o))
        if char_dev == "file":
            session.cmd("echo test > %s" % serial_file)
            f = open(tmp_file, "r")
            r_o = f.read()
            f.close()
        elif char_dev == "socket":
            session.cmd("echo test > /tmp/file")
            sock = socket.socket(socket.AF_UNIX)
            sock.connect(tmp_file)
            session.cmd("dd if=/tmp/file of=%s" % serial_file)
            r_o = sock.recv(1024)
        elif char_dev == "pty":
            session.cmd("echo test > /tmp/file")
            session.cmd("dd if=/tmp/file of=%s &" % serial_file)
            dev_file = "/dev/pts/%s" % id
            if not os.path.exists(dev_file):
                raise error.TestFail("%s doesn't exist." % dev_file)
            p = subprocess.Popen(["/usr/bin/cat", dev_file],
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
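            # Read the host side of the pty; data the guest writes to its
            # virtio port should show up here.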
            session.cmd("echo test >> /tmp/file &")
            while True:
                r_o = p.stdout.readline()
                if r_o or p.poll():
                    break
                time.sleep(0.2)
            p.kill()
        if not r_o.count("test"):
            err_info = "%s device file doesn't match 'test':%s" % (char_dev, r_o)
            raise error.TestFail(err_info)

    def unhotplug_serial_device(type, char_dev):
        if type == "qmp":
            del_dev_opt = "device_del %s" % char_dev
            del_char_opt = "chardev-remove %s" % char_dev
            result = virsh.qemu_monitor_command(vm_name, del_dev_opt, "--hmp")
            if result.exit_status:
                raise error.TestError('Failed to del device %s from %s. Result:\n%s'
                                      % (char_dev, vm_name, result))
            result = virsh.qemu_monitor_command(vm_name, del_char_opt, "--hmp")
        elif type == "attach":
            xml_file = os.path.join(tmp_dir, "xml_%s" % char_dev)
            result = virsh.detach_device(vm_name, xml_file)

    def confirm_unhotplug_result(char_dev):
        serial_file = os.path.join("/dev/virtio-ports", char_dev)
        result = virsh.qemu_monitor_command(vm_name, "info qtree", "--hmp")
        uh_o = result.stdout.strip()
        if uh_o.count("chardev = \"%s\"" % char_dev):
            raise error.TestFail("Still can get serial device(%s) from: '%s'"
                                 % (char_dev, uh_o))
        if os.path.exists(serial_file):
            raise error.TestFail("File '%s' still exists after unhotplug" % serial_file)

    # run test case
    try:
        if char_dev in ['file', 'socket']:
            # if char_dev is file or socket, it doesn't need pts index
            pts_id = 0
        else:
            pts_id = str(utils_misc.aton(utils_misc.get_dev_pts_max_id()) + 1)
            if os.path.exists("/dev/pts/%s" % pts_id):
                raise error.TestError('invalid pts index(%s) provided.' % pts_id)
        if status_error:
            hotplug_device(hotplug_type, char_dev, pts_id)
            ret = dup_hotplug(hotplug_type, char_dev, pts_id, dup_charid, dup_devid, diff_devid)
            dup_o = ret.stdout.strip()
            if hotplug_type == "qmp":
                # Although the hotplug failed, ret.exit_status is still 0,
                # so check the command output instead.
                err_o1 = "Duplicate ID"
                err_o2 = "Parsing chardev args failed"
                err_o3 = "Property 'virtserialport.chardev' can't"
                if (err_o1 not in dup_o) and (err_o2 not in dup_o) and (err_o3 not in dup_o):
                    raise error.TestFail("Expected failure, but ran successfully:\n%s" % ret)
            else:
                if "chardev already exists" not in dup_o:
                    logging.info("Expected failure, but ran successfully:\n%s" % ret)
        else:
            if char_dev != "all":
                #1.hotplug serial device
                hotplug_device(hotplug_type, char_dev, pts_id)

                #2.confirm hotplug result
                confirm_hotplug_result(char_dev, pts_id)

                #3.unhotplug serial device
                unhotplug_serial_device(hotplug_type, char_dev)

                #4.confirm unhotplug result
                confirm_unhotplug_result(char_dev)
            else:
                #1.hotplug serial device
                hotplug_device(hotplug_type, "file")
                hotplug_device(hotplug_type, "socket")
                hotplug_device(hotplug_type, "pty", pts_id)

                #2.confirm hotplug result
                confirm_hotplug_result("file")
                confirm_hotplug_result("socket")
                confirm_hotplug_result("pty", pts_id)

                #3.unhotplug serial device
                unhotplug_serial_device(hotplug_type, "file")
                unhotplug_serial_device(hotplug_type, "socket")
                unhotplug_serial_device(hotplug_type, "pty")

                #4.confirm unhotplug result
                confirm_unhotplug_result("file")
                confirm_unhotplug_result("socket")
                confirm_unhotplug_result("pty")
    finally:
        vm_xml_backup.sync()
        if os.path.exists(tmp_dir):
            shutil.rmtree(tmp_dir)
Example #54
def run(test, params, env):
    """
    Test DAC in adding nfs pool disk to VM.

    (1).Init variables for test.
    (2).Create nfs pool and vol.
    (3).Attach the nfs pool vol to VM.
    (4).Start VM and check result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("dac_nfs_disk_host_selinux", "enforcing")
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user")
    qemu_group = params.get("qemu_group")
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")
    # Get variables about pool vol
    virt_use_nfs = params.get("virt_use_nfs", "off")
    nfs_server_dir = params.get("nfs_server_dir", "nfs-server")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    export_options = params.get("export_options",
                                "rw,async,no_root_squash,fsid=0")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format")
    bk_file_name = params.get("bk_file_name")
    # Get pool vol variables
    img_tup = ("img_user", "img_group", "img_mode")
    img_val = []
    for i in img_tup:
        try:
            img_val.append(int(params.get(i)))
        except ValueError:
            raise error.TestNAError("%s value '%s' is not a number." %
                                    (i, params.get(i)))
    img_user, img_group, img_mode = img_val

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Backup domain disk label
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        f = os.open(disk_path, 0)
        stat_re = os.fstat(f)
        backup_labels_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                       stat_re.st_gid)
        os.close(f)

    # Backup selinux status of host.
    backup_sestatus = utils_selinux.get_status()

    pvt = None
    snapshot_name = None
    disk_snap_path = []
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # chown domain disks to qemu:qemu to avoid failures on local disks
        for disk in disks.values():
            disk_path = disk['source']
            if qemu_user == "root":
                os.chown(disk_path, 0, 0)
            elif qemu_user == "qemu":
                os.chown(disk_path, 107, 107)

        # Set selinux of host.
        utils_selinux.set_status(host_sestatus)

        # set qemu conf
        qemu_conf.user = qemu_user
        qemu_conf.group = qemu_user
        if dynamic_ownership:
            qemu_conf.dynamic_ownership = 1
        else:
            qemu_conf.dynamic_ownership = 0
        logging.debug("the qemu.conf content is: %s" % qemu_conf)
        libvirtd.restart()

        # Create the destination pool for the volume image to attach
        logging.debug("export_options is: %s" % export_options)
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(pool_name, pool_type, pool_target,
                     emulated_image, image_size="1G",
                     pre_disk_vol=["20M"],
                     export_options=export_options)

        # set virt_use_nfs
        result = utils.run("setsebool virt_use_nfs %s" % virt_use_nfs)
        if result.exit_status:
            raise error.TestNAError("Failed to set virt_use_nfs value")

        # Init a QemuImg instance and create img on nfs server dir.
        params['image_name'] = vol_name
        tmp_dir = data_dir.get_tmp_dir()
        nfs_path = os.path.join(tmp_dir, nfs_server_dir)
        image = qemu_storage.QemuImg(params, nfs_path, vol_name)
        # Create an image.
        server_img_path, result = image.create(params)

        if params.get("image_name_backing_file"):
            params['image_name'] = bk_file_name
            params['has_backing_file'] = "yes"
            image = qemu_storage.QemuImg(params, nfs_path, bk_file_name)
            server_img_path, result = image.create(params)

        # Get vol img path
        vol_name = server_img_path.split('/')[-1]
        virsh.pool_refresh(pool_name, debug=True)
        cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
        if cmd_result.exit_status:
            raise error.TestNAError("Failed to get volume path from pool.")
        img_path = cmd_result.stdout.strip()

        # Do the attach action.
        extra = "--persistent --subdriver qcow2"
        result = virsh.attach_disk(vm_name, source=img_path, target="vdf",
                                   extra=extra, debug=True)
        if result.exit_status:
            raise error.TestFail("Failed to attach disk %s to VM."
                                 "Detail: %s." % (img_path, result.stderr))

        # Change img ownership and mode on nfs server dir
        os.chown(server_img_path, img_user, img_group)
        os.chmod(server_img_path, img_mode)

        img_label_before = check_ownership(server_img_path)
        if img_label_before:
            logging.debug("attached image ownership on nfs server before "
                          "start: %s" % img_label_before)

        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.

            img_label_after = check_ownership(server_img_path)
            if img_label_after:
                logging.debug("attached image ownership on nfs server after"
                              " start: %s" % img_label_after)

            if status_error:
                raise error.TestFail('Test succeeded in negative case.')
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                raise error.TestFail("Test failed in positive case."
                                     "error: %s" % e)

        if params.get("image_name_backing_file"):
            options = "--disk-only"
            snapshot_result = virsh.snapshot_create(vm_name, options,
                                                    debug=True)
            if snapshot_result.exit_status:
                if not status_error:
                    raise error.TestFail("Failed to create snapshot. Error:%s."
                                         % snapshot_result.stderr.strip())
            snapshot_name = re.search(
                r"\d+", snapshot_result.stdout.strip()).group(0)

        if snapshot_name:
            disks_snap = vm.get_disk_devices()
            for disk in disks_snap.values():
                disk_snap_path.append(disk['source'])
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata",
                                  debug=True)

        try:
            virsh.detach_disk(vm_name, target="vdf", extra="--persistent",
                              debug=True)
        except error.CmdError:
            raise error.TestFail("Detach disk 'vdf' from VM %s failed."
                                 % vm.name)
Example #55
def run(test, params, env):
    """
    Test svirt in undefining and defining a VM.

    (1).Init variables for test.
    (2).Label the VM and disks with the proper label.
    (3).Undefine the VM and define it again from the dumped XML.
    (4).Clean up.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_undefine_define_host_selinux",
                               "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_undefine_define_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_undefine_define_vm_sec_model", "selinux")
    sec_label = params.get("svirt_undefine_define_vm_sec_label", None)
    sec_relabel = params.get("svirt_undefine_define_vm_sec_relabel", "yes")
    sec_dict = {
        'type': sec_type,
        'model': sec_model,
        'label': sec_label,
        'relabel': sec_relabel
    }
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about image.
    img_label = params.get('svirt_undefine_define_disk_label')
    # Label the disks of VM with img_label.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        utils_selinux.set_context_of_file(filename=disk_path,
                                          context=img_label)
    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the context of the VM.
    vmxml.set_seclabel([sec_dict])
    vmxml.sync()

    try:
        xml_file = (os.path.join(data_dir.get_tmp_dir(), "vmxml"))
        if vm.is_alive():
            vm.destroy()
        virsh.dumpxml(vm.name, to_file=xml_file)
        cmd_result = virsh.undefine(vm.name)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to undefine vm."
                                 "Detail: %s" % cmd_result)
        cmd_result = virsh.define(xml_file)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to define vm."
                                 "Detail: %s" % cmd_result)
    finally:
        # clean up
        for path, label in backup_labels_of_disks.items():
            utils_selinux.set_context_of_file(filename=path, context=label)
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
Example #56
def run(test, params, env):
    """
    Domain CPU management testing.

    1. Prepare a domain for testing, install qemu-guest-agent if needed.
    2. Checking for vcpu numbers in vcpucount, vcpuinfo, domain xml,
       vcpupin and inside domain.
    3. Plug vcpu for the domain.
    4. Repeat step 2 to check again.
    5. Control domain(save, managedsave, s3, s4, etc.).
    6. Repeat step 2 to check again.
    7. Recover domain(restore, wakeup, etc.).
    8. Repeat step 2 to check again.
    9. Unplug vcpu for the domain.
    10. Repeat step 2 to check again.
    11. Repeat step 5 to control domain (as BZ#1088216 is not fixed, skip
        save/managedsave related actions).
    12. Repeat step 2 to check again.
    13. Repeat step 7 to recover domain.
    14. Repeat step 2 to check again.
    15. Recover test environment.
    """

    def manipulate_domain(vm_name, vm_operation, recover=False):
        """
        Operate domain to given state or recover it.

        :params vm_name: Name of the VM domain
        :params vm_operation: Operation to be performed on VM domain
                              like save, managedsave, suspend
        :params recover: flag to inform whether to set or reset
                         vm_operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
        if not recover:
            if vm_operation == "save":
                save_option = ""
                result = virsh.save(vm_name, save_file, save_option,
                                    ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "managedsave":
                managedsave_option = ""
                result = virsh.managedsave(vm_name, managedsave_option,
                                           ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmsuspend(vm_name, suspend_target,
                                            ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s4":
                suspend_target = "disk"
                result = virsh.dompmsuspend(vm_name, suspend_target,
                                            ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                # Wait domain state change: 'in shutdown' -> 'shut off'
                utils_misc.wait_for(lambda: virsh.is_dead(vm_name), 5)
            elif vm_operation == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                vm.reboot()
            else:
                logging.debug("No operation for the domain")

        else:
            if vm_operation == "save":
                if os.path.exists(save_file):
                    result = virsh.restore(save_file, ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif vm_operation in ["managedsave", "s4"]:
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmwakeup(vm_name, ignore_status=True,
                                           debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                pass
            else:
                logging.debug("No need recover the domain")

    def online_new_vcpu(vm, vcpu_plug_num):
        """
        For Fedora/RHEL7 guests, udev cannot online hot-added CPUs
        automatically (refer to BZ#968811 for details), so enable them manually.

        :params vm: VM object
        :params vcpu_plug_num: Hotplugged vcpu count
        """
        cpu_is_online = []
        session = vm.wait_for_login()
        for i in range(1, int(vcpu_plug_num)):
            cpu_is_online.append(False)
            cpu = "/sys/devices/system/cpu/cpu%s/online" % i
            cmd_s, cmd_o = session.cmd_status_output("cat %s" % cpu)
            logging.debug("cmd exist status: %s, cmd output %s", cmd_s, cmd_o)
            if cmd_s != 0:
                logging.error("Can not find cpu %s in domain", i)
            else:
                if cmd_o.strip() == "0":
                    if session.cmd_status("echo 1 > %s" % cpu) == 0:
                        cpu_is_online[i-1] = True
                    else:
                        logging.error("Fail to enable cpu %s online", i)
                else:
                    cpu_is_online[i-1] = True
        session.close()
        return False not in cpu_is_online

    def check_setvcpus_result(cmd_result, expect_error):
        """
        Check command result.

        For setvcpus, pass unsupported commands (plug or unplug vcpus) by
        checking command stderr.

        :params cmd_result: Command result
        :params expect_error: Whether to expect error True or False
        """
        if cmd_result.exit_status != 0:
            if expect_error:
                logging.debug("Expect fail: %s", cmd_result.stderr)
                return
            # setvcpu/hotplug is only available as of qemu 1.5 and it's still
            # evolving. In general the addition of vcpu's may use the QMP
            # "cpu_set" (qemu 1.5) or "cpu-add" (qemu 1.6 and later) commands.
            # The removal of vcpu's may work in qemu 1.5 due to how cpu_set
            # can set vcpus online or offline; however, there doesn't appear
            # to be a complementary cpu-del feature yet, so we can add, but
            # not delete in 1.6.

            # A 1.6 qemu will not allow the cpu-add command to be run on
            # a configuration using <os> machine property 1.4 or earlier.
            # That is the XML <os> element with the <type> property having
            # an attribute 'machine' which is a tuple of 3 elements separated
            # by a dash, such as "pc-i440fx-1.5" or "pc-q35-1.5".
            if re.search("unable to execute QEMU command 'cpu-add'",
                         cmd_result.stderr):
                test.cancel("guest <os> machine property may be too"
                            "  old to allow hotplug")

            # A qemu older than 1.5 or an unplug for 1.6 will result in
            # the following failure.  In general, any time libvirt determines
            # it cannot support adding or removing a vCPU...
            if re.search("cannot change vcpu count of this domain",
                         cmd_result.stderr):
                test.cancel("Unsupport virsh setvcpu hotplug")

            # Maybe QEMU doesn't support unplug vcpu
            if re.search("Operation not supported: qemu didn't unplug the vCPUs",
                         cmd_result.stderr):
                test.cancel("Your qemu unsupport unplug vcpu")

            # Qemu guest agent version could be too low
            if re.search("The command guest-get-vcpus has not been found",
                         cmd_result.stderr):
                err_msg = "Your agent version is too low: %s" % cmd_result.stderr
                logging.warning(err_msg)
                test.cancel(err_msg)

            # Attempting to enable more vCPUs in the guest than is currently
            # enabled in the guest but less than the maximum count for the VM
            if re.search("requested vcpu count is greater than the count of "
                         "enabled vcpus in the domain",
                         cmd_result.stderr):
                logging.debug("Expect fail: %s", cmd_result.stderr)
                return

            # Otherwise, it seems we have a real error
            test.fail("Run failed with right command: %s"
                      % cmd_result.stderr)
        else:
            if expect_error:
                test.fail("Expect fail but run successfully")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_operation = params.get("vm_operation", "null")
    vcpu_max_num = int(params.get("vcpu_max_num"))
    vcpu_current_num = int(params.get("vcpu_current_num"))
    vcpu_plug = "yes" == params.get("vcpu_plug", "no")
    vcpu_plug_num = int(params.get("vcpu_plug_num"))
    vcpu_unplug = "yes" == params.get("vcpu_unplug", "no")
    vcpu_unplug_num = int(params.get("vcpu_unplug_num"))
    vcpu_max_timeout = int(params.get("vcpu_max_timeout", "480"))
    setvcpu_option = params.get("setvcpu_option", "")
    agent_channel = "yes" == params.get("agent_channel", "yes")
    install_qemuga = "yes" == params.get("install_qemuga", "no")
    start_qemuga = "yes" == params.get("start_qemuga", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    setvcpu_readonly = "yes" == params.get("setvcpu_readonly", "no")
    status_error = "yes" == params.get("status_error", "no")
    pin_before_plug = "yes" == params.get("pin_before_plug", "no")
    pin_after_plug = "yes" == params.get("pin_after_plug", "no")
    pin_before_unplug = "yes" == params.get("pin_before_unplug", "no")
    pin_after_unplug = "yes" == params.get("pin_after_unplug", "no")
    pin_vcpu = params.get("pin_vcpu")
    pin_cpu_list = params.get("pin_cpu_list", "x")
    check_after_plug_fail = "yes" == params.get("check_after_plug_fail", "no")
    with_stress = "yes" == params.get("run_stress", "no")
    iterations = int(params.get("test_itr", 1))
    topology_correction = "yes" == params.get("topology_correction", "no")
    # Init expect vcpu count values
    expect_vcpu_num = {'max_config': vcpu_max_num, 'max_live': vcpu_max_num,
                       'cur_config': vcpu_current_num,
                       'cur_live': vcpu_current_num,
                       'guest_live': vcpu_current_num}
    if check_after_plug_fail:
        expect_vcpu_num_bk = expect_vcpu_num.copy()
    # Init expect vcpu pin values
    expect_vcpupin = {}
    result_vcpu = True

    # Init cpu-list for vcpupin
    host_cpu_count = os.sysconf('SC_NPROCESSORS_CONF')
    if (int(host_cpu_count) < 2) and (not pin_cpu_list == "x"):
        test.cancel("We need more cpus on host in this case for the cpu-list"
                    "=%s. But current number of cpu on host is %s."
                    % (pin_cpu_list, host_cpu_count))

    cpus_list = cpu_util.cpu_online_list()
    logging.debug("Active cpus in host are %s", cpus_list)

    cpu_seq_str = ""
    for i in range(len(cpus_list) - 1):
        if int(cpus_list[i]) + 1 == int(cpus_list[i + 1]):
            cpu_seq_str = "%s-%s" % (cpus_list[i], cpus_list[i + 1])
            break

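    # Expand the cfg placeholders 'x', 'x-y', 'x,y' and 'x-y,^z' into real
    # host CPU ids, e.g. '3', '0-1', '0,1' or '0-1,^1'.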
    if pin_cpu_list == "x":
        pin_cpu_list = cpus_list[-1]
    if pin_cpu_list == "x-y":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str
        else:
            pin_cpu_list = "%s-%s" % (cpus_list[0], cpus_list[0])
    elif pin_cpu_list == "x,y":
        pin_cpu_list = "%s,%s" % (cpus_list[0], cpus_list[1])
    elif pin_cpu_list == "x-y,^z":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str + ",^%s" % cpu_seq_str.split('-')[1]
        else:
            pin_cpu_list = "%s,%s,^%s" % (cpus_list[0], cpus_list[1],
                                          cpus_list[0])
    else:
        # Just use the value get from cfg
        pass

    need_mkswap = False
    # Back up domain XML
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    try:
        # Customize domain vcpu number
        if vm.is_alive():
            vm.destroy()
        if agent_channel:
            vmxml.set_agent_channel()
        else:
            vmxml.remove_agent_channels()
        vmxml.sync()

        vmxml.set_vm_vcpus(vm_name, vcpu_max_num, vcpu_current_num,
                           topology_correction=topology_correction)
        # Do not apply S3/S4 on power
        cpu_arch = platform.machine()
        if cpu_arch in ('x86_64', 'i386', 'i686'):
            vmxml.set_pm_suspend(vm_name, "yes", "yes")
        vm.start()
        if with_stress:
            bt = utils_test.run_avocado_bg(vm, params, test)
            if not bt:
                test.cancel("guest stress failed to start")
        # Create swap partition/file if necessary
        if vm_operation == "s4":
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition")
            vm.create_swap_partition()

        # Prepare qemu guest agent
        if install_qemuga:
            vm.prepare_guest_agent(prepare_xml=False, start=start_qemuga)
            vm.setenforce(0)
        else:
            # Remove qemu-guest-agent for negative test
            vm.remove_package('qemu-guest-agent')

        # Run test
        for _ in range(iterations):
            result_vcpu = utils_hotplug.check_vcpu_value(vm, expect_vcpu_num)
            # plug vcpu
            if vcpu_plug:
                # Pin vcpu
                if pin_before_plug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    expect_vcpupin = {pin_vcpu: pin_cpu_list}

                result = virsh.setvcpus(vm_name, vcpu_plug_num, setvcpu_option,
                                        readonly=setvcpu_readonly,
                                        ignore_status=True, debug=True)
                check_setvcpus_result(result, status_error)

                if setvcpu_option == "--config":
                    expect_vcpu_num['cur_config'] = vcpu_plug_num
                elif setvcpu_option == "--guest":
                    # setvcpus '--guest' only affects the vcpu number in the guest
                    expect_vcpu_num['guest_live'] = vcpu_plug_num
                else:
                    expect_vcpu_num['cur_live'] = vcpu_plug_num
                    expect_vcpu_num['guest_live'] = vcpu_plug_num
                    if not status_error:
                        if not utils_misc.wait_for(lambda: utils_misc.check_if_vm_vcpu_match(vcpu_plug_num, vm),
                                                   vcpu_max_timeout, text="wait for vcpu online") or not online_new_vcpu(vm, vcpu_plug_num):
                            test.fail("Fail to enable new added cpu")

                # Pin vcpu
                if pin_after_plug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    expect_vcpupin = {pin_vcpu: pin_cpu_list}

                if status_error and check_after_plug_fail:
                    result_vcpu = utils_hotplug.check_vcpu_value(vm,
                                                                 expect_vcpu_num_bk,
                                                                 {},
                                                                 setvcpu_option)

                if not status_error:
                    if restart_libvirtd:
                        utils_libvirtd.libvirtd_restart()

                    # Check vcpu number and related commands
                    result_vcpu = utils_hotplug.check_vcpu_value(vm,
                                                                 expect_vcpu_num,
                                                                 expect_vcpupin,
                                                                 setvcpu_option)

                    # Control domain
                    manipulate_domain(vm_name, vm_operation)

                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        result_vcpu = utils_hotplug.check_vcpu_value(vm,
                                                                     expect_vcpu_num,
                                                                     expect_vcpupin,
                                                                     setvcpu_option)

                    # Recover domain
                    manipulate_domain(vm_name, vm_operation, recover=True)

                    # Resuming a domain from S4 may take a long time (QEMU
                    # bug), so wait up to 10 mins and skip the remaining
                    # steps if the domain does not resume successfully.
                    try:
                        vm.wait_for_login(timeout=600)
                    except Exception as e:
                        test.warn("Skip remaining test steps as domain"
                                  " not resume in 10 mins: %s" % e)
                    # For vcpu hotplug/unplug without the '--config' flag,
                    # after suspending the domain to disk (shut off) and
                    # restarting it, the current live vcpu number recovers
                    # to its original value.
                    if vm_operation == 's4':
                        if setvcpu_option.count("--config"):
                            expect_vcpu_num['cur_live'] = vcpu_plug_num
                            expect_vcpu_num['guest_live'] = vcpu_plug_num
                        elif setvcpu_option.count("--guest"):
                            expect_vcpu_num['guest_live'] = vcpu_plug_num
                        else:
                            expect_vcpu_num['cur_live'] = vcpu_current_num
                            expect_vcpu_num['guest_live'] = vcpu_current_num
                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        result_vcpu = utils_hotplug.check_vcpu_value(vm,
                                                                     expect_vcpu_num,
                                                                     expect_vcpupin,
                                                                     setvcpu_option)

            # Unplug vcpu
            # Since QEMU 2.2.0, by default all current vcpus are
            # non-hotpluggable when the VM starts, and vcpu 0 (id=1) must
            # always be present and non-hotpluggable, which means we can't
            # hotunplug these vcpus directly. So we either hotplug more vcpus
            # before the hotunplug, or set the 'hotpluggable' attribute to
            # 'yes' on every vcpu except vcpu 0, so that libvirt can find
            # enough hotpluggable vcpus to reach the desired target vcpu
            # count. As the simpler preparation step, we choose to hotplug
            # more vcpus here.
            if vcpu_unplug:
                if setvcpu_option == "--live":
                    logging.info("Hotplug vcpu to the maximum count to make"
                                 "sure all these new plugged vcpus are "
                                 "hotunpluggable")
                    result = virsh.setvcpus(vm_name, vcpu_max_num, '--live',
                                            debug=True)
                    libvirt.check_exit_status(result)
                # Pin vcpu
                if pin_before_unplug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    # As the vcpu will unplug later, so set
                    # expect_vcpupin to empty
                    expect_vcpupin = {}

                # setvcpus is asynchronous: even after it returns, the
                # operation may not be complete, so the guest vcpu count
                # must be polled. When unplugging from the maximum vcpu
                # number down to 1, keep observing after setvcpus returns
                # until the count stops changing.
                result = virsh.setvcpus(vm_name, vcpu_unplug_num,
                                        setvcpu_option,
                                        readonly=setvcpu_readonly,
                                        ignore_status=True, debug=True)
                unsupport_str = utils_hotplug.vcpuhotunplug_unsupport_str()
                if unsupport_str and (unsupport_str in result.stderr):
                    test.cancel("Vcpu hotunplug is not supported in this host:"
                                "\n%s" % result.stderr)
                # Initialize so the finally block is safe if login fails
                session = None
                try:
                    session = vm.wait_for_login()
                    cmd = "lscpu | grep \"^CPU(s):\""
                    operation = "setvcpus"
                    prev_output = -1
                    while True:
                        ret, output = session.cmd_status_output(cmd)
                        if ret:
                            test.error("Run lscpu failed, output: %s" % output)
                        output = output.split(":")[-1].strip()

                        if int(prev_output) == int(output):
                            break
                        prev_output = output
                        time.sleep(5)
                    logging.debug("CPUs available from inside guest after %s - %s",
                                  operation, output)
                    if int(output) != vcpu_unplug_num:
                        test.fail("CPU %s failed as cpus are not "
                                  "reflected from inside guest" % operation)
                finally:
                    if session:
                        session.close()

                check_setvcpus_result(result, status_error)
                if setvcpu_option == "--config":
                    expect_vcpu_num['cur_config'] = vcpu_unplug_num
                elif setvcpu_option == "--guest":
                    # setvcpus '--guest' only affects the vcpu number
                    # inside the guest
                    expect_vcpu_num['guest_live'] = vcpu_unplug_num
                else:
                    expect_vcpu_num['cur_live'] = vcpu_unplug_num
                    expect_vcpu_num['guest_live'] = vcpu_unplug_num

                # Pin vcpu
                if pin_after_unplug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    expect_vcpupin = {pin_vcpu: pin_cpu_list}

                if not status_error:
                    if restart_libvirtd:
                        utils_libvirtd.libvirtd_restart()

                    # Check vcpu number and related commands
                    result_vcpu = utils_hotplug.check_vcpu_value(vm,
                                                                 expect_vcpu_num,
                                                                 expect_vcpupin,
                                                                 setvcpu_option)

                    # Control domain
                    manipulate_domain(vm_name, vm_operation)

                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        result_vcpu = utils_hotplug.check_vcpu_value(vm,
                                                                     expect_vcpu_num,
                                                                     expect_vcpupin,
                                                                     setvcpu_option)

                    # Recover domain
                    manipulate_domain(vm_name, vm_operation, recover=True)

                    # Resuming a domain from S4 may take a long time
                    # (QEMU bug); here we wait up to 10 minutes, then skip
                    # the remaining test steps if the domain did not resume
                    try:
                        vm.wait_for_login(timeout=600)
                    except Exception as e:
                        test.warn("Skip remaining test steps as domain"
                                  " not resume in 10 mins: %s" % e)
                    # For vcpu hotplug/unplug without the '--config' flag,
                    # after suspending the domain to disk (shut off) and
                    # restarting it, the current live vcpu number reverts
                    # to its original value
                    if vm_operation == 's4':
                        if setvcpu_option.count("--config"):
                            expect_vcpu_num['cur_live'] = vcpu_unplug_num
                            expect_vcpu_num['guest_live'] = vcpu_unplug_num
                        elif setvcpu_option.count("--guest"):
                            expect_vcpu_num['guest_live'] = vcpu_unplug_num
                        else:
                            expect_vcpu_num['cur_live'] = vcpu_current_num
                            expect_vcpu_num['guest_live'] = vcpu_current_num
                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        result_vcpu = utils_hotplug.check_vcpu_value(vm,
                                                                     expect_vcpu_num,
                                                                     expect_vcpupin,
                                                                     setvcpu_option)
    # Recover env
    finally:
        if need_mkswap:
            vm.cleanup_swap()
        if with_stress:
            bt.join(ignore_status=True)
        vm.destroy()
        backup_xml.sync()

    if not status_error:
        if not result_vcpu:
            test.fail("Test Failed")
Example #57
def run(test, params, env):
    """
    Test DAC setting in both domain xml and qemu.conf.

    (1) Init variables for test.
    (2) Set VM xml and qemu.conf with proper DAC label, also set
        monitor socket parent dir with proper ownership and mode.
    (3) Start VM and check the context.
    """

    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("vm_sec_type", "dynamic")
    vm_sec_model = params.get("vm_sec_model", "dac")
    vm_sec_label = params.get("vm_sec_label", None)
    vm_sec_relabel = params.get("vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': vm_sec_model,
                'relabel': vm_sec_relabel}
    if vm_sec_label:
        sec_dict['label'] = vm_sec_label
    set_qemu_conf = "yes" == params.get("set_qemu_conf", "no")
    # Get per-img seclabel variables
    disk_type = params.get("disk_type")
    disk_target = params.get('disk_target')
    disk_src_protocol = params.get("disk_source_protocol")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)
    invalid_label = 'yes' == params.get("invalid_label", "no")
    relabel = params.get("per_img_sec_relabel")
    sec_label = params.get("per_img_sec_label")
    per_sec_model = params.get("per_sec_model", 'dac')
    per_img_dict = {'sec_model': per_sec_model, 'relabel': relabel,
                    'sec_label': sec_label}
    params.update(per_img_dict)
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user", 'qemu')
    qemu_group = params.get("qemu_group", 'qemu')
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)

    qemu_sock_mod = False
    qemu_sock_path = '/var/lib/libvirt/qemu/'
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        if set_qemu_conf:
            # Set qemu.conf for user and group
            if qemu_user:
                qemu_conf.user = qemu_user
            if qemu_group:
                qemu_conf.group = qemu_group
            if dynamic_ownership:
                qemu_conf.dynamic_ownership = 1
            else:
                qemu_conf.dynamic_ownership = 0
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()
            st = os.stat(qemu_sock_path)
            if not bool(st.st_mode & stat.S_IWGRP):
                # chmod g+w
                os.chmod(qemu_sock_path, st.st_mode | stat.S_IWGRP)
                qemu_sock_mod = True

        # Set the context of the VM.
        logging.debug("sec_dict is %s" % sec_dict)
        vmxml.set_seclabel([sec_dict])
        vmxml.sync()

        # Get per-image seclabel in id string
        if sec_label:
            per_img_usr, per_img_grp = sec_label.split(':')
            sec_label_id = format_user_group_str(per_img_usr, per_img_grp)

        # Start VM to check the qemu process and image.
        try:
            # Set per-img sec context and start vm
            utlv.set_vm_disk(vm, params)
            # Start VM successfully.
            if status_error:
                if invalid_label:
                    # An invalid label is expected to fail; see bug 1165485
                    raise error.TestNAError("The label or model is not valid;"
                                            " for more info see bug: https://"
                                            "bugzilla.redhat.com/show_bug.cgi"
                                            "?id=1165485")
                else:
                    raise error.TestFail("Test succeeded in negative case.")

            # Get vm process label when VM is running.
            vm_pid = vm.get_pid()
            pid_stat = os.stat("/proc/%d" % vm_pid)
            vm_process_uid = pid_stat.st_uid
            vm_process_gid = pid_stat.st_gid
            vm_context = "%s:%s" % (vm_process_uid, vm_process_gid)
            logging.debug("vm process label is: %s", vm_context)

            # Get vm image label when VM is running
            if disk_type != "network":
                disks = vm.get_blk_devices()
                f = os.open(disks[disk_target]['source'], os.O_RDONLY)
                stat_re = os.fstat(f)
                disk_context = "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
                os.close(f)
                logging.debug("The disk dac label after vm start is: %s",
                              disk_context)
                if sec_label and relabel == 'yes':
                    if disk_context != sec_label_id:
                        raise error.TestFail("The disk label is not equal to "
                                             "'%s'." % sec_label_id)

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                raise error.TestFail("Test failed in positive case."
                                     "error: %s" % e)
    finally:
        # clean up
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if qemu_sock_mod:
            st = os.stat(qemu_sock_path)
            # chmod g-w to revert the earlier g+w
            os.chmod(qemu_sock_path, st.st_mode & ~stat.S_IWGRP)
        if set_qemu_conf:
            qemu_conf.restore()
            libvirtd.restart()
        utils_selinux.set_status(backup_sestatus)
        if disk_src_protocol == 'iscsi':
            utlv.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src_protocol == 'gluster':
            utlv.setup_or_cleanup_gluster(False, vol_name, brick_path)
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            utlv.setup_or_cleanup_nfs(is_setup=False,
                                      restore_selinux=backup_sestatus)
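
The helper format_user_group_str() referenced above is defined elsewhere
in the suite. A plausible minimal sketch of what it must do, assuming the
per-image label may carry either names ('qemu:qemu') or numeric ids, and
resolving both to the 'uid:gid' string that os.stat() ownership is
compared against:

import grp
import pwd


def format_user_group_str(user, group):
    """Resolve a user and group (name or numeric id) to a 'uid:gid' string.

    The DAC label in the XML may use names, while os.stat() reports
    numeric ids, so normalize both sides to numbers before comparing.
    """
    try:
        uid = int(user)
    except ValueError:
        uid = pwd.getpwnam(user).pw_uid
    try:
        gid = int(group)
    except ValueError:
        gid = grp.getgrnam(group).gr_gid
    return "%s:%s" % (uid, gid)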
Example #58
def run(test, params, env):
    """
    Test svirt in saving and restoring VM.

    (1).Init variables for test.
    (2).Label the VM and disk with proper label.
    (3).Save VM and check the context.
    (4).Restore VM and check the context.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_save_restore_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_save_restore_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_save_restore_vm_sec_model", "selinux")
    sec_label = params.get("svirt_save_restore_vm_sec_label", None)
    sec_relabel = params.get("svirt_save_restore_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label,
                'relabel': sec_relabel}
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about image.
    img_label = params.get('svirt_save_restore_disk_label')
    # Label the disks of VM with img_label.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in list(disks.values()):
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        utils_selinux.set_context_of_file(filename=disk_path,
                                          context=img_label)
    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the context of the VM.
    vmxml.set_seclabel([sec_dict])
    vmxml.sync()

    # Init a path to save VM.
    save_path = os.path.join(data_dir.get_tmp_dir(), "svirt_save")
    try:
        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            vm.save_to_file(path=save_path)
            vm.restore_from_file(path=save_path)
            # Save and restore VM successfully.
            if status_error:
                test.fail("Test succeeded in negative case.")
        except virt_vm.VMError as e:
            if not status_error:
                error_msg = "Test failed in positive case.\n error: %s\n" % e
                if str(e).count("getfd"):
                    error_msg += ("For more info please refer to"
                                  " https://bugzilla.redhat.com/show_bug.cgi?id=976632")
                test.fail(error_msg)
    finally:
        # clean up
        for path, label in list(backup_labels_of_disks.items()):
            utils_selinux.set_context_of_file(filename=path, context=label)
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
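
The "check the context" steps in this example rely on SELinux labels on
the save file and disks. A short hedged sketch of such a check, reusing
utils_selinux.get_context_of_file() from above; the expected type string
is policy-dependent and purely illustrative:

import logging

from virttest import utils_selinux


def save_file_has_type(save_path, expected_type="svirt_image_t"):
    """Return True if the saved-state file carries the expected SELinux type.

    A context looks like 'system_u:object_r:svirt_image_t:s0:c1,c2';
    adjust expected_type to whatever your policy assigns to guest files.
    """
    context = utils_selinux.get_context_of_file(filename=save_path)
    logging.debug("Context of %s is %s", save_path, context)
    return expected_type in context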
Example #59
def run(test, params, env):
    """
    Test virsh {at|de}tach-interface command.

    1) Prepare test environment and its parameters
    2) Operate virsh on one or more devices
    3) Check functionality of each device
    4) Check functionality of mmconfig option
    5) Restore domain
    6) Handle results
    """

    dev_obj = params.get("vadu_dev_objs")
    # Skip chardev hotplug on rhel6 host as it is not supported
    if "Serial" in dev_obj:
        if not libvirt_version.version_compare(1, 1, 0):
            raise error.TestNAError("You libvirt version not supported"
                                    " attach/detach Serial devices")

    logging.info("Preparing initial VM state")
    # Prepare test environment and its parameters
    test_params = TestParams(params, env, test)
    if test_params.start_vm:
        # Make sure VM is working
        test_params.main_vm.verify_alive()
        test_params.main_vm.wait_for_login().close()
    else:  # VM is not supposed to be started
        if test_params.main_vm.is_alive():
            test_params.main_vm.destroy(gracefully=True)
    # Capture backup of original XML early in test
    test_params.vmxml = VMXML.new_from_inactive_dumpxml(
        test_params.main_vm.name)
    # All devices should share the same access state
    test_params.virsh = virsh.Virsh(ignore_status=True)
    logging.info("Creating %d test device instances", len(test_params.devs))
    # Create test objects from cfg. class names via subclasses above
    test_devices = [globals()[class_name](test_params)  # instantiate
                    for class_name in test_params.devs]  # vadu_dev_objs
    operational_results = []
    preboot_results = []
    pstboot_results = []
    try:
        operational_action(test_params, test_devices, operational_results)
        # Fail early if attach-device return value is not expected
        analyze_results(test_params=test_params,
                        operational_results=operational_results)

        #  Can't do functional testing with a cold VM, only test hot-attach
        preboot_action(test_params, test_devices, preboot_results)

        logging.info("Preparing test VM state for post-boot functional testing")
        if test_params.start_vm:
            # Hard-reboot required
            test_params.main_vm.destroy(gracefully=True,
                                        free_mac_addresses=False)
        try:
            test_params.main_vm.start()
        except virt_vm.VMStartError:
            raise error.TestFail('VM Failed to start for some reason!')
        # Signal devices reboot is finished
        for test_device in test_devices:
            test_device.booted = True
        test_params.main_vm.wait_for_login().close()
        postboot_action(test_params, test_devices, pstboot_results)
        analyze_results(test_params=test_params,
                        preboot_results=preboot_results,
                        pstboot_results=pstboot_results)
    finally:
        logging.info("Restoring VM from backup, then checking results")
        test_params.main_vm.destroy(gracefully=False,
                                    free_mac_addresses=False)
        test_params.vmxml.undefine()
        test_params.vmxml.restore()  # Recover the original XML
        test_params.vmxml.define()
        if not test_params.start_vm:
            # Test began with not start_vm, shut it down.
            test_params.main_vm.destroy(gracefully=True)
        # Device cleanup can raise multiple exceptions, do it last:
        logging.info("Cleaning up test devices")
        test_params.cleanup(test_devices)
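
For reference, a minimal hedged sketch of the attach/detach round trip the
test above exercises, assuming virttest's virsh wrappers expose
attach_interface() and detach_interface(); the fixed MAC is illustrative,
used only so the detach can name the same NIC:

import logging

from virttest import virsh


def attach_then_detach_default_net(vm_name):
    """Hot-attach a NIC on the 'default' network, then detach it again.

    attach-interface generates a MAC when none is given; passing one
    explicitly lets detach-interface address the same device.
    """
    mac = "52:54:00:12:34:56"  # illustrative fixed MAC
    result = virsh.attach_interface(vm_name,
                                    "network default --mac %s --live" % mac,
                                    ignore_status=True, debug=True)
    if result.exit_status:
        logging.error("attach-interface failed: %s", result.stderr)
        return False
    result = virsh.detach_interface(vm_name, "network --mac %s --live" % mac,
                                    ignore_status=True, debug=True)
    return result.exit_status == 0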