def vm_recover_check(guest_name, option):
    """
    Check if the vm can be recovered correctly after a managedsave.

    :param guest_name: Checked vm's name.
    :param option: managedsave command option.
    :raises error.TestFail: when the domain state does not match the
        expectation at any step.
    """
    # While the managed save image exists the domain must be inactive.
    if vm.is_alive():
        raise error.TestFail("Guest should be inactive")
    virsh.start(guest_name)
    # Starting from the managed save image must leave the domain active.
    if vm.is_dead():
        raise error.TestFail("Guest should be active")
    if option:
        if option.count("running"):
            if vm.is_dead() or vm.is_paused():
                raise error.TestFail("Guest state should be"
                                     " running after started"
                                     " because of '--running' option")
        elif option.count("paused"):
            if not vm.is_paused():
                raise error.TestFail("Guest state should be"
                                     " paused after started"
                                     " because of '--paused' option")
    else:
        # Without a managedsave option the restored state must match the
        # initial guest state configured for the test.
        if params.get("paused_after_start_vm") == "yes":
            if not vm.is_paused():
                # BUG FIX: message typo 'initia' -> 'initial'.
                raise error.TestFail("Guest state should be"
                                     " paused after started"
                                     " because of initial guest state")
Example #2
0
    def vm_recover_check(guest_name, option, libvirtd, check_shutdown=False):
        """
        Check if the vm can be recovered correctly.

        :param guest_name : Checked vm's name.
        :param option : managedsave command option.
        :param libvirtd : libvirtd service handle, restarted mid-check.
        :param check_shutdown : when True, also verify a shutdown/start
            cycle with login after recovery.
        :raises error.TestFail: when any state check fails.
        """
        # This time vm not be shut down
        if vm.is_alive():
            raise error.TestFail("Guest should be inactive")
        # Check vm managed save state.
        ret = virsh.dom_list("--managed-save --inactive")
        vm_state1 = re.findall(r".*%s.*" % guest_name,
                               ret.stdout.strip())[0].split()[2]
        ret = virsh.dom_list("--managed-save --all")
        vm_state2 = re.findall(r".*%s.*" % guest_name,
                               ret.stdout.strip())[0].split()[2]
        if vm_state1 != "saved" or vm_state2 != "saved":
            raise error.TestFail("Guest state should be saved")

        virsh.start(guest_name)
        # This time vm should be in the list
        if vm.is_dead():
            raise error.TestFail("Guest should be active")
        # Restart libvirtd and check vm status again.
        libvirtd.restart()
        if vm.is_dead():
            raise error.TestFail("Guest should be active after"
                                 " restarting libvirtd")
        # Starting the domain must consume the managed save image.
        if os.path.exists(managed_save_file):
            raise error.TestFail("Managed save image exist "
                                 "after starting the domain")
        if option:
            if option.count("running"):
                if vm.is_dead() or vm.is_paused():
                    raise error.TestFail("Guest state should be"
                                         " running after started"
                                         " because of '--running' option")
            elif option.count("paused"):
                if not vm.is_paused():
                    raise error.TestFail("Guest state should be"
                                         " paused after started"
                                         " because of '--paused' option")
        else:
            if params.get("paused_after_start_vm") == "yes":
                if not vm.is_paused():
                    # BUG FIX: message typo 'initia' -> 'initial'.
                    raise error.TestFail("Guest state should be"
                                         " paused after started"
                                         " because of initial guest state")
        if check_shutdown:
            # Resume the domain.
            if vm.is_paused():
                vm.resume()
            vm.wait_for_login()
            # Shutdown and start the domain; it should come up in running
            # state and be reachable via login.
            vm.shutdown()
            vm.wait_for_shutdown()
            vm.start()
            vm.wait_for_login()
 def check_result(result, status_error):
     """
     Check virt-v2v command result

     :param result: CmdResult of the virt-v2v run.
     :param status_error: True when the command was expected to fail.
     """
     libvirt.check_exit_status(result, status_error)
     output = result.stdout + result.stderr
     if skip_check:
         logging.info('Skip checking vm after conversion')
     elif not status_error:
         if output_mode == 'rhev':
             if not utils_v2v.import_vm_to_ovirt(params, address_cache,
                                                 timeout=v2v_timeout):
                 test.fail('Import VM failed')
         # BUG FIX: was an independent 'if'; output modes are mutually
         # exclusive, so use elif like the sibling check_result helpers.
         elif output_mode == 'libvirt':
             try:
                 virsh.start(vm_name, debug=True, ignore_status=False)
             # BUG FIX: 'except Exception, e' is Python-2-only syntax.
             except Exception as e:
                 test.fail('Start vm failed: %s' % str(e))
         # Check guest following the checkpoint document after conversion
         vmchecker = VMChecker(test, params, env)
         params['vmchecker'] = vmchecker
         if params.get('skip_vm_check') != 'yes':
             if checkpoint != 'win2008r2_ostk':
                 ret = vmchecker.run()
                 if len(ret) == 0:
                     logging.info("All common checkpoints passed")
             else:
                 check_BSOD()
             # Merge 2 error lists
             error_list.extend(vmchecker.errors)
def check_numatune_xml(params):
    """
    Verify that the numa mode/nodeset in the guest XML match the params.

    :params: the parameter dictionary
    :return: True when the XML agrees with the expectation, else False.
    """
    vm_name = params.get("main_vm")
    expected_mode = params.get("numa_mode", "")
    expected_nodeset = params.get("numa_nodeset", "")
    options = params.get("options", "")

    # --config option will act after vm shutdown.
    if options == "config":
        virsh.shutdown(vm_name)
    # The numa params can only be verified while the vm is running.
    if not virsh.is_alive(vm_name):
        virsh.start(vm_name)

    try:
        numa_params = libvirt_xml.VMXML.get_numa_memory_params(vm_name)
    except LibvirtXMLAccessorError:
        # The XML omits the numa entry when placement is auto and mode is
        # strict, so fall back to the implied values in that case.
        numa_params = {"placement": "auto", "mode": "strict"}

    if not numa_params:
        logging.error("Could not get numa parameters for %s", vm_name)
        return False

    mode_from_xml = numa_params["mode"]
    # With auto placement there is no nodeset entry in the numa params.
    nodeset_from_xml = numa_params.get("nodeset", "")

    if expected_mode and expected_mode != mode_from_xml:
        logging.error("To expect %s: %s", expected_mode, mode_from_xml)
        return False

    # Nodeset spellings differ between params and XML (e.g. '0,1,2'
    # becomes '0-2'), so compare normalized representations instead of
    # the raw strings.
    expected_nodeset = cpus_parser(expected_nodeset)
    nodeset_from_xml = cpus_parser(nodeset_from_xml)

    if expected_nodeset and expected_nodeset != nodeset_from_xml:
        logging.error("To expect %s: %s", expected_nodeset, nodeset_from_xml)
        return False

    return True
Example #5
0
    def attach_interface():
        """
        Attach an interface to the vm and verify the result.

        1. Attach interface from xml;
        2. Check the vf driver after attach interface;
        3. Check the live xml after attach interface.

        :return: the matching interface element from the live XML.
        """
        if managed == "no":
            result = virsh.nodedev_detach(nodedev_pci_addr)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        logging.debug("attach interface xml:\n %s", new_iface)
        result = virsh.attach_device(vm_name, file_opt=new_iface.xml,
                                     flagstr=option, debug=True)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        if option == "--config":
            result = virsh.start(vm_name)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        # For option == "--persistent", after VM destroyed and then start,
        # the device should still be there.
        if option == "--persistent":
            virsh.destroy(vm_name)
            result = virsh.start(vm_name, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(live_xml)
        get_ip_by_mac(mac_addr, timeout=60)
        device = live_xml.devices
        if vf_type == "vf" or vf_type == "vf_pool":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "hostdev":
                    if interface.driver.driver_attr['name'] != 'vfio':
                        test.fail("The driver of the hostdev interface is not vfio\n")
                    break
            vf_addr_attrs = interface.hostdev_address.attrs
            pci_addr = addr_to_pci(vf_addr_attrs)
            nic_driver = os.readlink(os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1]
            if nic_driver != "vfio-pci":
                test.fail("The driver of the hostdev interface is not vfio\n")
        elif vf_type == "macvtap" or vf_type == "macvtap_network":
            # BUG FIX: 'match' was previously unassigned until a matching
            # interface was found, so the check could raise NameError (or
            # fail spuriously) on the first non-matching interface.
            match = "no"
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "direct":
                    if vf_type == "macvtap":
                        if interface.source["dev"] == new_iface.source["dev"]:
                            match = "yes"
                            vf_name = interface.source["dev"]
                    elif interface.source['dev'] in vf_name_list:
                        match = "yes"
                        vf_name = interface.source["dev"]
            # BUG FIX: check once after scanning all interfaces instead of
            # on every loop iteration.
            if match != "yes":
                test.fail("The dev name or mode of macvtap interface is wrong after attach\n")
        return interface
Example #6
0
 def vm_msave_remove_check(vm_name):
     """
     Verify managedsave-remove: the image must exist beforehand, be gone
     afterwards, and the domain must then boot into running state.
     """
     # The managed save image has to be present before removal.
     if not os.path.exists(managed_save_file):
         raise error.TestFail("Can't find managed save image")
     virsh.managedsave_remove(vm_name)
     # Removal must actually delete the image file.
     if os.path.exists(managed_save_file):
         raise error.TestFail("Managed save image still exists")
     virsh.start(vm_name)
     # Without a save image the domain boots fresh, i.e. running.
     if vm.state() != "running":
         raise error.TestFail("Guest state should be"
                              " running after started")
    def check_multi_guests(guests, start_delay, libvirt_guests):
        """
        Check start_delay option for multiple guests.
        """
        # The source vm must be down before it can be cloned.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Clone the requested number of guests and start every clone.
        clone_timeout = params.get("clone_timeout", 360)
        for idx in range(int(guests)):
            dst_vm = "%s_%s" % (vm_name, idx)
            utils_libguestfs.virt_clone_cmd(vm_name, dst_vm,
                                            True, timeout=clone_timeout)
            virsh.start(dst_vm, debug=True)

        # Wait 10 seconds for vm to start
        time.sleep(10)
        is_systemd = process.run("cat /proc/1/comm",
                                 shell=True).stdout.count("systemd")
        if is_systemd:
            libvirt_guests.restart()
            pattern = r'(.+ \d\d:\d\d:\d\d).+: Resuming guest.+done'
        else:
            ret = process.run("service libvirt-guests restart | \
                              awk '{ print strftime(\"%b %y %H:%M:%S\"), \
                              $0; fflush(); }'", shell=True)
            pattern = r'(.+ \d\d:\d\d:\d\d)+ Resuming guest.+done'

        # libvirt-guests status reads messages from the systemd journal;
        # they may not be ready immediately, so poll for a short while.
        def _saw_resume():
            return libvirt_guests.raw_status().stdout.count("Resuming guest")

        utils_misc.wait_for(_saw_resume, 5)
        if is_systemd:
            ret = libvirt_guests.raw_status()
        logging.info("status output: %s", ret.stdout)
        resume_time = re.findall(pattern, ret.stdout, re.M)
        if not resume_time:
            test.fail("Can't see messages of resuming guest")

        # Convert each matched timestamp into seconds since the epoch.
        resume_seconds = [time.mktime(time.strptime(
            tm, "%b %y %H:%M:%S")) for tm in resume_time]
        logging.info("Resume time in seconds: %s", resume_seconds)
        # Successive resumes must be spaced by at least start_delay.
        for idx in range(len(resume_seconds) - 1):
            if resume_seconds[idx + 1] - resume_seconds[idx] < int(start_delay):
                test.fail("Checking start_delay failed")
Example #8
0
    def restore(self, name):
        """
        Re-create a domain from a backup record.

        :param name: backup record (a dict) holding the domain's 'name',
            'inactive xml', 'persistent' flag, 'state' and 'autostart'
            setting — despite the parameter name, this is not a string.
        :raises Exception: when any virsh define/start/create/autostart
            command returns a non-zero exit status.
        """
        # The parameter is actually the backup dict; extract the real name.
        dom = name
        name = dom['name']
        # Remove any currently-existing domain with the same name first.
        doms = self.current_state
        if name in doms:
            self.remove(doms[name])

        # Dump the saved inactive XML to a temp file so virsh can read it.
        # delete=False because virsh needs the path after we close it.
        domfile = tempfile.NamedTemporaryFile(delete=False)
        fname = domfile.name
        domfile.writelines(dom['inactive xml'])
        domfile.close()

        try:
            if dom['persistent'] == 'yes':
                res = virsh.define(fname)
                if res.exit_status:
                    raise Exception(str(res))
                # A persistent domain that was not shut off is started again.
                if dom['state'] != 'shut off':
                    res = virsh.start(name)
                    if res.exit_status:
                        raise Exception(str(res))
            else:
                # Transient domains can only be created, not defined.
                res = virsh.create(fname)
                if res.exit_status:
                    raise Exception(str(res))
        finally:
            # Always drop the temp XML file, even when a command failed.
            os.remove(fname)

        if dom['autostart'] == 'enable':
            res = virsh.autostart(name, '')
            if res.exit_status:
                raise Exception(str(res))
Example #9
0
    def edit_iface(vm_name):
        """
        Modify vm's interface information by virsh edit command.

        :param vm_name: name of the domain to edit.
        :return: True when the edit result and the subsequent start result
            both match the expectation, False otherwise.
        """
        iface_type = params.get("iface_type")
        iface_model = params.get("iface_model")
        edit_error = "yes" == params.get("edit_error", "no")
        # BUG FIX: 'status' used to be referenced before assignment when
        # neither iface_type nor iface_model was configured (NameError).
        status = False
        if iface_type:
            edit_cmd = (r":%s /<interface type=.*>/<interface type='{0}'>/"
                        "".format(iface_type))
            status = libvirt.exec_virsh_edit(vm_name, [edit_cmd])
        elif iface_model:
            edit_cmd = (r":/<interface/,/<\/interface>/s/<model type=.*\/>/"
                        "<model type='%s'\/>/" % iface_model)
            status = libvirt.exec_virsh_edit(vm_name, [edit_cmd])

        if not status and not edit_error:
            logging.error("Expect success, but failure")
            return False
        if edit_error and status:
            logging.error("Expect error, but success")
            return False

        # Destroy domain and start it to check if vm can be started
        start_error = "yes" == params.get("start_error", "no")
        vm.destroy()
        ret = virsh.start(vm_name, ignore_status=True)
        if start_error and not ret.exit_status:
            logging.error("Vm started unexpectedly")
            return False
        if not start_error and ret.exit_status:
            logging.error("Vm failed to start")
            return False
        return True
Example #10
0
 def vm_managedsave_loop(vm_name, loop_range, libvirtd):
     """
     Repeatedly managedsave and restart a domain, then verify that
     libvirtd survived and the guest ended up in running state.
     """
     # The loop needs a running guest to start from.
     if vm.is_dead():
         virsh.start(vm_name)
     for loop_no in range(int(loop_range)):
         logging.debug("Test loop: %s" % loop_no)
         virsh.managedsave(vm_name)
         virsh.start(vm_name)
     # libvirtd itself must not have crashed during the loop.
     if not libvirtd.is_running():
         raise error.TestFail("libvirtd is stopped after cmd")
     # And the guest must be left running at the end.
     if vm.state() != "running":
         raise error.TestFail("Guest isn't in running state")
Example #11
0
    def vm_recover_check(guest_name):
        """
        Check if the vm can be recovered correctly.

        @param: guest_name : Checked vm's name.
        """
        # Before recovery the domain must be absent from the domain list.
        listing = virsh.dom_list()
        if re.search(guest_name, listing.stdout):
            raise error.TestFail("virsh list output invalid")
        virsh.start(guest_name)
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(guest_name)
        # After starting, the domain must show up in the domain list.
        listing = virsh.dom_list()
        if not re.search(guest_name, listing.stdout):
            raise error.TestFail("virsh list output invalid")
Example #12
0
def state_test():
    """
    Back up host virtualization state, disturb it in several ways, then
    let each state object detect and recover the changes.
    """
    states = [ServiceState(), FileState(), DirState(), DomainState(),
              NetworkState(), PoolState(), SecretState(), MountState()]
    for state in states:
        state.backup()
    # Deliberately disturb files, domains, networks, pools and services.
    utils.run('echo hello > /etc/exports')
    virsh.start('virt-tests-vm1')
    virsh.net_autostart('default', '--disable')
    virsh.pool_destroy('mount')
    utils.run('rm /var/lib/virt_test/images/hello')
    utils.run('mkdir /var/lib/virt_test/images/hi')
    utils_libvirtd.Libvirtd().stop()
    utils_selinux.set_status('permissive')
    for state in states:
        lines = state.check(recover=True)
        for line in lines:
            # BUG FIX: 'print line' is Python-2-only syntax; the call
            # form works on both Python 2 and 3.
            print(line)
Example #13
0
 def make_unclean_fs():
     """
     Use force off to make unclean file system of win8
     """
     # A failed start means the unclean state cannot be produced at all.
     start_res = virsh.start(vm_name, ignore_status=True)
     if start_res.exit_status:
         raise exceptions.TestError('Start vm failed')
     # Let the guest boot for a moment, then kill it without a clean
     # shutdown so the filesystem is left dirty.
     time.sleep(10)
     virsh.destroy(vm_name, debug=True)
    def check_multi_guests(guests, start_delay, libvirt_guests):
        """
        Check start_delay option for multiple guests.
        """
        # The source vm must be down before it can be cloned.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Clone the requested number of guests and start every clone.
        clone_timeout = params.get("clone_timeout", 360)
        for idx in range(int(guests)):
            dst_vm = "%s_%s" % (vm_name, idx)
            utils_libguestfs.virt_clone_cmd(vm_name, dst_vm,
                                            True, timeout=clone_timeout)
            virsh.start(dst_vm)

        # Wait 10 seconds for vm to start
        time.sleep(10)
        libvirt_guests.restart()

        # libvirt-guests status reads messages from the systemd journal;
        # they may not be ready immediately, so poll for a short while.
        def _saw_resume():
            return not utils.run("service libvirt-guests status"
                                 " | grep 'Resuming guest'",
                                 ignore_status=True).exit_status

        utils_misc.wait_for(_saw_resume, 5)
        ret = utils.run("service libvirt-guests status",
                        ignore_status=True)
        logging.info("status output: %s", ret.stdout)
        pattern = r'(.+ \d\d:\d\d:\d\d).+: Resuming guest.+done'
        resume_time = re.findall(pattern, ret.stdout, re.M)
        if not resume_time:
            raise error.TestFail("Can't see messages of resuming guest")

        # Convert each matched timestamp into seconds since the epoch.
        resume_seconds = [time.mktime(time.strptime(
            tm, "%b %y %H:%M:%S")) for tm in resume_time]
        logging.info("Resume time in seconds: %s", resume_seconds)
        # Successive resumes must be spaced by at least start_delay.
        for idx in range(len(resume_seconds) - 1):
            if resume_seconds[idx + 1] - resume_seconds[idx] < int(start_delay):
                raise error.TestFail("Checking start_delay failed")
Example #15
0
 def check_result(result, status_error):
     """
     Check virt-v2v command result
     """
     libvirt.check_exit_status(result, status_error)
     output = result.stdout + result.stderr
     if checkpoint == 'empty_cdrom':
         if status_error:
             log_fail('Virsh dumpxml failed for empty cdrom image')
     elif not status_error:
         if output_mode == 'rhev':
             if not utils_v2v.import_vm_to_ovirt(params, address_cache,
                                                 timeout=v2v_timeout):
                 test.fail('Import VM failed')
         elif output_mode == 'libvirt':
             virsh.start(vm_name, debug=True)
         # Verify the converted guest per the checkpoint document.
         logging.info('Checking common checkpoints for v2v')
         checker = VMChecker(test, params, env)
         params['vmchecker'] = checker
         if checkpoint not in ['GPO_AV', 'ovmf']:
             if not checker.run():
                 logging.info("All common checkpoints passed")
         # Checkpoint-specific verifications.
         if checkpoint == 'cdrom':
             virsh_session = utils_sasl.VirshSessionSASL(params)
             check_device_exist('cdrom', virsh_session.get_id())
         if checkpoint.startswith('vmtools'):
             check_vmtools(checker.checker, checkpoint)
         if checkpoint == 'modprobe':
             check_modprobe(checker.checker)
         if checkpoint == 'device_map':
             check_device_map(checker.checker)
         if checkpoint == 'resume_swap':
             check_resume_swap(checker.checker)
         # Merge 2 error lists
         error_list.extend(checker.errors)
     log_check = utils_v2v.check_log(params, output)
     if log_check:
         log_fail(log_check)
     if error_list:
         test.fail('%d checkpoints failed: %s' %
                   (len(error_list), error_list))
Example #16
0
def check_numatune_xml(params):
    """
    Compare mode and nodeset value with guest XML configuration

    @params: the parameter dictionary
    :return: True when the XML matches the expectation, False otherwise.
    """
    vm_name = params.get("vms")
    mode = params.get("numa_mode", "")
    nodeset = params.get("numa_nodeset", "")
    options = params.get("options", "")
    # --config option will act after vm shutdown.
    if options == "config":
        virsh.shutdown(vm_name)
    # The verification of the numa params should
    # be done when vm is running.
    if not virsh.is_alive(vm_name):
        virsh.start(vm_name)

    numa_params = libvirt_xml.VMXML.get_numa_params(vm_name)
    if not numa_params:
        logging.error("Could not get numa parameters for %s", vm_name)
        return False

    mode_from_xml = numa_params['mode']
    # With auto placement there is no nodeset entry in the numa params.
    try:
        nodeset_from_xml = numa_params['nodeset']
    # BUG FIX: 'except KeyError():' named an exception *instance*, which
    # never matches a raised KeyError; the class itself must be used.
    except KeyError:
        nodeset_from_xml = ""

    if mode and mode != mode_from_xml:
        logging.error("To expect %s: %s", mode, mode_from_xml)
        return False

    # The actual nodeset value is different with guest XML configuration,
    # so need to compare them via a middle result, for example, if you
    # set nodeset is '0,1,2' then it will be a list '0-2' in guest XML
    nodeset = nodeset_parser(nodeset)
    nodeset_from_xml = nodeset_parser(nodeset_from_xml)

    if nodeset and nodeset != nodeset_from_xml:
        logging.error("To expect %s: %s", nodeset, nodeset_from_xml)
        return False

    return True
Example #17
0
 def check_result(result, status_error):
     """
     Check virt-v2v command result

     :param result: CmdResult of the virt-v2v run.
     :param status_error: True when the command was expected to fail.
     """
     libvirt.check_exit_status(result, status_error)
     output = result.stdout + result.stderr
     if not status_error and checkpoint != 'vdsm':
         if output_mode == 'rhev':
             if not utils_v2v.import_vm_to_ovirt(params, address_cache,
                                                 timeout=v2v_timeout):
                 test.fail('Import VM failed')
         elif output_mode == 'libvirt':
             try:
                 virsh.start(vm_name, debug=True, ignore_status=False)
             # BUG FIX: 'except Exception, e' is Python-2-only syntax.
             except Exception as e:
                 # BUG FIX: test.fail() takes a single message; the old
                 # call passed '%s' and str(e) as two separate arguments
                 # instead of formatting them together.
                 test.fail('Start vm failed: %s' % str(e))
         # Check guest following the checkpoint document after conversion
         logging.info('Checking common checkpoints for v2v')
         vmchecker = VMChecker(test, params, env)
         params['vmchecker'] = vmchecker
         ret = vmchecker.run()
         if len(ret) == 0:
             logging.info("All common checkpoints passed")
         # Check specific checkpoints
         if checkpoint == 'rhev_file':
             check_rhev_file_exist(vmchecker.checker)
         if checkpoint == 'console_xvc0':
             check_grub_file(vmchecker.checker, 'console_xvc0')
         if checkpoint in ('vnc_autoport', 'vnc_encrypt'):
             vmchecker.check_graphics(params[checkpoint])
         if checkpoint == 'sdl':
             if output_mode == 'libvirt':
                 vmchecker.check_graphics({'type': 'vnc'})
             elif output_mode == 'rhev':
                 vmchecker.check_graphics({'type': 'spice'})
         if checkpoint == 'pv_with_regular_kernel':
             check_kernel(vmchecker.checker)
         if checkpoint in ['sound', 'pcspk']:
             check_sound_card(vmchecker.checker, checkpoint)
         if checkpoint == 'rhsrvany_md5':
             check_rhsrvany_md5(vmchecker.checker)
         if checkpoint == 'multidisk':
             check_disk(vmchecker.checker, params['disk_count'])
 def start_and_check():
     """
     Predict the error message when starting and try to start the guest.

     :return: True when the guest started successfully, False otherwise.
     """
     expected_failures = []
     # A pci-bridge is only valid with an index greater than zero.
     if model == 'pci-bridge' and (index is None or int(index) == 0):
         expected_failures.append(r"PCI bridge index should be > 0")
     start_res = virsh.start(vm_name)
     libvirt.check_result(start_res, expected_fails=expected_failures)
     return not start_res.exit_status
Example #19
0
 def check_result(result, status_error):
     """
     Check virt-v2v command result

     :param result: CmdResult of the virt-v2v run.
     :param status_error: True when the command was expected to fail.
     """
     utlv.check_exit_status(result, status_error)
     output = result.stdout + result.stderr
     if status_error:
         if checkpoint in ['running', 'paused']:
             check_v2v_log(output, 'not_shutdown')
         else:
             check_v2v_log(output, checkpoint)
     else:
         if output_mode == 'rhev':
             if not utils_v2v.import_vm_to_ovirt(params, address_cache,
                                                 timeout=v2v_timeout):
                 raise exceptions.TestFail('Import VM failed')
         if output_mode == 'libvirt':
             try:
                 virsh.start(vm_name, debug=True, ignore_status=False)
             # BUG FIX: 'except Exception, e' is Python-2-only syntax.
             except Exception as e:
                 raise exceptions.TestFail('Start vm failed: %s' % str(e))
         # Check guest following the checkpoint document after conversion
         vmchecker = VMChecker(test, params, env)
         params['vmchecker'] = vmchecker
         ret = vmchecker.run()
         if ret == 0:
             logging.info("All checkpoints passed")
         else:
             raise exceptions.TestFail("%s checkpoints failed" % ret)
         if checkpoint in ['multi_kernel', 'debug_kernel']:
             default_kernel = params.get('defaultkernel')
             check_boot_kernel(default_kernel, debug_kernel)
             if checkpoint == 'multi_kernel':
                 check_vmlinuz_initramfs(output)
         elif checkpoint == 'floppy':
             check_floppy_exist()
         elif checkpoint == 'multi_disks':
             check_disks()
         elif checkpoint == 'multi_netcards':
             check_multi_netcards(params['mac_address'],
                                  vmchecker.virsh_session_id)
         check_v2v_log(output, checkpoint)
    def _start_and_check():
        """
        Predict the error message when starting and try to start the guest.

        :return: True when the guest started successfully, False otherwise.
        """
        expected_failures = []
        # This channel configuration is expected to make the start fail
        # with the pattern below.
        if channel_type == 'unix' and source_path and source_mode != 'bind':
            expected_failures.append(r"No such file or directory")

        start_res = virsh.start(vm_name)
        libvirt.check_result(start_res, expected_fails=expected_failures)
        return not start_res.exit_status
Example #21
0
 def check_result(result, status_error):
     """
     Check virt-v2v command result

     :param result: CmdResult of the virt-v2v run.
     :param status_error: True when the command was expected to fail.
     """
     libvirt.check_exit_status(result, status_error)
     output = result.stdout + result.stderr
     if not status_error:
         if output_mode == 'rhev':
             if not utils_v2v.import_vm_to_ovirt(params, address_cache,
                                                 timeout=v2v_timeout):
                 raise exceptions.TestFail('Import VM failed')
         elif output_mode == 'libvirt':
             try:
                 virsh.start(vm_name, debug=True, ignore_status=False)
             # BUG FIX: 'except Exception, e' is Python-2-only syntax.
             except Exception as e:
                 # BUG FIX: TestFail takes a single message; the old call
                 # passed '%s' and str(e) as two separate arguments.
                 raise exceptions.TestFail('Start vm failed: %s' % str(e))
         if checkpoint:
             if checkpoint == 'rhev_file':
                 check_rhev_file_exist()
             elif checkpoint == 'console_xvc0':
                 check_grub_file('console_xvc0')
             check_v2v_log(output, checkpoint)
Example #22
0
 def check_result(cmd, result, status_error):
     """
     Check virt-v2v command result
     """
     utlv.check_exit_status(result, status_error)
     output = result.stdout + result.stderr
     # Expected-failure runs need no further verification.
     if status_error:
         return

     def _opt_value(flag, pattern=r"\s(\w+)"):
         # Extract the value following a command-line flag in cmd.
         return re.findall(flag + pattern, cmd)[0]

     if output_mode == "rhev":
         ovf = get_ovf_content(output)
         logging.debug("ovf content: %s", ovf)
         if '--vmtype' in cmd:
             check_vmtype(ovf, _opt_value('--vmtype'))
     if '-oa' in cmd and '--no-copy' not in cmd:
         check_image(get_img_path(output), "allocation", _opt_value('-oa'))
     if '-of' in cmd and '--no-copy' not in cmd:
         check_image(get_img_path(output), "format", _opt_value('-of'))
     if '-on' in cmd:
         check_new_name(output, _opt_value('-on'))
     if '--no-copy' in cmd:
         check_nocopy(output)
     if '-oc' in cmd:
         check_connection(output, _opt_value('-oc', pattern=r"\s(\S+)"))
     if output_mode == "rhev":
         if not utils_v2v.import_vm_to_ovirt(params, address_cache):
             raise error.TestFail("Import VM failed")
         params['vmcheck'] = utils_v2v.VMCheck(test, params, env)
         if attach_disks:
             check_disks(params.get("ori_disks"))
     if output_mode == "libvirt":
         if "qemu:///session" not in v2v_options:
             virsh.start(vm_name, debug=True, ignore_status=False)
Example #23
0
 def offline_pin_and_check(vm, vcpu, cpu_list):
     """
     Edit domain xml to pin vcpu and check the result.
     """
     # Write the vcpupin setting into the inactive domain XML.
     pin_cfg = vm_xml.VMCPUTuneXML()
     pin_cfg.vcpupins = [{'vcpu': str(vcpu), 'cpuset': cpu_list}]
     inactive_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
     inactive_xml.cputune = pin_cfg
     inactive_xml.sync()
     # Start the guest and verify the pinning took effect.
     start_res = virsh.start(vm.name, debug=True)
     libvirt.check_exit_status(start_res, status_error)
     check_vcpupin(vm.name, vcpu, cpu_list,
                   vm.get_pid(), vm.get_vcpus_pid()[vcpu])
Example #24
0
    def edit_memory(source):
        """
        Modify vm's maximum and current memory(unit and value).

        :param source: virsh edit's option.
        :return: True if edit succeeded, False if edit failed.
        """
        mem_unit = params.get("mem_unit", "K")
        mem_value = params.get("mem_value", "1048576")
        mem_delta = params.get("mem_delta", 1000)
        edit_cmd = []
        # Drop currentMemory so the <memory> element alone drives both.
        del_cmd = r":g/currentMemory/d"
        edit_cmd.append(del_cmd)
        update_cmd = r":%s/<memory unit='KiB'>[0-9]*<\/memory>/<memory unit='"
        update_cmd += mem_unit + "'>" + mem_value + r"<\/memory>"
        edit_cmd.append(update_cmd)
        try:
            expected_mem = int(utils_misc.normalize_data_size(
                mem_value + mem_unit, 'K').split('.')[0])
        except ValueError:
            logging.error("Fail to translate %s to KiB", mem_value + mem_unit)
            return False
        logging.debug("Expected max memory is %s", expected_mem)
        status = libvirt.exec_virsh_edit(source, edit_cmd)
        try:
            if status:
                # Restart vm to check memory value
                virsh.destroy(vm_name)
                virsh.start(vm_name)
                new_mem = vm.get_max_mem()
                # NOTE(review): only an upward deviation is rejected here;
                # presumably a too-small value should also fail — confirm.
                if new_mem - expected_mem > int(mem_delta):
                    # BUG FIX: log message typo 'excepted' -> 'expected'.
                    logging.error("New max memory %s is not expected", new_mem)
                    return False
        except Exception as e:
            # BUG FIX: log message typo 'occured' -> 'occurred'.
            logging.error("Error occurred when check domain memory: %s", e)
            return False
        return status
Example #25
0
def manipulate_domain(vm_name, action, recover=False):
    """
    Save/managedsave/S3/S4 domain or recover it.

    :param vm_name: name of the domain to operate on.
    :param action: one of "save", "managedsave", "s3", "s4".
    :param recover: False to put the domain into the saved/suspended
                    state, True to bring it back.
    """
    tmp_dir = data_dir.get_tmp_dir()
    save_file = os.path.join(tmp_dir, vm_name + ".save")
    if recover:
        # Recovery path: undo whatever the matching action did.
        if action == "save":
            if not os.path.exists(save_file):
                raise error.TestError("No save file for domain restore")
            res = virsh.restore(save_file, ignore_status=True, debug=True)
            libvirt.check_exit_status(res)
            os.remove(save_file)
        elif action in ("managedsave", "s4"):
            res = virsh.start(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(res)
        elif action == "s3":
            res = virsh.dompmwakeup(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(res)
        else:
            logging.debug("No need recover the domain")
        return

    # Forward path: save/suspend the domain as requested.
    if action == "save":
        res = virsh.save(vm_name, save_file, "",
                         ignore_status=True, debug=True)
        libvirt.check_exit_status(res)
    elif action == "managedsave":
        res = virsh.managedsave(vm_name, "",
                                ignore_status=True, debug=True)
        libvirt.check_exit_status(res)
    elif action in ("s3", "s4"):
        suspend_target = "mem" if action == "s3" else "disk"
        res = virsh.dompmsuspend(vm_name, suspend_target,
                                 ignore_status=True, debug=True)
        libvirt.check_exit_status(res)
        if action == "s4":
            # Wait domain state change: 'in shutdown' -> 'shut off'
            utils_misc.wait_for(lambda: virsh.is_dead(vm_name), 5)
    else:
        logging.debug("No operation for the domain")
Example #26
0
 def edit_rng(vm_name):
     """
     Modify rng device in xml.

     Deletes any existing <rng> device, inserts a new one built from the
     test parameters, then verifies edit status and that the guest can be
     restarted.
     """
     rng_model = params.get("rng_model")
     rng_backend = params.get("rng_backend")
     backend_model = params.get("backend_model")
     backend_type = params.get("backend_type")
     edit_error = "yes" == params.get("edit_error", "no")
     bc_type = "type='%s'" % backend_type if backend_type else ""
     # First remove any existing rng device, then append the new device
     # right after the <devices> opening tag.
     edit_cmd = [
         r":g/<rng.*<\/rng>/d",
         (r":/<devices>/s/$/<rng model='%s'>"
          "<backend model='%s' %s>%s<\/backend><\/rng>"
          % (rng_model, backend_model, bc_type, rng_backend)),
     ]
     status = libvirt.exec_virsh_edit(vm_name, edit_cmd)
     vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
     if not libvirtd.is_running():
         logging.error("libvirtd isn't running")
         return False
     if not status and not edit_error:
         logging.error("Expect success, but failure")
         return False
     if edit_error and status:
         logging.error("Expect error, but success")
         return False
     # Destroy domain and start it to check if vm can be started
     start_error = "yes" == params.get("start_error", "no")
     vm.destroy()
     ret = virsh.start(vm_name, ignore_status=True)
     if start_error and not ret.exit_status:
         logging.error("Vm started unexpectedly")
         return False
     if not start_error and ret.exit_status:
         logging.error("Vm failed to start")
         return False
     return True
Example #27
0
def run(test, params, env):
    """
    Test vm features (hyperv enlightenments).

    :param test: test object providing cancel/fail APIs.
    :param params: test parameters dict-like object.
    :param env: test environment providing the VM object.
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)

    # NOTE(review): eval() executes arbitrary text from the test config.
    # Acceptable only because configs are trusted input; ast.literal_eval
    # would be the safer choice for a plain dict literal.
    hyperv_attr = eval(params.get('hyperv_attr', '{}'))

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()

    try:

        # Set hyperv attribs if there're attribs to set
        if hyperv_attr:
            if set(hyperv_attr.keys()).intersection(('tlbflush',
                                                     'frequencies',
                                                     'reenlightenment')):

                # Compare libvirt version to decide if test is valid
                if not libvirt_version.version_compare(5, 0, 0):
                    # Fixed typo in user-facing message ("does'nt").
                    test.cancel("This version of libvirt doesn't support "
                                'the setting: %r' % hyperv_attr)

            vm_xml.VMXML.set_vm_features(
                vm_name,
                **{'hyperv_%s_state' % key: value
                   for key, value in hyperv_attr.items()}
            )

            # Test vm start
            ret = virsh.start(vm_name, debug=True)
            libvirt.check_exit_status(ret)
            vm.wait_for_login().close()

            # Check hyperv settings in qemu command line
            for attr in hyperv_attr:
                libvirt.check_qemu_cmd_line('hv_' + attr)

    finally:
        # Always restore the original (backed-up) domain XML.
        bkxml.sync()
    def result_confirm(self, params):
        """
        Confirm if virt-clone executed succeed
        """
        if self.kill_first:
            # Stop the first cloning thread by killing its cgroup's last pid.
            victim_pid = self.cgroup.get_pids(self.cgroup_index)[-1]
            utils_misc.safe_kill(int(victim_pid), signal.SIGKILL)
        else:
            self.td0.join(self.time_out)
        if self.td1:
            self.td1.join(self.time_out)
        abnormal_type = params.get("abnormal_type")
        if abnormal_type == "cpu_lack":
            # CPU shortage must not break the clone; it should also start.
            if not virsh.domain_exists(self.vm_new_name):
                self.test.fail("Clone '%s' failed" % self.vm_new_name)
            start_ret = virsh.start(self.vm_new_name, ignore_status=True)
            if start_ret.exit_status:
                self.test.fail("Cloned domain cannot be started!")
        elif abnormal_type == "disk_lack":
            # Disk shortage is expected to make the clone fail.
            if virsh.domain_exists(self.vm_new_name):
                self.test.fail("Clone '%s' succeed but expect failed!"
                               % self.vm_new_name)
        elif self.twice_execute:
            if self.kill_first:
                # First clone was killed mid-way; the retry must succeed.
                if not virsh.domain_exists(self.vm_new_name):
                    self.test.fail("Clone '%s' failed!"
                                   % self.vm_new_name)
            else:
                # Neither duplicate clone attempt should have succeeded.
                for clone_name in (self.vm_new_name, self.vm_new_name1):
                    if virsh.domain_exists(clone_name):
                        self.test.fail("Clone '%s' succeed but expect"
                                       " failed!" % clone_name)
Example #29
0
 def check_result(result, status_error):
     """
     Check virt-v2v command result.

     :param result: CmdResult of the virt-v2v run.
     :param status_error: True if the command was expected to fail.
     """
     libvirt.check_exit_status(result, status_error)
     output = result.stdout + result.stderr
     if not status_error and checkpoint != 'vdsm':
         if output_mode == 'rhev':
             if not utils_v2v.import_vm_to_ovirt(
                     params, address_cache, timeout=v2v_timeout):
                 test.fail('Import VM failed')
         elif output_mode == 'libvirt':
             try:
                 virsh.start(vm_name, debug=True, ignore_status=False)
             except Exception as e:
                 # Fixed: test.fail() takes a single message string; the old
                 # ('...: %s', str(e)) form never interpolated the exception.
                 test.fail('Start vm failed: %s' % str(e))
         # Check guest following the checkpoint document after convertion
         logging.info('Checking common checkpoints for v2v')
         vmchecker = VMChecker(test, params, env)
         params['vmchecker'] = vmchecker
         if params.get('skip_vm_check') != 'yes':
             ret = vmchecker.run()
             if len(ret) == 0:
                 logging.info("All common checkpoints passed")
         else:
             logging.info('Skip checking vm after conversion: %s' %
                          skip_reason)
         # Check specific checkpoints
         if checkpoint == 'console_xvc0':
             check_grub_file(vmchecker.checker, 'console_xvc0')
         if checkpoint in ('vnc_autoport', 'vnc_encrypt'):
             vmchecker.check_graphics(params[checkpoint])
         if checkpoint == 'sdl':
             if output_mode == 'libvirt':
                 vmchecker.check_graphics({'type': 'vnc'})
             elif output_mode == 'rhev':
                 vmchecker.check_graphics({'type': 'spice'})
         if checkpoint == 'pv_with_regular_kernel':
             check_kernel(vmchecker.checker)
         if checkpoint in ['sound', 'pcspk']:
             check_sound_card(vmchecker.checker, checkpoint)
         if checkpoint == 'rhsrvany_md5':
             check_rhsrvany_md5(vmchecker.checker)
         if checkpoint == 'multidisk':
             check_disk(vmchecker.checker, params['disk_count'])
     log_check = utils_v2v.check_log(params, output)
     if log_check:
         log_fail(log_check)
     # Merge 2 error lists
     if params.get('vmchecker'):
         error_list.extend(params['vmchecker'].errors)
     # Virtio drivers will not be installed without virtio-win setup
     if checkpoint == 'virtio_win_unset':
         missing_list = params.get('missing').split(',')
         expect_errors = ['Not find driver: ' + x for x in missing_list]
         logging.debug('Expect errors: %s' % expect_errors)
         logging.debug('Actual errors: %s' % error_list)
         if set(error_list) == set(expect_errors):
             error_list[:] = []
         else:
             logging.error('Virtio drivers not meet expectation')
     if len(error_list):
         test.fail('%d checkpoints failed: %s' %
                   (len(error_list), error_list))
Example #30
0
    def attach_interface():
        """
            Attach interface:

            1.Attach interface from xml;
            2.Check the vf driver after attach interface;
            3.Check the live xml after attach interface;
        """
        if managed == "no":
            result = virsh.nodedev_detach(nodedev_pci_addr)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        logging.debug("attach interface xml:\n %s", new_iface)
        result = virsh.attach_device(vm_name,
                                     new_iface.xml,
                                     flagstr=option,
                                     debug=True)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        if option == "--config":
            result = virsh.start(vm_name)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        # For option == "--persistent", after VM destroyed and then start, the device should still be there.
        if option == "--persistent":
            virsh.destroy(vm_name)
            result = virsh.start(vm_name, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(live_xml)

        if vf_type != "hostdev":
            get_ip_by_mac(mac_addr, timeout=60)

        device = live_xml.devices

        if vf_type == "hostdev":
            hostdev_list = device.by_device_tag("hostdev")
            if len(hostdev_list) == 0:
                test.fail("The hostdev device attach failed from xml\n")
            else:
                for hostdev in hostdev_list:
                    if hostdev.type == "pci":
                        break
                interface = hostdev

        if vf_type == "vf" or vf_type == "vf_pool":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "hostdev":
                    if interface.driver.driver_attr['name'] != 'vfio':
                        test.fail(
                            "The driver of the hostdev interface is not vfio\n"
                        )
                    break
            vf_addr_attrs = interface.hostdev_address.attrs
            pci_addr = addr_to_pci(vf_addr_attrs)
            nic_driver = os.readlink(
                os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1]
            if nic_driver != "vfio-pci":
                test.fail("The driver of the hostdev interface is not vfio\n")
        elif vf_type == "macvtap" or vf_type == "macvtap_network":
            # Fixed: 'match' was previously unassigned until a matching
            # direct-type interface was found, so the in-loop check raised
            # NameError (or failed prematurely before later interfaces were
            # examined). Initialize it and verify once after the scan.
            match = "no"
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "direct":
                    if vf_type == "macvtap":
                        if interface.source["dev"] == new_iface.source["dev"]:
                            match = "yes"
                            vf_name = interface.source["dev"]
                    elif interface.source['dev'] in vf_name_list:
                        match = "yes"
                        vf_name = interface.source["dev"]
            if match != "yes":
                test.fail(
                    "The dev name or mode of macvtap interface is wrong after attach\n"
                )
        return interface
Example #31
0
def run(test, params, env):
    """
    Test startupPolicy for CD-ROM/floppy/Volume disks.

    Steps:
    1. Prepare disk media image.
    2. Setup startupPolicy for a disk.
    3. Start the domain.
    4. Save the domain.
    5. Remove the disk source file and restore the domain.
    6. Update startupPolicy for  a disk.
    7. Destroy the domain.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    # startupPolicy attribute value under test, read from the test config.
    startup_policy = params.get("policy")

    def create_iscsi_pool():
        """
        Setup iSCSI target,and create one iSCSI pool.
        """
        # Tear down any stale target before setting up a fresh one.
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
            is_setup=True,
            is_login=False,
            image_size='1G',
            chap_user="",
            chap_passwd="",
            portal_ip=disk_src_host)
        # Define an iSCSI pool xml to create it
        src_xml = pool_xml.SourceXML()
        src_xml.host_name = pool_src_host
        src_xml.device_path = iscsi_target
        pxml = pool_xml.PoolXML(pool_type=pool_type)
        pxml.name = pool_name
        pxml.set_source(src_xml)
        pxml.target_path = "/dev/disk/by-path"

        # Create iSCSI pool.
        virsh.pool_destroy(pool_name, **virsh_dargs)
        create_result = virsh.pool_create(pxml.xml, **virsh_dargs)
        libvirt.check_exit_status(create_result)

    def create_volume(pvt, created_vol_name=None):
        """
        Create iSCSI volume.

        :param pvt: PoolVolumeTest object
        :param created_vol_name: Created volume name
        :return: (volume name, volume path) pair parsed from 'virsh vol-list'.
        """
        try:
            if pool_type == "iscsi":
                create_iscsi_pool()
            else:
                pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
                pvt.pre_vol(vol_name=created_vol_name,
                            vol_format=vol_format,
                            capacity=capacity,
                            allocation=None,
                            pool_name=pool_name)
        except Exception as pool_exception:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image,
                             **virsh_dargs)
            # Fixed: the two concatenated literals lacked a separating space
            # ("preparepool xml ...").
            test.error("Error occurred when prepare "
                       "pool xml with message %s:\n" % str(pool_exception))

        def get_vol():
            """Get the volume info"""
            # Refresh the pool
            cmd_result = virsh.pool_refresh(pool_name)
            libvirt.check_exit_status(cmd_result)
            # Get volume name
            cmd_result = virsh.vol_list(pool_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            vol_list = re.findall(r"(\S+)\ +(\S+)",
                                  str(cmd_result.stdout.strip()))
            try:
                # Index 1 presumably skips the 'Name Path' header row of
                # vol-list output — confirm against virsh output format.
                return vol_list[1]
            except IndexError:
                return None

        # Wait for a while so that we can get the volume info
        vol_info = utils_misc.wait_for(get_vol, 10)
        if vol_info:
            tmp_vol_name, tmp_vol_path = vol_info
        else:
            test.error("Failed to get volume info")
        process.run('qemu-img create -f qcow2 %s %s' % (tmp_vol_path, '100M'),
                    shell=True)
        return vol_info

    def check_disk_source(vm_name, target_dev, expect_value):
        """
        Check the disk source: file and startupPolicy.

        :param vm_name: Domain name
        :param target_dev: Disk's target device
        :param expect_value: Expect value of source file and source startupPolicy
        """
        logging.debug("Expect source file is '%s'", expect_value[0])
        logging.debug("Expect source startupPolicy is '%s'", expect_value[1])
        dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        all_disks = dom_xml.get_disk_all()
        actual_value = []
        try:
            src_elem = all_disks[target_dev].find('source')
            actual_value.append(src_elem.get('file'))
            actual_value.append(src_elem.get('startupPolicy'))
        except KeyError:
            test.error("No %s in domain %s" % (target_dev, vm_name))
        logging.debug("Actual source file is '%s'", actual_value[0])
        logging.debug("Actual source startupPolicy is '%s'", actual_value[1])
        if actual_value == expect_value:
            logging.debug("Domain disk XML check pass")
        else:
            test.error("Domain disk XML check fail")

    def create_disk_xml():
        """
        Create a disk xml file for attaching to a domain.
        """
        if disk_type == "file":
            process.run("qemu-img create %s %s" % (media_file, image_size),
                        shell=True)
        disk_params = {'device_type': device_type,
                       'type_name': disk_type,
                       'target_dev': target_dev,
                       'target_bus': target_bus}
        # Merge in the source attributes for the chosen disk type.
        if disk_type == "file":
            disk_params.update({'source_protocol': "file",
                                'source_file': media_file,
                                'source_startupPolicy': startup_policy})
        elif disk_type == "volume":
            disk_params.update({'source_pool': pool_name,
                                'source_volume': vol_name,
                                'driver_type': 'qcow2',
                                'source_startupPolicy': startup_policy})
            if pool_type == "iscsi":
                disk_params['source_mode'] = "host"
        disk_xml = libvirt.create_disk_xml(disk_params)
        # Keep a copy of the generated xml on disk for later reuse.
        shutil.copyfile(disk_xml, disk_xml_file)
        return disk_xml

    def check_in_vm(old_parts):
        """
        Check mount/read/write disk in VM.

        :param old_parts: pre-operated partitions in VM.
        :return: True if check successfully.
        """
        try:
            session = vm.wait_for_login()
            current_parts = utils_disk.get_parts_list(session)
            logging.debug("new parted:%s", current_parts)
            fresh_parts = list(set(current_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", fresh_parts)
            if len(fresh_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False
            new_part = fresh_parts[0]
            if not new_part:
                logging.error("Can't see added partition in VM")
                return False
            # 'sr*'/'fd*' devices (cdrom/floppy) are skipped: only regular
            # disks get formatted, mounted and written to.
            if 'sr' not in new_part and 'fd' not in new_part:
                cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                       "mkdir -p test && mount /dev/{0} test && echo"
                       " teststring > test/testfile && umount test".format(
                           new_part))
                status, output = session.cmd_status_output(cmd)
                logging.info("Check disk operation in VM:\n%s", output)
                if status != 0:
                    return False
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def check_policy_update(origin_policy, policy_list, xml_policy_file,
                            device_type, flag_str):
        """
        Check updated policy after executing virsh update-device.

        :param origin_policy: the inherit startup policy value.
        :param policy_list: updated policy list.
        :param xml_policy_file: xml file for startupPolicy.
        :param device_type: device type,cdrom or disk.,etc
        :param flag_str: it can be --config,--live and --persistent.
        """
        for policy in policy_list:
            xmltreefile = XMLTreeFile(xml_policy_file)
            try:
                policy_item = xmltreefile.find('/source')
                policy_item.set('startupPolicy', policy)
            except AttributeError as elem_attr:
                # Fixed: test.error() takes one message string; the old
                # ("...%s", str(...)) form never interpolated the detail.
                test.error("Fail to find startupPolicy attribute.%s"
                           % str(elem_attr))
            xmltreefile.write(xml_policy_file, encoding="UTF-8")
            ret = virsh.update_device(vm_name,
                                      xml_policy_file,
                                      flagstr=flag_str,
                                      debug=True)
            # 'requisite' on a disk device must be rejected; stop here since
            # the remaining policies cannot be applied after the failure.
            if all([device_type == "disk", policy == "requisite"]):
                libvirt.check_exit_status(ret, True)
                return
            else:
                libvirt.check_exit_status(ret)

            def check_policy_value(active_policy, inactive_policy):
                """
                Check policy value in dumpxml with active or inactive option

                :param active_policy: active policy attribute value
                :param inactive_policy: inactive policy attribute value
                """
                vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                disk_list = vmxml.devices.by_device_tag("disk")
                disk = disk_list[len(disk_list) - 1]
                if not active_policy == disk.source.attrs["startupPolicy"]:
                    test.error(
                        "Actual policy:%s in active state is not equal to expected:%s"
                        % (active_policy, disk.source.attrs["startupPolicy"]))
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                disk_list = vmxml.devices.by_device_tag("disk")
                disk = disk_list[len(disk_list) - 1]
                if not inactive_policy == disk.source.attrs["startupPolicy"]:
                    test.error(
                        "Actual policy:%s in inactive state is not equal to expected: %s"
                        %
                        (inactive_policy, disk.source.attrs["startupPolicy"]))

            # --live changes only the running config; --config only the
            # persistent one; --persistent changes both.
            if flag_str == "--live":
                check_policy_value(policy, origin_policy)
            elif flag_str == "--config":
                check_policy_value(origin_policy, policy)
            elif flag_str == "--persistent":
                check_policy_value(policy, policy)

    def check_source_update(xml_policy_file):
        """
        Update source and policy at the same time,then check those changes.

        :param xml_policy_file: VM xml policy file
        """
        # Both nested helpers below mutate this shared element in place and
        # rewrite xml_policy_file, so the four scenarios build on each other.
        xmltreefile = XMLTreeFile(xml_policy_file)
        policy_item = xmltreefile.find('/source')

        def configure_startup_policy(update=False, policy='optional'):
            """
            Configure startupPolicy attribute value.

            :param update: update value or not
            :param policy: policy value
            :return: flag_option and boolean value
            """
            # update=True removes the attribute entirely (raises
            # AttributeError/KeyError path handled by the caller's except).
            if update:
                del policy_item.attrib["startupPolicy"]
            else:
                policy_item.set("startupPolicy", policy)
            flag_option = "--live"
            xmltreefile.write(xml_policy_file, encoding="UTF-8")
            return flag_option, False

        # Update source and startUpPolicy attribute value.
        def update_source_policy(update=True, policy='optional'):
            """
            Update startupPolicy source value.

            :param update: update value or not
            :param policy: policy value
            :return: flag_option and boolean value
            """
            source_file = policy_item.get('file')
            # NOTE(review): the ".empty" copy created when update=True is
            # never removed by the finally block below (it only deletes
            # ".new") — confirm whether that leak is intended.
            if update:
                new_source_file = source_file + ".empty"
            else:
                new_source_file = source_file + ".new"
            shutil.copyfile(source_file, new_source_file)
            policy_item.set("file", new_source_file)
            policy_item.set("startupPolicy", policy)
            flag_option = "--persistent"
            xmltreefile.write(xml_policy_file, encoding="UTF-8")
            return flag_option, False

        function_list = [
            configure_startup_policy, update_source_policy,
            configure_startup_policy, update_source_policy
        ]
        function_parameter = [False, False, True, True]
        # Loop all above scenarios to update device.
        for index in list(range(len(function_list))):
            try:
                func = function_list[index]
                para = function_parameter[index]
                flag_option, update_error = func(para)
                ret = virsh.update_device(vm_name,
                                          xml_policy_file,
                                          flagstr=flag_option,
                                          debug=True)
                libvirt.check_exit_status(ret, expect_error=update_error)
            except AttributeError as elem_attr:
                test.error("Fail to remove startupPolicy attribute:%s" %
                           str(elem_attr))
            except Exception as update_device_exception:
                test.error("Fail to update device:%s" %
                           str(update_device_exception))
            finally:
                # Clean the ".new" copy produced by update_source_policy.
                source_file = policy_item.get('file')
                new_source_file = source_file + ".new"
                if os.path.exists(new_source_file):
                    os.remove(new_source_file)

    def rename_file(source_file, target_file, revert=False):
        """
        Rename a file or revert it.

        :param source_file: The source file name.
        :param target_file: The target file name.
        :param revert: It can be True or False.
        """
        # revert=True swaps the direction of the rename.
        src, dst = (target_file, source_file) if revert \
            else (source_file, target_file)
        try:
            os.rename(src, dst)
            logging.debug("Rename %s to %s", src, dst)
        except OSError as err:
            test.fail("Rename image failed: %s" % str(err))

    # Back VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Get start,restore configuration parameters.
    start_error = "yes" == params.get("start_error", "no")
    restore_error = "yes" == params.get("restore_error", "no")
    virsh_dargs = {'debug': True, 'ignore_status': True}
    attach_option = params.get("attach_option")

    # Create disk xml and attach it.
    device_type = params.get("device_type")
    disk_type = params.get("disk_type", "network")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    target_dev = params.get("target_dev")
    target_bus = params.get("disk_target_bus", "virtio")
    image_size = params.get("image_size", "1.44M")
    emulated_image = "emulated-iscsi"

    # Storage pool and volume related paramters.
    pool_name = params.get("pool_name", "iscsi_pool")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target", "/dev/disk/by-path")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    vol_name = params.get("volume_name")
    capacity = params.get("volume_size", "1048576")
    vol_format = params.get("volume_format")

    # Source file parameters.
    media_name = params.get("media_name")
    media_file = os.path.join(data_dir.get_tmp_dir(), media_name)
    media_file_new = media_file + ".new"
    save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
    snapshot_name = "s1"

    # Policy related paramters.
    disk_xml_file = os.path.join(data_dir.get_tmp_dir(), "attach_disk.xml")
    disk_xml_policy_file = os.path.join(data_dir.get_tmp_dir(),
                                        "attach_policy_disk.xml")
    update_policy = "yes" == params.get("update_policy", "no")
    policy_only = "yes" == params.get("policy_only", "no")
    update_policy_list = params.get("update_policy_list").split()
    expect_value = [None, startup_policy]

    try:
        if disk_type == "volume":
            pvt = libvirt.PoolVolumeTest(test, params)
            vol_name, vol_path = create_volume(pvt, vol_name)
            vol_path_new = vol_path + ".new"

        # Create disk xml.
        create_disk_xml()
        if vm.is_alive():
            vm.destroy()
        try:
            # Backup disk xml file for policy update if update_policy=True.
            if update_policy:
                shutil.copyfile(disk_xml_file, disk_xml_policy_file)
            result = virsh.attach_device(domainarg=vm_name,
                                         filearg=disk_xml_file,
                                         flagstr="--config",
                                         **virsh_dargs)
            # For iSCSI pool volume,startupPolicy attribute is not valid for it.
            # Moreover,setting disk 'requisite' is allowed only for cdrom or floppy.
            if pool_type == "iscsi" or all(
                [device_type == "disk", startup_policy == "requisite"]):
                libvirt.check_exit_status(result, expect_error=True)
                return
            else:
                libvirt.check_exit_status(result, expect_error=False)
        except Exception as attach_device_exception:
            logging.debug("Attach device throws exception:%s",
                          str(attach_device_exception))
            os.remove(media_file)
            test.error("Attach %s fail" % device_type)
        # Check update policy operations.
        if disk_type == "file" and update_policy:
            vm.start()
            if policy_only:
                check_policy_update(startup_policy, update_policy_list,
                                    disk_xml_policy_file, device_type,
                                    attach_option)
            else:
                check_source_update(disk_xml_policy_file)
        elif disk_type == "file":
            # Step 1. Start domain and destroy it normally
            vm.start()
            vm.destroy()

            # Step 2. Remove the source_file then start the domain
            rename_file(media_file, media_file_new)
            result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(result, expect_error=start_error)

            # For libvirt version >=2.0.0, feature is updated and startup policy attribute
            # can not exist alone without source protocol.
            if not start_error and not libvirt_version.version_compare(
                    2, 0, 0):
                check_disk_source(vm_name, target_dev, expect_value)

            # Step 3. Move back the source file and start the domain(if needed).
            rename_file(media_file, media_file_new, revert=True)
            if not vm.is_alive():
                vm.start()

            # Step 4. Save the domain normally, then remove the source file
            # and restore it back
            vm.save_to_file(save_file)
            rename_file(media_file, media_file_new)
            result = virsh.restore(save_file, **virsh_dargs)
            libvirt.check_exit_status(result, expect_error=restore_error)
            if not restore_error and not libvirt_version.version_compare(
                    2, 0, 0):
                check_disk_source(vm_name, target_dev, expect_value)

            # Step 5. Move back the source file and restore the domain(if needed)
            rename_file(media_file, media_file_new, revert=True)
            if not vm.is_alive():
                result = virsh.restore(save_file, **virsh_dargs)
                libvirt.check_exit_status(result, expect_error=False)
        elif disk_type == "volume":
            # Step 1. Start domain and destroy it normally.
            vm.start()
            # Step 1 Start VM successfully.
            if not check_in_vm(old_parts):
                test.fail("Check disk partitions in VM failed")

            # Step 2 Destroy VM, move the volume to other place, refresh the pool, then start the guest.
            vm.destroy()
            rename_file(vol_path, vol_path_new)
            cmd_result = virsh.pool_refresh(pool_name)
            libvirt.check_exit_status(cmd_result)
            result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(result, expect_error=start_error)

            # Step 3 Move back the source file and start.
            rename_file(vol_path, vol_path_new, revert=True)
            cmd_result = virsh.pool_refresh(pool_name)
            libvirt.check_exit_status(cmd_result)
            if not vm.is_alive():
                vm.start()

            # Step 4 Save the domain normally, then remove the source file,then restore domain.
            vm.save_to_file(save_file)
            rename_file(vol_path, vol_path_new)
            cmd_result = virsh.pool_refresh(pool_name)
            libvirt.check_exit_status(cmd_result)
            result = virsh.restore(save_file, **virsh_dargs)
            libvirt.check_exit_status(result, expect_error=restore_error)

            # Step 5, Create snapshot,move the source to other place,then revert snapshot.
            if device_type == "disk":
                rename_file(vol_path, vol_path_new, revert=True)
                cmd_result = virsh.pool_refresh(pool_name)
                libvirt.check_exit_status(cmd_result)
                if restore_error:
                    result = virsh.restore(save_file, **virsh_dargs)
                    libvirt.check_exit_status(result)
                ret = virsh.snapshot_create_as(vm_name, snapshot_name,
                                               **virsh_dargs)
                libvirt.check_exit_status(ret)
                rename_file(vol_path, vol_path_new)
                # NOTE(review): the snapshot_revert result is never checked
                # — confirm whether that is intentional.
                ret = virsh.snapshot_revert(vm_name, snapshot_name,
                                            **virsh_dargs)
                # Clean up snapshot.
                libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()

        # Remove the pool and every temporary file the test may have left.
        if disk_type == "volume":
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image,
                             **virsh_dargs)
        if os.path.exists(save_file):
            os.remove(save_file)
        if os.path.exists(disk_xml_file):
            os.remove(disk_xml_file)
        if os.path.exists(media_file):
            os.remove(media_file)
        if os.path.exists(disk_xml_policy_file):
            os.remove(disk_xml_policy_file)
Example #32
0
def run(test, params, env):
    """
    Test interface with unprivileged user

    Define a copy of the guest for an unprivileged user, attach a
    precreated tap or macvtap device to it, start the vm as that user
    and verify network connectivity from inside the guest.

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """

    def create_bridge(br_name, iface_name):
        """
        Create bridge attached to physical interface

        :param br_name: name of the bridge to create
        :param iface_name: physical interface to enslave to the bridge
        """
        # Make sure the bridge not exist
        if libvirt.check_iface(br_name, "exists", "--all"):
            test.cancel("The bridge %s already exist" % br_name)

        # Create bridge. Run inside tmux so dhclient keeps running even
        # while the physical interface briefly loses its address during
        # enslavement.
        utils_package.package_install('tmux')
        cmd = 'tmux -c "ip link add name {0} type bridge; ip link set {1} up;' \
              ' ip link set {1} master {0}; ip link set {0} up; pkill dhclient; ' \
              'sleep 6; dhclient {0}; ifconfig {1} 0"'.format(br_name, iface_name)
        process.run(cmd, shell=True, verbose=True)

    def check_ping(dest_ip, ping_count, timeout, src_ip=None, session=None,
                   expect_success=True):
        """
        Check if ping result meets expectation

        :param dest_ip: ping destination address
        :param ping_count: number of echo requests to send
        :param timeout: seconds to wait before giving up
        :param src_ip: optional source interface/address
        :param session: optional guest session to ping from
        :param expect_success: whether the ping is expected to succeed
        :raise: test.fail if the result differs from expectation
        """
        status, _ = utils_net.ping(dest=dest_ip, count=ping_count,
                                   interface=src_ip, timeout=timeout,
                                   session=session, force_ipv4=True)
        success = status == 0

        if success != expect_success:
            test.fail('Ping result not met expectation, '
                      'actual result is {}'.format(success))

    if not libvirt_version.version_compare(5, 6, 0):
        test.cancel('Libvirt version is too low for this test.')

    vm_name = params.get('main_vm')
    rand_id = '_' + utils_misc.generate_random_string(3)

    upu_vm_name = 'upu_vm' + rand_id
    bridge_name = params.get('bridge_name', 'test_br0') + rand_id
    device_type = params.get('device_type', '')
    iface_name = utils_net.get_net_if(state="UP")[0]
    tap_name = params.get('tap_name', 'mytap0') + rand_id
    macvtap_name = params.get('macvtap_name', 'mymacvtap0') + rand_id
    remote_ip = params.get('remote_ip')
    up_user = params.get('up_user', 'test_upu') + rand_id
    case = params.get('case', '')

    # Create unprivileged user
    logging.info('Create unprivileged user %s', up_user)
    process.run('useradd %s' % up_user, shell=True, verbose=True)
    root_vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    upu_args = {
        'unprivileged_user': up_user,
        'ignore_status': False,
        'debug': True,
    }

    # BUGFIX: cleanup used to be guarded by "'upu_virsh' in locals()", but
    # no name 'upu_virsh' was ever assigned, so the unprivileged user's vm
    # was never destroyed/undefined. Track the defined state explicitly.
    upu_vm_defined = False

    try:
        # Create vm as unprivileged user
        logging.info('Create vm as unprivileged user')
        upu_vmxml = root_vmxml.copy()

        # Prepare vm for unprivileged user
        xml_devices = upu_vmxml.devices
        disks = xml_devices.by_device_tag("disk")
        for disk in disks:
            ori_path = disk.source['attrs'].get('file')
            if not ori_path:
                continue

            file_name = ori_path.split('/')[-1]
            new_disk_path = '/home/{}/{}'.format(up_user, file_name)
            logging.debug('New disk path:{}'.format(new_disk_path))

            # Copy disk image file and chown to make sure that
            # unprivileged user has access
            shutil.copyfile(ori_path, new_disk_path)
            shutil.chown(new_disk_path, up_user, up_user)

            # Modify xml to set new path of disk
            disk_index = xml_devices.index(disk)
            source = xml_devices[disk_index].source
            new_attrs = source.attrs
            new_attrs['file'] = new_disk_path
            source.attrs = new_attrs
            xml_devices[disk_index].source = source
            logging.debug(xml_devices[disk_index].source)

        upu_vmxml.devices = xml_devices

        new_xml_path = '/home/{}/upu.xml'.format(up_user)
        shutil.copyfile(upu_vmxml.xml, new_xml_path)

        # Define vm for unprivileged user and rename it so it does not
        # clash with the root user's domain name
        virsh.define(new_xml_path, **upu_args)
        virsh.domrename(vm_name, upu_vm_name, **upu_args)
        upu_vm_defined = True
        logging.debug(virsh.dumpxml(upu_vm_name, **upu_args))
        upu_vmxml = vm_xml.VMXML()
        upu_vmxml.xml = virsh.dumpxml(upu_vm_name, **upu_args).stdout_text

        if case == 'precreated':
            if device_type == 'tap':
                # Create bridge
                create_bridge(bridge_name, iface_name)

                # Create tap device owned by the unprivileged user and
                # enslave it to the bridge
                tap_cmd = 'ip tuntap add mode tap user {user} group {user} ' \
                          'name {tap};ip link set {tap} up;ip link set {tap} ' \
                          'master {br}'.format(tap=tap_name, user=up_user,
                                               br=bridge_name)

                # Execute command as root
                process.run(tap_cmd, shell=True, verbose=True)

            if device_type == 'macvtap':
                # Create macvtap device
                mac_addr = utils_net.generate_mac_address_simple()
                macvtap_cmd = 'ip link add link {iface} name {macvtap} address' \
                              ' {mac} type macvtap mode bridge;' \
                              'ip link set {macvtap} up'.format(
                               iface=iface_name,
                               macvtap=macvtap_name,
                               mac=mac_addr)
                process.run(macvtap_cmd, shell=True, verbose=True)
                cmd_get_tap = 'ip link show {} | head -1 | cut -d: -f1'.format(macvtap_name)
                tap_index = process.run(cmd_get_tap, shell=True, verbose=True).stdout_text.strip()
                device_path = '/dev/tap{}'.format(tap_index)
                logging.debug('device_path: {}'.format(device_path))
                # Change owner and group for device
                process.run('chown {user} {path};chgrp {user} {path}'.format(
                    user=up_user, path=device_path),
                    shell=True, verbose=True)
                # Check if device owner is changed to unprivileged user
                process.run('ls -l %s' % device_path, shell=True, verbose=True)

            # Modify interface
            all_devices = upu_vmxml.devices
            iface_list = all_devices.by_device_tag('interface')
            if not iface_list:
                test.error('No iface to modify')
            iface = iface_list[0]

            # Remove other interfaces
            for ifc in iface_list[1:]:
                all_devices.remove(ifc)

            if device_type == 'tap':
                dev_name = tap_name
            elif device_type == 'macvtap':
                dev_name = macvtap_name
            else:
                test.error('Invalid device type: {}'.format(device_type))

            # Point the remaining interface at the precreated device
            if_index = all_devices.index(iface)
            iface = all_devices[if_index]
            iface.type_name = 'ethernet'
            iface.target = {
                'dev': dev_name,
                'managed': 'no'
            }

            if device_type == 'macvtap':
                iface.mac_address = mac_addr
            logging.debug(iface)

            upu_vmxml.devices = all_devices
            logging.debug(upu_vmxml)

            # Define updated xml
            shutil.copyfile(upu_vmxml.xml, new_xml_path)
            upu_vmxml.xml = new_xml_path
            virsh.define(new_xml_path, **upu_args)

            # Switch to unprivileged user and modify vm's interface
            # Start vm as unprivileged user and test network
            virsh.start(upu_vm_name, debug=True, ignore_status=False,
                        unprivileged_user=up_user)
            cmd = ("su - %s -c 'virsh console %s'"
                   % (up_user, upu_vm_name))
            session = aexpect.ShellSession(cmd)
            session.sendline()
            remote.handle_prompts(session, params.get("username"),
                                  params.get("password"), r"[\#\$]\s*$", 60)
            logging.debug(session.cmd_output('ifconfig'))
            check_ping(remote_ip, 5, 10, session=session)
            session.close()

    finally:
        if upu_vm_defined:
            virsh.destroy(upu_vm_name, unprivileged_user=up_user)
            virsh.undefine(upu_vm_name, unprivileged_user=up_user)
        if case == 'precreated':
            try:
                if device_type == 'tap':
                    process.run('ip tuntap del mode tap {}'.format(tap_name), shell=True, verbose=True)
                elif device_type == 'macvtap':
                    process.run('ip l del {}'.format(macvtap_name), shell=True, verbose=True)
            except Exception:
                pass
            finally:
                cmd = 'tmux -c "ip link set {1} nomaster;  ip link delete {0};' \
                      'pkill dhclient; sleep 6; dhclient {1}"'.format(bridge_name, iface_name)
                process.run(cmd, shell=True, verbose=True, ignore_status=True)
        process.run('pkill -u {0};userdel -f -r {0}'.format(up_user), shell=True, verbose=True, ignore_status=True)
Example #33
0
    def check_result(cmd, result, status_error):
        """
        Check virt-v2v command result

        Verifies exit status, then runs per-checkpoint validations on the
        command output (error-log wrapping, disk allocation/format/name,
        import into oVirt/libvirt, quiet mode, dependency listing, ...).

        :param cmd: the virt-v2v command line that was executed
        :param result: CmdResult object of the virt-v2v run
        :param status_error: True when the command is expected to fail
        :raise: test.fail if any validation does not hold
        """
        utils_v2v.check_exit_status(result, status_error, error_flag)
        output = to_text(result.stdout + result.stderr, errors=error_flag)
        output_stdout = to_text(result.stdout, errors=error_flag)
        if status_error:
            if checkpoint == 'length_of_error':
                # virt-v2v error lines (between a 'virt-v2v:' line and a
                # 'libvirt:' line) must wrap at 72 columns
                log_lines = output.split('\n')
                v2v_start = False
                for line in log_lines:
                    if line.startswith('virt-v2v:'):
                        v2v_start = True
                    if line.startswith('libvirt:'):
                        v2v_start = False
                    if v2v_start and len(line) > 72:
                        test.fail('Error log longer than 72 characters: %s' %
                                  line)
            if checkpoint == 'disk_not_exist':
                vol_list = virsh.vol_list(pool_name)
                logging.info(vol_list)
                if vm_name in vol_list.stdout:
                    test.fail('Disk exists for vm %s' % vm_name)
        else:
            if output_mode == "rhev" and checkpoint != 'quiet':
                ovf = get_ovf_content(output)
                logging.debug("ovf content: %s", ovf)
                check_ovf_snapshot_id(ovf)
                if '--vmtype' in cmd:
                    expected_vmtype = re.findall(r"--vmtype\s(\w+)", cmd)[0]
                    check_vmtype(ovf, expected_vmtype)
            if '-oa' in cmd and '--no-copy' not in cmd:
                expected_mode = re.findall(r"-oa\s(\w+)", cmd)[0]
                img_path = get_img_path(output)

                def check_alloc():
                    """Return True when image allocation matches -oa mode."""
                    try:
                        check_image(img_path, "allocation", expected_mode)
                        return True
                    except exceptions.TestFail:
                        pass

                # Allocation may settle after conversion; poll up to 10 min
                if not utils_misc.wait_for(check_alloc, timeout=600,
                                           step=10.0):
                    test.fail('Allocation check failed.')
            if '-of' in cmd and '--no-copy' not in cmd and '--print-source' not in cmd and checkpoint != 'quiet':
                expected_format = re.findall(r"-of\s(\w+)", cmd)[0]
                img_path = get_img_path(output)
                check_image(img_path, "format", expected_format)
            if '-on' in cmd:
                expected_name = re.findall(r"-on\s(\w+)", cmd)[0]
                check_new_name(output, expected_name)
            if '--no-copy' in cmd:
                check_nocopy(output)
            if '-oc' in cmd:
                expected_uri = re.findall(r"-oc\s(\S+)", cmd)[0]
                check_connection(output, expected_uri)
            if output_mode == "rhev":
                if not utils_v2v.import_vm_to_ovirt(params, address_cache):
                    test.fail("Import VM failed")
                else:
                    params['vmcheck_flag'] = True
            if output_mode == "libvirt":
                if "qemu:///session" not in v2v_options and not no_root:
                    virsh.start(vm_name, debug=True, ignore_status=False)
            # BUGFIX: was `checkpoint == ['vmx', 'vmx_ssh']`, which compares
            # a string against a list and is always False, so the VM
            # checker never ran for the vmx/vmx_ssh checkpoints.
            if checkpoint in ['vmx', 'vmx_ssh']:
                vmchecker = VMChecker(test, params, env)
                params['vmchecker'] = vmchecker
                params['vmcheck_flag'] = True
                ret = vmchecker.run()
                if len(ret) == 0:
                    logging.info("All common checkpoints passed")
            if checkpoint == 'quiet':
                if len(output.strip().splitlines()) > 10:
                    test.fail('Output is not empty in quiet mode')
            if checkpoint == 'dependency':
                if 'libguestfs-winsupport' not in output:
                    test.fail('libguestfs-winsupport not in dependency')
                if all(pkg_pattern not in output
                       for pkg_pattern in ['VMF', 'edk2-ovmf']):
                    test.fail('OVMF/AAVMF not in dependency')
                if 'qemu-kvm-rhev' in output:
                    test.fail('qemu-kvm-rhev is in dependency')
                if 'libX11' in output:
                    test.fail('libX11 is in dependency')
                if 'kernel-rt' in output:
                    test.fail('kernel-rt is in dependency')
                win_img = params.get('win_image')
                command = 'guestfish -a %s -i'
                # guestfish on the Windows image must NOT succeed here
                if process.run(command % win_img,
                               ignore_status=True).exit_status == 0:
                    test.fail('Command "%s" success' % command % win_img)
            if checkpoint == 'no_dcpath':
                if '--dcpath' in output:
                    test.fail('"--dcpath" is not removed')
            if checkpoint == 'debug_overlays':
                search = re.search('Overlay saved as(.*)', output)
                if not search:
                    test.fail('Not find log of saving overlays')
                overlay_path = search.group(1).strip()
                logging.debug('Overlay file location: %s' % overlay_path)
                if os.path.isfile(overlay_path):
                    logging.info('Found overlay file: %s' % overlay_path)
                else:
                    test.fail('Overlay file not saved')
            if checkpoint.startswith('empty_nic_source'):
                target_str = '%s "eth0" mac: %s' % (params[checkpoint][0],
                                                    params[checkpoint][1])
                logging.info('Expect log: %s', target_str)
                if target_str not in output_stdout.lower():
                    test.fail('Expect log not found: %s' % target_str)
            if checkpoint == 'print_source':
                check_source(output_stdout)
            if checkpoint == 'machine_readable':
                if os.path.exists(params.get('example_file', '')):
                    # Checking items in example_file exist in latest
                    # output regardless of the orders and new items.
                    with open(params['example_file']) as f:
                        for line in f:
                            if line.strip() not in output_stdout.strip():
                                test.fail(
                                    '%s not in --machine-readable output' %
                                    line.strip())
                else:
                    test.error('No content to compare with')
            if checkpoint == 'compress':
                img_path = get_img_path(output)
                logging.info('Image path: %s', img_path)

                qemu_img_cmd = 'qemu-img check %s' % img_path
                # -U skips image locking when qemu-img supports it
                qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support(
                )
                if qemu_img_locking_feature_support:
                    qemu_img_cmd = 'qemu-img check %s -U' % img_path

                disk_check = process.run(qemu_img_cmd).stdout_text
                logging.info(disk_check)
                compress_info = disk_check.split(',')[-1].split('%')[0].strip()
                compress_rate = float(compress_info)
                logging.info('%s%% compressed', compress_rate)
                if compress_rate < 0.1:
                    test.fail('Disk image NOT compressed')
            if checkpoint == 'tail_log':
                messages = params['tail'].get_output()
                logging.info('Content of /var/log/messages during conversion:')
                logging.info(messages)
                msg_content = params['msg_content']
                if msg_content in messages:
                    test.fail('Found "%s" in /var/log/messages' % msg_content)
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            test.fail(log_check)
        check_man_page(params.get('in_man'), params.get('not_in_man'))
Example #34
0
def run(test, params, env):
    """
    Test command: virsh domstate.

    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh domstate operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)

    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("domstate_vm_ref")
    status_error = (params.get("status_error", "no") == "yes")
    extra = params.get("domstate_extra", "")
    vm_action = params.get("domstate_vm_action", "")

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    # Translate the symbolic vm_ref from the config into the concrete
    # reference passed to virsh
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif "invalid" in vm_ref:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # Bring the guest into the state required by the test case
    prepare_steps = {
        "suspend": (virsh.suspend,),
        "resume": (virsh.suspend, virsh.resume),
        "destroy": (virsh.destroy,),
        "start": (virsh.destroy, virsh.start),
    }
    try:
        for step in prepare_steps.get(vm_action, ()):
            step(vm_name, ignore_status=False)
    except error.CmdError:
        raise error.TestError("Guest prepare action error!")

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    if vm_ref == "remote":
        remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
        local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
        remote_pwd = params.get("remote_pwd", None)
        if "EXAMPLE.COM" in remote_ip or "EXAMPLE.COM" in local_ip:
            raise error.TestNAError("Test 'remote' parameters not setup")
        status = 0
        try:
            remote_uri = libvirt_vm.complete_uri(local_ip)
            session = remote.remote_login("ssh", remote_ip, "22", "root",
                                          remote_pwd, "#")
            session.cmd_output('LANG=C')
            command = "virsh -c %s domstate %s" % (remote_uri, vm_name)
            status, output = session.cmd_status_output(command,
                                                       internal_timeout=5)
            session.close()
        except error.CmdError:
            status = 1
    else:
        cmd_result = virsh.domstate(vm_ref, extra, ignore_status=True)
        status = cmd_result.exit_status
        output = cmd_result.stdout.strip()

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # check status_error
    if status_error:
        if not status:
            raise error.TestFail("Run successfully with wrong command!")
    else:
        if status or not output:
            raise error.TestFail("Run failed with right command")
        if "reason" in extra:
            # Expected state-change reason keyword per prepare action
            reason_words = {
                "suspend": "user",
                "resume": "unpaused",
                "destroy": "destroyed",
                "start": "booted",
            }
            if vm_action == "suspend":
                # If not, will cost long time to destroy vm
                virsh.destroy(vm_name)
            expected_word = reason_words.get(vm_action)
            if expected_word and expected_word not in output:
                raise ActionError(vm_action)
        if vm_ref == "remote":
            if not any(re.search(state, output)
                       for state in ("running", "blocked", "idle")):
                raise error.TestFail("Run failed with right command")
Example #35
0
def run(test, params, env):
    """
    Test misc tests of virtual cpu features

    1) check dumpxml after snapshot-create/revert
    2) check vendor_id
    3) check maximum vcpus with topology settings

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def update_cpu_xml():
        """
        Update cpu xml for test

        Applies cpu_mode / cpu_vendor_id (closed over from the enclosing
        scope) to the domain's <cpu> element and syncs the xml; when
        vcpu_max is set, raises the vcpu count, optionally with an
        explicit one-socket topology.
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Create cpu xml for test: reuse the existing <cpu> element when
        # the domain xml already has one, otherwise start from a new one.
        if vmxml.xmltreefile.find('cpu'):
            cpu_xml = vmxml.cpu
        else:
            cpu_xml = vm_xml.VMCPUXML()
        if cpu_mode:
            cpu_xml.mode = cpu_mode
        if cpu_vendor_id:
            cpu_xml.vendor_id = cpu_vendor_id

        # Update vm's cpu
        vmxml.cpu = cpu_xml
        vmxml.sync()

        if vcpu_max:
            if with_topology:
                vm_xml.VMXML.set_vm_vcpus(vm_name,
                                          int(vcpu_max),
                                          cores=int(vcpu_max),
                                          sockets=1,
                                          threads=1,
                                          add_topology=with_topology,
                                          topology_correction=with_topology)
            else:
                vm_xml.VMXML.set_vm_vcpus(vm_name, int(vcpu_max))

    def do_snapshot(vm_name, expected_str):
        """
        Run snapshot related commands: snapshot-create-as, snapshot-list
        snapshot-dumpxml, snapshot-revert

        :param vm_name: vm name
        :param expected_str: expected string in snapshot-dumpxml
        :raise: test.fail if virsh command failed
        """
        snapshot_name = vm_name + "-snap"
        virsh_dargs = {'debug': True}

        cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name,
                                              **virsh_dargs)
        libvirt.check_exit_status(cmd_result)

        try:
            snapshots = virsh.snapshot_list(vm_name, **virsh_dargs)
        except process.CmdError:
            test.fail("Failed to get snapshots list for %s" % vm_name)
        if snapshot_name not in snapshots:
            test.fail("The snapshot '%s' was not in snapshot-list." %
                      snapshot_name)
        # The cpu settings must be preserved inside the snapshot xml
        cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name,
                                            **virsh_dargs)
        libvirt.check_result(cmd_result, expected_match=expected_str)

        cmd_result = virsh.snapshot_revert(vm_name, "", "--current",
                                           **virsh_dargs)
        libvirt.check_exit_status(cmd_result)

    libvirt_version.is_libvirt_feature_supported(params)
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)

    cpu_mode = params.get('cpu_mode')
    vcpu_max = params.get('vcpu_max')
    expected_str_before_startup = params.get("expected_str_before_startup")
    expected_str_after_startup = params.get("expected_str_after_startup")

    test_operations = params.get("test_operations")
    check_vendor_id = "yes" == params.get("check_vendor_id", "no")
    virsh_edit_cmd = params.get("virsh_edit_cmd")
    with_topology = "yes" == params.get("with_topology", "no")

    status_error = "yes" == params.get("status_error", "no")
    err_msg = params.get("err_msg")
    # NOTE(review): err_msg is read from params but not used in the
    # visible flow of this function — confirm intent.

    cpu_vendor_id = None
    expected_qemuline = None
    cmd_in_guest = params.get("cmd_in_guest")

    # Back up the inactive domain xml so it can be restored in `finally`
    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        if check_vendor_id:
            # Pick a vendor_id different from the real host vendor so the
            # override is observable from inside the guest
            output = virsh.capabilities(debug=True)
            host_vendor = re.findall(r'<vendor>(\w+)<', output)[0]

            cpu_vendor_id = 'GenuineIntel'
            if host_vendor != "Intel":
                cpu_vendor_id = 'AuthenticAMD'
            logging.debug("Set cpu vendor_id to %s on this host.",
                          cpu_vendor_id)

            expected_qemuline = "vendor=" + cpu_vendor_id
            cmd_in_guest = ("cat /proc/cpuinfo | grep vendor_id | grep {}".
                            format(cpu_vendor_id))

        # Update xml for test
        update_cpu_xml()

        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Pre-test xml is %s", vmxml.xmltreefile)

        if expected_str_before_startup:
            libvirt.check_dumpxml(vm, expected_str_before_startup)

        if test_operations:
            for action in test_operations.split(","):
                if action == "do_snapshot":
                    do_snapshot(vm_name, expected_str_before_startup)

        if virsh_edit_cmd:
            # NOTE(review): exec_virsh_edit's return value is compared to
            # status_error directly; presumably it returns a boolean
            # success flag — confirm against the helper's definition.
            status = libvirt.exec_virsh_edit(vm_name,
                                             virsh_edit_cmd.split(","))
            if status == status_error:
                test.fail("Virsh edit got unexpected result.")

        # Check if vm could start successfully
        if not status_error:
            result = virsh.start(vm_name, debug=True)
            libvirt.check_exit_status(result)

            if expected_str_after_startup:
                libvirt.check_dumpxml(vm, expected_str_after_startup)

            if expected_qemuline:
                libvirt.check_qemu_cmd_line(expected_qemuline)

            if cmd_in_guest:
                # Run the verification command inside the guest
                vm_session = vm.wait_for_login()
                status, output = vm_session.cmd_status_output(cmd_in_guest)
                if status:
                    vm_session.close()
                    test.fail("Failed to run '{}' in vm with "
                              "messages:\n{}".format(cmd_in_guest, output))
                vm_session.close()
                if cpu_mode == 'maximum':
                    check_vm_cpu_model(output.strip(), cmd_in_guest, test)
    finally:
        logging.debug("Recover test environment")
        if vm.is_alive():
            vm.destroy()

        libvirt.clean_up_snapshots(vm_name, domxml=bkxml)
        bkxml.sync()
Example #36
0
def run(test, params, env):
    """
    Test the command virsh vcpupin

    (1) Get the host and guest cpu count
    (2) Call virsh vcpupin for each vcpu with pinning of each cpu
    (3) Check whether the virsh vcpupin has pinned the respective vcpu to cpu
    """

    def affinity_from_vcpuinfo(vm_name, vcpu):
        """
        Return the affinity of a vcpu as a list of '-'/'y' flags parsed
        from the 'CPU Affinity' rows of virsh vcpuinfo output.

        :param vm_name: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        :return: list of single-character affinity flags, one per host cpu
        """
        vcpuinfo_out = virsh.vcpuinfo(vm_name).stdout.rstrip()
        affinity_rows = re.findall('CPU Affinity: +[-y]+', vcpuinfo_out)
        # The last whitespace-separated field of the row is the affinity
        # string itself (e.g. 'y--y'); split it into one flag per cpu.
        flags = affinity_rows[int(vcpu)].split()[-1].strip()
        return [flag for flag in flags]

    def affinity_from_vcpupin(vm_name, vcpu):
        """
        Return the affinity of a vcpu parsed from virsh vcpupin output.

        :param vm_name: VM Name
        :param vcpu: VM cpu pid
        :return : list of affinity to vcpus
        """
        host_cpu_num = process.run("ls -d /sys/devices/system/cpu/cpu[0-9]* |wc -l", shell=True).stdout.strip()
        vcpupin_out = virsh.vcpupin(vm_name).stdout
        pin_map = {}
        # Skip the two header lines and the trailing blank lines; each
        # remaining row looks like ' 0: 0-3'.
        for row in vcpupin_out.split('\n')[2:-2]:
            pin_map[row.split(':')[0].strip()] = row.split(':')[1].strip()
        return utils_test.libvirt.cpus_string_to_affinity_list(
            pin_map[str(vcpu)], int(host_cpu_num))

    def check_vcpupin(vm_name, vcpu, cpu_list, pid, vcpu_pid):
        """
        Verify that the given vcpu's actual affinity matches the expected
        one in virsh vcpuinfo, virsh vcpupin and (when a pid is given)
        the proc entry of the vcpu task; fail the test on any mismatch.

        :param vm_name: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        :param cpu_list: cpu specification string used for pinning
        :param pid: VM pid (the proc check is skipped when None)
        :param vcpu_pid: pid of the vcpu task inside the VM process
        """
        host_cpu_num = process.run("ls -d /sys/devices/system/cpu/cpu[0-9]* |wc -l", shell=True).stdout.strip()
        logging.debug("Debug: cpulist %s", cpu_list)
        expected_output = utils_test.libvirt.cpus_string_to_affinity_list(
            cpu_list, int(host_cpu_num))
        logging.debug("Expected affinity: %s", expected_output)

        # The affinity reported by virsh vcpuinfo must match.
        actual_output = affinity_from_vcpuinfo(vm_name, vcpu)
        logging.debug("Actual affinity in vcpuinfo output: %s", actual_output)
        if expected_output != actual_output:
            test.fail("Cpu pinning details not updated properly in"
                      " virsh vcpuinfo command output")
        logging.info("successfully pinned cpu_list: %s --> vcpu: %s",
                     cpu_list, vcpu)

        # The affinity reported by virsh vcpupin must match as well.
        actual_output_vcpupin = affinity_from_vcpupin(vm_name, vcpu)
        logging.debug("Actual affinity in vcpupin output: %s",
                      actual_output_vcpupin)
        if expected_output != actual_output_vcpupin:
            test.fail("Cpu pinning details not updated properly in"
                      " virsh vcpupin command output")
        logging.info("successfully pinned cpu_list: %s --> vcpu: %s",
                     cpu_list, vcpu)

        # Without a VM pid there is no proc entry to inspect.
        if pid is None:
            return
        # Cross-check against the kernel's view in /proc/<pid>/task/<tid>.
        allowed = utils_test.libvirt.cpu_allowed_list_by_task(pid, vcpu_pid)
        actual_output_proc = utils_test.libvirt.cpus_string_to_affinity_list(
            allowed, int(host_cpu_num))
        logging.debug("Actual affinity in guest proc: %s", actual_output_proc)
        if expected_output != actual_output_proc:
            test.fail("Cpu pinning details are not "
                      "updated properly in /proc/"
                      "%s/task/%s/status" % (pid, vcpu_pid))
        logging.info("successfully pinned vcpu: %s --> cpu: %s"
                     " in respective proc entry", vcpu, cpu_list)

    def run_and_check_vcpupin(vm, vm_ref, vcpu, cpu_list, options):
        """
        Run the vcpupin command and then check the result against the
        expected success/failure for this case.
        """
        # Resolve the domain reference the command should use.
        if vm_ref == "name":
            vm_ref = vm.name
        elif vm_ref == "uuid":
            vm_ref = vm.get_uuid()
        # Execute virsh vcpupin command.
        result = virsh.vcpupin(vm_ref, vcpu, cpu_list, options, debug=True)
        failed = bool(result.exit_status)
        if failed != status_error:
            # The outcome contradicts the expectation for this case.
            test.fail(result)
        if failed or status_error:
            # Expected failure: nothing further to verify.
            return
        # Command success and it is positive case.
        # "--config" will take effect after VM destroyed.
        pid = None
        vcpu_pid = None
        if options == "--config":
            virsh.destroy(vm.name)
        else:
            pid = vm.get_pid()
            logging.debug("vcpus_pid: %s", vm.get_vcpus_pid())
            vcpu_pid = vm.get_vcpus_pid()[vcpu]
        # Check the result of vcpupin command.
        check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)

    def offline_pin_and_check(vm, vcpu, cpu_list):
        """
        Edit domain xml to pin vcpu and check the result.

        :param vm: VM object to pin
        :param vcpu: vcpu number to pin
        :param cpu_list: cpuset string the vcpu should be pinned to
        """
        # Persist the pinning in the inactive domain XML via <cputune>.
        cputune = vm_xml.VMCPUTuneXML()
        cputune.vcpupins = [{'vcpu': str(vcpu), 'cpuset': cpu_list}]
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        vmxml.cputune = cputune
        vmxml.sync()
        # Wait for the domain to reach 'shut off' before restarting it so
        # the new pinning takes effect (presumably sync() leaves the
        # domain shutting down — TODO confirm).
        utils_misc.wait_for(lambda: vm.state() == "shut off", 10)
        cmdResult = virsh.start(vm.name, debug=True)
        libvirt.check_exit_status(cmdResult, status_error)
        pid = vm.get_pid()
        vcpu_pid = vm.get_vcpus_pid()[vcpu]
        # Verify the pinning through vcpuinfo, vcpupin and the proc entry.
        check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)

    if not virsh.has_help_command('vcpucount'):
        test.cancel("This version of libvirt doesn't"
                    " support this test")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    # Get the variables for vcpupin command.
    vm_ref = params.get("vcpupin_vm_ref", "name")
    options = params.get("vcpupin_options", "--current")
    cpu_list = params.get("vcpupin_cpu_list", "x")
    start_vm = ("yes" == params.get("start_vm", "yes"))
    vcpupin_initial = ("yes" == params.get("vcpupin_initial", "no"))

    # Get status of this case.
    status_error = ("yes" == params.get("status_error", "no"))

    # Edit domain xml to pin vcpus
    offline_pin = ("yes" == params.get("offline_pin", "no"))

    # Backup for recovery.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    if start_vm and vm.state() == "shut off":
        cmdResult = virsh.start(vm.name, debug=True)
        libvirt.check_exit_status(cmdResult, status_error)

    # Get the guest vcpu count
    if offline_pin:
        vcpucount_option = "--config --active"
    else:
        vcpucount_option = "--live --active"
    guest_vcpu_count = virsh.vcpucount(vm_name,
                                       vcpucount_option).stdout.strip()

    # Find the alive cpus list
    cpus_list = map(str, cpuutils.cpu_online_list())
    logging.info("Active cpus in host are %s", cpus_list)

    try:
        # Control multi domain vcpu affinity
        multi_dom = ("yes" == params.get("multi_dom_pin", "no"))
        vm2 = None
        # Before doing any vcpupin actions, lets check whether
        # initial pinning state is fine
        if vcpupin_initial:
            pid = vm.get_pid()
            logging.debug("vcpus_pid: %s vcpu count: %s", vm.get_vcpus_pid(), guest_vcpu_count)
            for vcpu in range(int(guest_vcpu_count)):
                vcpu_pid = vm.get_vcpus_pid()[vcpu]
                # Check the result of vcpupin command.
                check_vcpupin(vm.name, vcpu, str(','.join(cpus_list)), pid, vcpu_pid)
            return

        if multi_dom:
            vm_names = params.get("vms").split()
            if len(vm_names) > 1:
                vm2 = env.get_vm(vm_names[1])
            else:
                test.error("Need more than one domains")
            if not vm2:
                test.cancel("No %s find" % vm_names[1])
            vm2.destroy()
            vm2xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm2.name)
            vm2xml_backup = vm2xml.copy()
            # Make sure vm2 has the same cpu numbers with vm
            vm2xml.set_vm_vcpus(vm2.name, int(guest_vcpu_count), guest_vcpu_count)
            if start_vm:
                vm2.start()

        # Run cases when guest is shutoff.
        if not offline_pin:
            if vm.is_dead() and not start_vm:
                run_and_check_vcpupin(vm, vm_ref, 0, 0, "")
                return
        # Get the host cpu count
        host_online_cpu_count = len(cpus_list)
        online_cpu_max = max(map(int, cpus_list))
        host_cpu_count = cpuutils.total_cpus_count()
        cpu_max = int(host_cpu_count) - 1
        if (host_online_cpu_count < 2) and (not cpu_list == "x"):
            test.cancel("We need more cpus on host in this "
                        "case for the cpu_list=%s. But "
                        "current number of cpu on host is %s."
                        % (cpu_list, host_online_cpu_count))

        # Run test case
        for vcpu in range(int(guest_vcpu_count)):
            if cpu_list == "x":
                for cpu in cpus_list:
                    left_cpus = "0-%s,^%s" % (online_cpu_max, cpu)
                    if offline_pin:
                        offline_pin_and_check(vm, vcpu, str(cpu))
                        if multi_dom:
                            offline_pin_and_check(vm2, vcpu, left_cpus)
                    else:
                        run_and_check_vcpupin(vm, vm_ref, vcpu, str(cpu),
                                              options)
                        if multi_dom:
                            run_and_check_vcpupin(vm2, "name", vcpu, left_cpus,
                                                  options)
            else:
                if cpu_list == "x-y":
                    cpus = "0-%s" % online_cpu_max
                elif cpu_list == "x,y":
                    cpus = ','.join(random.sample(cpus_list, 2))
                    logging.info(cpus)
                elif cpu_list == "x-y,^z":
                    cpus = "0-%s,^%s" % (online_cpu_max, online_cpu_max)
                elif cpu_list == "r":
                    cpus = "r"
                elif cpu_list == "-1":
                    cpus = "-1"
                elif cpu_list == "out_of_max":
                    cpus = str(cpu_max + 1)
                else:
                    test.cancel("Cpu_list=%s is not recognized."
                                % cpu_list)
                if offline_pin:
                    offline_pin_and_check(vm, vcpu, cpus)
                else:
                    run_and_check_vcpupin(vm, vm_ref, vcpu, cpus, options)
    finally:
        # Recover xml of vm.
        vmxml_backup.sync()
        if vm2:
            vm2xml_backup.sync()
Example #37
0
 def check_result(cmd, result, status_error):
     """
     Check virt-v2v command result

     :param cmd: the virt-v2v command line that was executed
     :param result: CmdResult of the virt-v2v run
     :param status_error: True when the command is expected to fail
     """
     utlv.check_exit_status(result, status_error)
     output = result.stdout + result.stderr
     if status_error:
         if checkpoint == 'length_of_error':
             log_lines = output.split('\n')
             v2v_start = False
             for line in log_lines:
                 if line.startswith('virt-v2v:'):
                     v2v_start = True
                 if line.startswith('libvirt:'):
                     v2v_start = False
                 # Compare the line's length, not the line itself: the
                 # previous 'line > 72' compared a str against an int.
                 if v2v_start and len(line) > 72:
                     # Format the message in place; passing 'line' as a
                     # second argument only stuffed it into args.
                     raise exceptions.TestFail('Error log longer than 72 '
                                               'charactors: %s' % line)
         else:
             error_map = {
                 'conflict_options': ['option used more than once'],
                 'xen_no_output_format': ['The input metadata did not define'
                                          ' the disk format']
             }
             if not utils_v2v.check_log(output, error_map[checkpoint]):
                 raise exceptions.TestFail('Not found error message %s' %
                                           error_map[checkpoint])
     else:
         if output_mode == "rhev" and checkpoint != 'quiet':
             ovf = get_ovf_content(output)
             logging.debug("ovf content: %s", ovf)
             if '--vmtype' in cmd:
                 expected_vmtype = re.findall(r"--vmtype\s(\w+)", cmd)[0]
                 check_vmtype(ovf, expected_vmtype)
         if '-oa' in cmd and '--no-copy' not in cmd:
             expected_mode = re.findall(r"-oa\s(\w+)", cmd)[0]
             img_path = get_img_path(output)
             check_image(img_path, "allocation", expected_mode)
         if '-of' in cmd and '--no-copy' not in cmd and checkpoint != 'quiet':
             expected_format = re.findall(r"-of\s(\w+)", cmd)[0]
             img_path = get_img_path(output)
             check_image(img_path, "format", expected_format)
         if '-on' in cmd:
             expected_name = re.findall(r"-on\s(\w+)", cmd)[0]
             check_new_name(output, expected_name)
         if '--no-copy' in cmd:
             check_nocopy(output)
         if '-oc' in cmd:
             expected_uri = re.findall(r"-oc\s(\S+)", cmd)[0]
             check_connection(output, expected_uri)
         if output_mode == "rhev":
             if not utils_v2v.import_vm_to_ovirt(params, address_cache):
                 raise exceptions.TestFail("Import VM failed")
             else:
                 params['vmcheck_flag'] = True
         if output_mode == "libvirt":
             if "qemu:///session" not in v2v_options:
                 virsh.start(vm_name, debug=True, ignore_status=False)
         if checkpoint == 'quiet':
             if len(output.strip()) != 0:
                 raise exceptions.TestFail('Output is not empty in quiet mode')
Example #38
0
def run(test, params, env):
    """
    Test vcpu hotpluggable item in xml

    1. Set the libvirtd log filter/level/file
    2. Restart libvirtd
    3. Start vm by xml with vcpu hotpluggable
    4. Check the qemu command line
    5. Check the libvirtd log
    6. Restart libvrtd
    7. Check the vm xml
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vcpus_placement = params.get("vcpus_placement", "static")
    vcpus_crt = int(params.get("vcpus_current", "4"))
    vcpus_max = int(params.get("vcpus_max", "8"))
    vcpus_enabled = params.get("vcpus_enabled", "")
    vcpus_hotplug = params.get("vcpus_hotpluggable", "")
    vcpus_order = params.get("vcpus_order")
    err_msg = params.get("err_msg", "")
    config_libvirtd = params.get("config_libvirtd", "yes") == "yes"
    log_file = params.get("log_file", "libvirtd.log")
    live_vcpus = params.get("set_live_vcpus", "")
    config_vcpus = params.get("set_config_vcpus", "")
    enable_vcpu = params.get("set_enable_vcpu", "")
    disable_vcpu = params.get("set_disable_vcpu", "")
    start_vm_after_config = params.get('start_vm_after_config', 'yes') == 'yes'

    # Backup domain XML
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        # Configure libvirtd log
        if config_libvirtd:
            config_path = os.path.join(data_dir.get_tmp_dir(), log_file)
            with open(config_path, 'a') as f:
                pass
            daemon_conf_dict = {
                "log_level": "1",
                "log_filters":
                "\"1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event\"",
                "log_outputs": "\"1:file:{}\"".format(config_path)
            }
            daemon_conf = libvirt.customize_libvirt_config(daemon_conf_dict)

            # Restart libvirtd to make the changes take effect in libvirt
            libvirtd.restart()

        # Set vcpu: placement,current,max vcpu
        vmxml.placement = vcpus_placement
        vmxml.vcpu = vcpus_max
        vmxml.current_vcpu = vcpus_crt
        del vmxml.cpuset

        # Create vcpu xml with vcpu hotpluggable and order
        vcpu_list = []
        vcpu = {}
        en_list = vcpus_enabled.split(",")
        hotplug_list = vcpus_hotplug.split(",")
        order_dict = ast.literal_eval(vcpus_order)

        for vcpu_id in range(vcpus_max):
            vcpu['id'] = str(vcpu_id)
            if str(vcpu_id) in en_list:
                vcpu['enabled'] = 'yes'
                if str(vcpu_id) in order_dict:
                    vcpu['order'] = order_dict[str(vcpu_id)]
            else:
                vcpu['enabled'] = 'no'
            if str(vcpu_id) in hotplug_list:
                vcpu['hotpluggable'] = 'yes'
            else:
                vcpu['hotpluggable'] = 'no'
            vcpu_list.append(copy.copy(vcpu))
            vcpu = {}

        vcpus_xml = vm_xml.VMVCPUSXML()
        vcpus_xml.vcpu = vcpu_list

        vmxml.vcpus = vcpus_xml

        # Remove influence from topology setting
        try:
            logging.info('Remove influence from topology setting')
            cpuxml = vmxml.cpu
            del cpuxml.topology
            vmxml.cpu = cpuxml
        except Exception as e:
            pass

        vmxml.sync()
        logging.debug("Before starting, VM xml:"
                      "\n%s", vm_xml.VMXML.new_from_inactive_dumpxml(vm_name))
        # Start VM
        if start_vm_after_config:
            logging.info("Start VM with vcpu hotpluggable and order...")
            ret = virsh.start(vm_name, ignore_status=True)

        if err_msg:
            libvirt.check_result(ret, err_msg)
        else:
            if start_vm_after_config:
                # Wait for domain
                vm.wait_for_login()

            # Hot-(un)plug or resize vcpus as requested by the case params.
            if enable_vcpu:
                ret = virsh.setvcpu(vm_name,
                                    enable_vcpu,
                                    "--enable",
                                    ignore_status=False,
                                    debug=True)
                vcpus_crt += 1
            if disable_vcpu:
                # The keyword was previously misspelled 'ingnore_status',
                # silently leaving the default; use the real
                # 'ignore_status' so failures raise like '--enable' does.
                ret = virsh.setvcpu(vm_name,
                                    disable_vcpu,
                                    "--disable",
                                    ignore_status=False,
                                    debug=True)
                vcpus_crt -= 1
            if live_vcpus:
                ret = virsh.setvcpus(vm_name,
                                     live_vcpus,
                                     ignore_status=False,
                                     debug=True)
                vcpus_crt = int(live_vcpus)
            if config_vcpus:
                ret = virsh.setvcpus(vm_name,
                                     config_vcpus,
                                     "--config",
                                     ignore_status=False,
                                     debug=True)

            # Check QEMU command line
            if start_vm_after_config:
                cmd = ("ps -ef| grep %s| grep 'maxcpus=%s'" %
                       (vm_name, vcpus_max))
                ret = process.run(cmd, ignore_status=False, shell=True)
                if ret.exit_status != 0:
                    logging.error("Maxcpus in QEMU command line is wrong!")

            # Check libvirtd log
            if config_libvirtd and start_vm_after_config:
                for vcpu in vcpu_list:
                    if vcpu['enabled'] == 'yes' and vcpu[
                            'hotpluggable'] == 'yes':
                        cmd = (
                            "cat %s| grep device_add| grep qemuMonitorIOWrite"
                            "| grep 'vcpu%s'" % (config_path, vcpu['id']))
                        ret = process.run(cmd, ignore_status=False, shell=True)
                        if ret.exit_status != 0:
                            logging.error(
                                "Failed to find lines about enabled vcpu%s"
                                "in libvirtd log.", vcpu['id'])

            # Dumpxml
            dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            vcpu_items = re.findall(r"vcpu.*", dump_xml)

            # Check guest vcpu count
            ret = virsh.vcpucount(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            # '(?:config|live)' matches the two 'maximum' rows; the
            # previous '[config|live]' was a character class that only
            # matched by accident.
            max_list = re.findall(r"maximum.*(?:config|live).*%s\n" % vcpus_max,
                                  output)
            expect_num = 2 if start_vm_after_config else 1
            if len(max_list) != expect_num:
                test.fail("vcpucount maximum info is not correct.")

            if live_vcpus:
                crt_live_list = re.findall(r"current.*live.*%s" % live_vcpus,
                                           output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current live info is not correct.")
            elif config_vcpus:
                crt_cfg_list = re.findall(
                    r"current.*config.*%s" % config_vcpus, output)
                logging.info("vcpucount crt_cfg_list: \n %s", crt_cfg_list)
                if len(crt_cfg_list) != 1:
                    test.fail("vcpucount: current config info is not correct.")
            else:
                crt_live_list = re.findall(r"current.*live.*%s" % vcpus_crt,
                                           output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current info is not correct.")

            # Check guest vcpu info
            ret = virsh.vcpuinfo(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            vcpu_lines = re.findall(r"VCPU:.*\n", output)
            logging.info("vcpuinfo vcpu_lines: \n %s", vcpu_lines)
            expect_num = vcpus_crt if start_vm_after_config else int(
                config_vcpus)
            if len(vcpu_lines) != expect_num:
                test.fail("vcpuinfo is not correct.")

            # Check cpu in guest
            if start_vm_after_config and not cpu.check_if_vm_vcpu_match(
                    vcpus_crt, vm):
                test.fail(
                    "cpu number in VM is not correct, it should be %s cpus" %
                    vcpus_crt)

            # Check VM xml change for cold-plug/cold-unplug
            if config_vcpus:
                check_vcpu_after_plug_unplug(test, vm_name, config_vcpus)

            # Restart libvirtd
            libvirtd.restart()
            if config_vcpus and not start_vm_after_config:
                check_vm_exist(test, vm_name, 'shut off')
            # Recheck VM xml
            re_dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            re_vcpu_items = re.findall(r"vcpu.*", re_dump_xml)
            if vcpu_items != re_vcpu_items:
                test.fail("After restarting libvirtd,"
                          "VM xml changed unexpectedly.")

            # Check cgroup info
            if start_vm_after_config:
                en_vcpu_list = re.findall(r"vcpu.*enabled=.yes.*", re_dump_xml)
                for vcpu_sn in range(len(en_vcpu_list)):
                    vcpu_id = en_vcpu_list[vcpu_sn].split(
                        "=")[1].split()[0].strip('\'')
                    cg_obj = libvirt_cgroup.CgroupTest(vm.get_pid())
                    cg_path = cg_obj.get_cgroup_path("cpuset")
                    if cg_obj.is_cgroup_v2_enabled():
                        vcpu_path = os.path.join(cg_path, "vcpu%s" % vcpu_id)
                    else:
                        vcpu_path = os.path.join(cg_path,
                                                 "../vcpu%s" % vcpu_id)
                    if not os.path.exists(vcpu_path):
                        test.fail(
                            "Failed to find the enabled vcpu{} in {}.".format(
                                vcpu_id, cg_path))
    finally:
        # Recover libvirtd configuration
        if config_libvirtd and 'daemon_conf' in locals():
            libvirt.customize_libvirt_config(None,
                                             remote_host=False,
                                             is_recover=True,
                                             config_object=daemon_conf)

        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
Example #39
0
def run(test, params, env):
    """
    Test vsock device:

    1.Edit/start guest with vsock device.
    2.Hotplug/hotunplug vsock device.
    3.Coldplug/Coldunplug vsock device
    4.Check if hotplugged vsock communicates.
    """
    def remove_all_vsocks(vm, vmxml):
        """
        Strip every vsock device from the current domain definition so
        leftovers from the original definition (backed up earlier for
        restore) cannot interfere with the test.

        :param vm: the current test domain
        :param vmxml: VMXML of the current test domain
        :return: None
        """
        # Remember whether the domain has to be brought back up after
        # syncing the stripped definition.
        restart_needed = vm.is_alive()
        vmxml.remove_all_device_by_type('vsock')
        vmxml.sync()
        if restart_needed:
            vm.start()

    def env_setup():
        """
        Prepare nc-vsock on both sides of the channel:

        1. Install build dependency for nc-vsock both on guest and host.
        2. Get nc-vsock code.
        3. Build nc-vsock on both guest and host.
        :return: None
        """
        session = vm.wait_for_login()
        setup_cmds = [
            'rpm -q lsof || yum -y install lsof',
            'rpm -q git || yum -y install git',
            'rpm -q make || yum -y install make',
            'rpm -q gcc && yum -y reinstall gcc || yum -y install gcc',
            'rm -rf %s' % NC_VSOCK_DIR,
            'git clone %s %s' % (git_repo, NC_VSOCK_DIR),
            'cd %s && make' % NC_VSOCK_DIR,
        ]
        for setup_cmd in setup_cmds:
            # Run each step in the guest (through the session) and on the
            # host (no session); cancel the test on the first failure.
            for target, target_session in (("guest", session), ("host", None)):
                status, output = utils_misc.cmd_status_output(
                    setup_cmd, shell=True, timeout=CMD_TIMEOUT,
                    session=target_session)
                cancel_if_failed(setup_cmd, status, output, target)

        session.close()

    def cancel_if_failed(cmd, status, output, where):
        """
        Cancel the test as soon as a command failed, reporting its output.

        :param cmd: Command that was executed
        :param status: Exit status of command
        :param output: Output of command
        :param where: "guest" or "host"
        :return:
        """
        if not status:
            return
        test.cancel("Failed to run %s on %s output: %s" %
                    (cmd, where, output))

    def write_from_host_to_guest():
        """
        Send a fixed message to the guest through the vsock channel:

        1. Create file for stdin to nc-vsock
        2. Invoke nc-vsock as client writing to guest cid
        :return msg: The message sent to the server
        """
        msg = "message from client"
        process.run('echo %s > %s' % (msg, NC_VSOCK_CLI_TXT), shell=True)
        client_cmd = "%s %d %s < %s" % (NC_VSOCK_CMD, int(cid),
                                        VSOCK_PORT, NC_VSOCK_CLI_TXT)
        client_output = process.run(client_cmd, shell=True).stdout_text
        logging.debug(client_output)
        process.system_output('cat %s' % NC_VSOCK_CLI_TXT)
        return msg

    def wait_for_guest_to_receive(server):
        """
        Wait briefly for the in-guest nc-vsock server thread; it finishes
        as soon as it received data, so report if it is still running.

        :param server: The started server instance accepting requests
        :return: Nothing
        """
        server.join(5)
        if not server.is_alive():
            return
        logging.debug("The server thread is still running in the guest.")

    def validate_data_transfer_by_vsock():
        """
        Verify host->guest data transfer over the vsock device:

        1. Setup nc-vsock on host and guest
        2. Start vsock server on guest (wait a bit)
        3. Send message from host to guest using correct cid
        4. Get received message from vsock server
        5. Verify message was sent
        """

        env_setup()

        session = vm.wait_for_login()

        def _start_vsock_server_in_guest():
            """
            Starts the nc-vsock server to listen on VSOCK_PORT in the guest.
            The server will stop as soon as it received message from host.
            :return:
            """

            session.cmd("%s -l %s > %s" %
                        (NC_VSOCK_CMD, VSOCK_PORT, NC_VSOCK_SRV_OUT))

        # Run the in-guest server in a thread so the host-side client
        # below can connect to it concurrently.
        server = Thread(target=_start_vsock_server_in_guest)
        server.start()
        # Give the guest-side server a moment to start listening before
        # the host writes to it.
        time.sleep(5)

        sent_data = write_from_host_to_guest()
        wait_for_guest_to_receive(server)
        # Read back what the in-guest server captured.
        received_data = session.cmd_output('cat %s' % NC_VSOCK_SRV_OUT).strip()

        if not sent_data == received_data:
            test.fail("Data transfer error with vsock device\n"
                      "Sent: '%s'\n"
                      "Received:'%s'" % (sent_data, received_data))

        session.close()

    def managedsave_restore():
        """
        Check that vm can be saved and restarted with current configuration
        """
        # Save the domain state, then bring it back up; both steps must
        # succeed.
        save_result = virsh.managedsave(vm_name, debug=True)
        utils_test.libvirt.check_exit_status(save_result, expect_error=False)
        start_result = virsh.start(vm_name)
        utils_test.libvirt.check_exit_status(start_result, expect_error=False)

    start_vm = params.get("start_vm", "no")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    auto_cid = params.get("auto_cid", "no")
    status_error = params.get("status_error", "no") == "yes"
    edit_xml = params.get("edit_xml", "no") == "yes"
    option = params.get("option", "")
    git_repo = params.get("git_repo", "")
    invalid_cid = params.get("invalid_cid", "no") == "yes"
    managedsave = params.get("managedsave", "no") == "yes"
    no_vsock = params.get("no_vsock", "no") == "yes"
    vsock_num = params.get("num")
    communication = params.get("communication", "no") == "yes"
    detach_device_alias = params.get("detach_device_alias", "no") == "yes"
    # Backup xml file
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    remove_all_vsocks(vm, vmxml)

    # Define vsock device xml
    vsock_dev = Vsock()
    vsock_dev.model_type = "virtio"
    # ignore_status=True so a modprobe failure reaches test.fail()
    # instead of raising CmdError out of process.run() before the check.
    if process.run("modprobe vhost_vsock",
                   ignore_status=True).exit_status != 0:
        test.fail("Failed to load vhost_vsock module")
    if invalid_cid:
        cid = "-1"
    else:
        cid = random.randint(3, 10)
        vsock_dev.cid = {'auto': auto_cid, 'address': cid}
        vsock_dev.alias = {'name': 'ua-' + str(uuid.uuid1())}
    logging.debug(vsock_dev)

    if start_vm == "no" and vm.is_alive():
        # Pass the domain name: the previous bare virsh.destroy() call
        # could never stop this vm.
        virsh.destroy(vm_name)

    try:
        if edit_xml:
            # Append the vsock element into <devices> via virsh edit.
            edit_status1 = libvirt.exec_virsh_edit(
                vm_name, [(r":/<devices>/s/$/%s" %
                           re.findall(r"<vsock.*<\/vsock>", str(vsock_dev),
                                      re.M)[0].replace("/", "\\/"))])
            edit_status2 = True
            if vsock_num == 2:
                edit_status2 = libvirt.exec_virsh_edit(
                    vm_name, [(r":/<devices>/s/$/%s" %
                               re.findall(r"<vsock.*<\/vsock>", str(vsock_dev),
                                          re.M)[0].replace("/", "\\/"))])
                logging.debug(vm_xml.VMXML.new_from_inactive_dumpxml(vm_name))

            if status_error:
                if edit_status1 or edit_status2:
                    test.fail("virsh edit should fail\n")
            else:
                if not edit_status1:
                    test.fail("Failed to edit vm xml with vsock device\n")
                else:
                    result = virsh.start(vm_name, debug=True)
                    utils_test.libvirt.check_exit_status(result,
                                                         expect_error=False)
        else:
            session = vm.wait_for_login()
            session.close()
            result = virsh.attach_device(vm_name,
                                         vsock_dev.xml,
                                         flagstr=option,
                                         debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
            if option == "--config":
                result = virsh.start(vm_name, debug=True)
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug(vmxml)
            vsock_list = vmxml.devices.by_device_tag("vsock")
            # Test emptiness before indexing so a missing device produces
            # the intended failure message instead of an IndexError.
            if 0 == len(vsock_list):
                test.fail("No vsock device found in live xml\n")
            cid = vsock_list[0].cid['address']
            if communication:
                validate_data_transfer_by_vsock()
            if managedsave and not no_vsock:
                managedsave_restore()

            def _detach_completed():
                # lsof exits 1 when nothing holds /dev/vhost-vsock open.
                status = process.run("lsof /dev/vhost-vsock",
                                     ignore_status=True,
                                     shell=True).exit_status
                return status == 1

            if detach_device_alias:
                result = virsh.detach_device_alias(vm.name,
                                                   vsock_dev.alias['name'],
                                                   ignore_status=False,
                                                   debug=True,
                                                   wait_for_event=True,
                                                   event_timeout=20)
            else:
                result = virsh.detach_device(vm_name,
                                             vsock_dev.xml,
                                             debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
            utils_misc.wait_for(_detach_completed, timeout=20)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            vsock_list = vmxml.get_devices("vsock")
            if vsock_list:
                test.fail(
                    "Still find vsock device in live xml after hotunplug\n")
            if managedsave and no_vsock:
                managedsave_restore()
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
Example #40
0
def run(test, params, env):
    """
    Test the video virtual devices

    1. prepare a guest with different video devices
    2. check whether the guest can be started, and set
       the related params
    3. check the qemu cmd line and the params

    :param test: avocado-vt test object
    :param params: dict of test parameters
    :param env: test environment object
    """
    def read_qemu_cmdline():
        """
        Return the qemu command line of the running vm as one string.
        """
        # Arguments in /proc/<pid>/cmdline are NUL-separated; close the
        # file deterministically instead of leaking the handle.
        with open('/proc/%s/cmdline' % vm.get_pid()) as cmdline_file:
            cmdline = cmdline_file.read().replace("\x00", " ")
        logging.debug("the cmdline is: %s" % cmdline)
        return cmdline

    def check_heads_test_xml(model_type, is_primary=None, **kwargs):
        """
        Check whether the added devices and attributes are shown
        in the guest xml
        """
        # is_primary is None means there is only a single video device.
        if is_primary or is_primary is None:
            model_heads = kwargs.get("model_heads", default_primary_heads)
        else:
            model_heads = kwargs.get("model_heads", default_secondary_heads)
        pattern_model_type = "type=\'%s\'" % model_type
        pattern_heads = "heads=\'%s\'" % model_heads
        xml_after_adding_device = str(VMXML.new_from_dumpxml(vm_name))

        for line in xml_after_adding_device.splitlines():
            if pattern_model_type in line:
                if pattern_heads not in line:
                    test.fail("Can not find %s video device or heads num"
                              "is not as settings in the xml" % model_type)

    def check_mem_test_xml(model_type, mem_type, mem_size):
        """
        Check whether the added devices and attributes are shown
        in the guest xml
        """
        pattern_model_type = "type=\'%s\'" % model_type
        pattern_mem = "%s=\'%s\'" % (mem_type, mem_size)
        xml_after_adding_device = str(VMXML.new_from_dumpxml(vm_name))

        for line in xml_after_adding_device.splitlines():
            if pattern_model_type in line:
                if pattern_mem not in line:
                    test.fail("Can not find %s video device or memory mem_size"
                              "for %s is not as settings in the xml" %
                              (model_type, mem_type))

    def add_video_device(video_model,
                         domain_xml,
                         is_primary=None,
                         status_error=False,
                         **kwargs):
        """
        add the video device xml snippet, then sync the guest xml

        :param video_model: model type of the video device (qxl/vga/virtio...)
        :param domain_xml: guest VMXML object to modify
        :param is_primary: True when this device carries primary='yes'
        :param status_error: True when the sync is expected to fail
        :param kwargs: extra video device attributes (heads, memory sizes...)
        """
        video_dev = Video()
        video_dev.model_type = video_model
        if is_primary:
            video_dev.primary = "yes"

        for key, value in list(iteritems(kwargs)):
            setattr(video_dev, key, value)
        domain_xml.add_device(video_dev)
        try:
            # Take relevant line only from the XML (without header)
            video_xml_string = str(video_dev).split('\n')[-1]
            # Prepare a string for VI to replace with it using virsh edit utility
            replace_string = r":%s:<video>\_.\{-}</video>:" + video_xml_string + ":"
            status = libvirt.exec_virsh_edit(vm_name, [replace_string])
            if status:
                domain_xml.sync()
            else:
                # Raise exception which is handled right after in except block.
                raise Exception('Virsh edit has failed, but that is '
                                'intentional in negative cases.')
        except Exception as error:
            logging.debug(error)
            if not status_error:
                test.fail(
                    "Failed to define the guest after adding the %s video "
                    "device xml. Details: %s " % (video_model, error))
            logging.debug("This is the expected failing in negative cases.")
        else:
            if status_error:
                test.fail("xml sync should failed as it is a negative case.")
            logging.debug("Add devices succeed in positive case.")

    def check_model_test_cmd_line(model_type, is_primary=None):
        """
        Check whether the added video devices are shown in the qemu cmd line
        """
        cmdline = read_qemu_cmdline()
        # s390x only supports virtio
        s390x_pattern = r"-device.*virtio-gpu-ccw"
        # aarch64 only supports virtio
        aarch64_pattern = r"-device.*virtio-gpu-pci"

        if is_primary or is_primary is None:
            if model_type == "vga":
                pattern = r"-device.*VGA"
            else:
                pattern = r"-device.*%s-vga" % model_type
            if guest_arch == 's390x':
                pattern = s390x_pattern
            elif guest_arch == 'aarch64':
                pattern = aarch64_pattern
            if not re.search(pattern, cmdline):
                test.fail("Can not find the primary %s video device "
                          "in qemu cmd line." % model_type)
        else:
            if model_type == "qxl":
                pattern = r"-device.*qxl,"
            elif model_type == "virtio":
                pattern = r"-device.*virtio-gpu-pci"
                if with_packed:
                    pattern = r"-device.*virtio-gpu-pci.*packed\W{1,2}(true|on)"
            if guest_arch == 's390x':
                pattern = s390x_pattern
            if not re.search(pattern, cmdline):
                test.fail("Can not find the secondary %s video device "
                          "in qemu cmd line." % model_type)

    def check_heads_test_cmd_line(model_type, is_primary=None, **kwargs):
        """
        Check whether the heads number of video devices in the qemu cmd line
        are just the same with settings.
        """
        cmdline = read_qemu_cmdline()
        # s390x only supports virtio
        s390x_pattern = r"-device.*virtio-gpu-ccw.*max_outputs\W{1,2}%s"
        # aarch64 only supports virtio
        aarch64_pattern = r"-device.*virtio-gpu-pci.*max_outputs\W{1,2}%s"

        if is_primary or is_primary is None:
            model_heads = kwargs.get("model_heads", default_primary_heads)
            if model_type == "qxl" or model_type == "virtio":
                pattern = r"-device.*%s-vga.*max_outputs\W{1,2}%s" % (
                    model_type, model_heads)
                if guest_arch == 's390x':
                    pattern = s390x_pattern % model_heads
                elif guest_arch == 'aarch64':
                    pattern = aarch64_pattern % model_heads
                if not re.search(pattern, cmdline):
                    test.fail(
                        "The heads number of the primary %s video device "
                        "in not correct." % model_type)
        else:
            model_heads = kwargs.get("model_heads", default_secondary_heads)
            if model_type == "qxl":
                pattern = r"-device\sqxl\S+max_outputs=%s" % model_heads
            elif model_type == "virtio":
                pattern = r"-device.*virtio-gpu-pci.*max_outputs\W{1,2}%s" % model_heads
            if guest_arch == 's390x':
                pattern = s390x_pattern % model_heads
            if not re.search(pattern, cmdline):
                test.fail("The heads number of the secondary %s video device "
                          "in not correct." % model_type)

    def check_mem_test_cmd_line(model_type, mem_type, mem_size):
        """
        Check whether the video memory of video devices in the qemu cmd line
        are just the same with settings.
        """
        cmdline = read_qemu_cmdline()

        # Guard against an unhandled mem_type falling through: fail with a
        # clear message instead of a NameError on an undefined pattern.
        pattern = None
        if mem_type == "ram" or mem_type == "vram":
            # qxl ram/vram sizes are passed to qemu in bytes (KiB * 1024).
            cmd_mem_size = str(int(mem_size) * 1024)
            pattern = r"-device.*qxl-vga.*%s_size\W{1,2}%s" % (mem_type,
                                                               cmd_mem_size)
        if mem_type == "vram" and model_type == "vga":
            # VGA vgamem_mb is expressed in MiB (KiB // 1024).
            cmd_mem_size = str(int(mem_size) // 1024)
            pattern = r"-device.*VGA.*vgamem_mb\W{1,2}%s" % cmd_mem_size
        if mem_type == "vgamem":
            cmd_mem_size = str(int(mem_size) // 1024)
            pattern = r"-device.*qxl-vga.*vgamem_mb\W{1,2}%s" % cmd_mem_size
        if mem_type == "vram64":
            cmd_mem_size = str(int(mem_size) // 1024)
            pattern = r"-device.*qxl-vga.*vram64_size_mb\W{1,2}%s" % cmd_mem_size

        if pattern is None:
            test.fail("Unsupported mem_type '%s' for %s video device."
                      % (mem_type, model_type))
        if not re.search(pattern, cmdline):
            test.fail("The %s memory size of %s video device "
                      "in not correct." % (mem_type, model_type))

    def up_round_to_power_of_two(num):
        """Round num up to the nearest power of two."""
        power = ceil(log(int(num), 2))
        return pow(2, power)

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    status_error = params.get("status_error", "no") == "yes"
    model_test = params.get("model_test", "no") == "yes"
    primary_video_model = params.get("primary_video_model")
    secondary_video_model = params.get("secondary_video_model", None)
    heads_test = params.get("heads_test", "no") == "yes"
    default_primary_heads = params.get("default_primary_heads", None)
    default_secondary_heads = params.get("default_secondary_heads", None)
    primary_heads = params.get("primary_heads", None)
    secondary_heads = params.get("secondary_heads", None)
    mem_test = params.get("mem_test", "no") == "yes"
    mem_type = params.get("mem_type", None)
    mem_size = params.get("mem_size", None)
    default_mem_size = params.get("default_mem_size", None)
    zero_size_test = params.get("zero_size_test", None) == "yes"
    non_power_of_2_test = params.get("non_power_of_2_test", None) == "yes"
    guest_arch = params.get("vm_arch_name")
    with_packed = params.get("with_packed", "no") == "yes"
    driver_packed = params.get("driver_packed", "on")

    vm_xml = VMXML.new_from_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    if vm.is_alive():
        vm.destroy()

    if with_packed and not libvirt_version.version_compare(6, 3, 0):
        test.cancel("The virtio packed attribute is not supported in"
                    " current libvirt version.")

    try:
        vm_xml.remove_all_device_by_type('video')
        kwargs = {}
        model_type = primary_video_model
        is_primary = None
        if secondary_video_model:
            is_primary = True
        if heads_test and not default_primary_heads:
            kwargs["model_heads"] = primary_heads
        if mem_test and not default_mem_size:
            kwargs["model_" + mem_type] = mem_size
        if model_type == "virtio" and with_packed:
            kwargs["driver_packed"] = driver_packed
        add_video_device(model_type, vm_xml, is_primary, status_error,
                         **kwargs)

        if secondary_video_model:
            kwargs = {}
            model_type = secondary_video_model
            is_primary = False
            if heads_test and not default_secondary_heads:
                kwargs["model_heads"] = secondary_heads
            if model_type == "virtio" and with_packed:
                kwargs["driver_packed"] = driver_packed
            add_video_device(model_type, vm_xml, is_primary, status_error,
                             **kwargs)

        if not status_error:
            res = virsh.start(vm_name)

            if res.exit_status:
                test.fail("failed to start vm after adding the video "
                          "device xml. details: %s " % res)
            logging.debug("vm started successfully in positive cases.")

            if model_test:
                check_model_test_cmd_line(model_type, is_primary)

            if heads_test:
                check_heads_test_xml(model_type, is_primary, **kwargs)
                check_heads_test_cmd_line(model_type, is_primary, **kwargs)

            if mem_test:
                if mem_size is None:
                    mem_size = default_mem_size
                if zero_size_test:
                    mem_size = params.get("mem_size_after_define")
                if non_power_of_2_test:
                    mem_size = up_round_to_power_of_two(mem_size)

                check_mem_test_xml(model_type, mem_type, mem_size)
                check_mem_test_cmd_line(model_type, mem_type, mem_size)
    finally:
        if vm.is_alive():
            # Bug fix: VM.destroy() takes no vm name; the old call passed
            # vm_name into the 'gracefully' parameter by accident.
            vm.destroy(gracefully=False)
        vm_xml_backup.sync()
def run(test, params, env):
    """
    Test vcpu hotpluggable item in xml

    1. Set the libvirtd log filter/level/file
    2. Restart libvirtd
    3. Start vm by xml with vcpu hotpluggable
    4. Check the qemu command line
    5. Check the libvirtd log
    6. Restart libvrtd
    7. Check the vm xml

    :param test: avocado-vt test object
    :param params: dict of test parameters
    :param env: test environment object
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vcpus_placement = params.get("vcpus_placement", "static")
    vcpus_crt = int(params.get("vcpus_current", "4"))
    vcpus_max = int(params.get("vcpus_max", "8"))
    vcpus_enabled = params.get("vcpus_enabled", "")
    vcpus_hotplug = params.get("vcpus_hotpluggable", "")
    vcpus_order = params.get("vcpus_order")
    err_msg = params.get("err_msg", "")
    config_libvirtd = params.get("config_libvirtd", "yes") == "yes"
    log_file = params.get("log_file", "libvirtd.log")
    live_vcpus = params.get("set_live_vcpus", "")
    config_vcpus = params.get("set_config_vcpus", "")
    enable_vcpu = params.get("set_enable_vcpu", "")
    disable_vcpu = params.get("set_disable_vcpu", "")
    # Install cgroup utils
    cgutils = "libcgroup-tools"
    # NOTE(review): platform.dist() was removed in Python 3.8 — confirm this
    # test only runs on interpreters that still provide it.
    if "ubuntu" in platform.dist()[0].lower():
        cgutils = "cgroup-tools"
    sm = SoftwareManager()
    if not sm.check_installed(cgutils) and not sm.install(cgutils):
        test.cancel("cgroup utils package install failed")
    # Backup domain XML
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    libvirtd = utils_libvirtd.Libvirtd()

    # Predefine names referenced in the finally block so an early failure
    # (e.g. while creating the log file) cannot raise NameError there.
    config = None
    config_path = None
    try:
        # Configure libvirtd log
        if config_libvirtd:
            config_path = os.path.join(data_dir.get_tmp_dir(), log_file)
            # Touch the log file so libvirtd can append to it.
            with open(config_path, 'a'):
                pass
            config = utils_config.LibvirtdConfig()
            log_outputs = "1:file:%s" % config_path
            config.log_outputs = log_outputs
            config.log_level = 1
            config.log_filters = "1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event"

            # Restart libvirtd to make the changes take effect in libvirt
            libvirtd.restart()

        # Set vcpu: placement,current,max vcpu
        vmxml.placement = vcpus_placement
        vmxml.vcpu = vcpus_max
        vmxml.current_vcpu = vcpus_crt
        del vmxml.cpuset

        # Create vcpu xml with vcpu hotpluggable and order
        vcpu_list = []
        vcpu = {}
        en_list = vcpus_enabled.split(",")
        hotplug_list = vcpus_hotplug.split(",")
        order_dict = ast.literal_eval(vcpus_order)

        for vcpu_id in range(vcpus_max):
            vcpu['id'] = str(vcpu_id)
            if str(vcpu_id) in en_list:
                vcpu['enabled'] = 'yes'
                if str(vcpu_id) in order_dict:
                    vcpu['order'] = order_dict[str(vcpu_id)]
            else:
                vcpu['enabled'] = 'no'
            if str(vcpu_id) in hotplug_list:
                vcpu['hotpluggable'] = 'yes'
            else:
                vcpu['hotpluggable'] = 'no'
            vcpu_list.append(copy.copy(vcpu))
            vcpu = {}

        vcpus_xml = vm_xml.VMVCPUSXML()
        vcpus_xml.vcpu = vcpu_list

        vmxml.vcpus = vcpus_xml

        # Remove influence from topology setting
        try:
            logging.info('Remove influence from topology setting')
            cpuxml = vmxml.cpu
            del cpuxml.topology
            vmxml.cpu = cpuxml
        except Exception as detail:
            # Best effort: the guest xml may carry no cpu/topology element.
            logging.debug("Ignoring topology cleanup failure: %s", detail)

        vmxml.sync()

        # Start VM
        logging.info("Start VM with vcpu hotpluggable and order...")
        ret = virsh.start(vm_name, ignore_status=True)

        if err_msg:
            libvirt.check_result(ret, err_msg)
        else:
            # Wait for domain
            vm.wait_for_login()

            if enable_vcpu:
                ret = virsh.setvcpu(vm_name, enable_vcpu, "--enable",
                                    ignore_status=False, debug=True)
                vcpus_crt += 1
            if disable_vcpu:
                # Bug fix: was misspelled 'ingnore_status', so failures of
                # the disable operation were never raised.
                ret = virsh.setvcpu(vm_name, disable_vcpu, "--disable",
                                    ignore_status=False, debug=True)
                vcpus_crt -= 1
            if live_vcpus:
                ret = virsh.setvcpus(vm_name, live_vcpus, ignore_status=False,
                                     debug=True)
                vcpus_crt = int(live_vcpus)
            if config_vcpus:
                ret = virsh.setvcpus(vm_name, config_vcpus, "--config",
                                     ignore_status=False, debug=True)

            # Check QEMU command line
            cmd = ("ps -ef| grep %s| grep 'maxcpus=%s'" % (vm_name, vcpus_max))
            ret = process.run(cmd, ignore_status=False, shell=True)
            if ret.exit_status != 0:
                logging.error("Maxcpus in QEMU command line is wrong!")

            # Check libvirtd log
            if config_libvirtd:
                for vcpu in vcpu_list:
                    if vcpu['enabled'] == 'yes' and vcpu['hotpluggable'] == 'yes':
                        cmd = ("cat %s| grep device_add| grep qemuMonitorIOWrite"
                               "| grep 'vcpu%s'" % (config_path, vcpu['id']))
                        ret = process.run(cmd, ignore_status=False, shell=True)
                        if ret.exit_status != 0:
                            logging.error("Failed to find lines about enabled vcpu%s"
                                          "in libvirtd log.", vcpu['id'])

            # Dumpxml
            dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            vcpu_items = re.findall(r"vcpu.*", dump_xml)

            # Check guest vcpu count
            ret = virsh.vcpucount(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            # Bug fix: '[config|live]' was a character class matching single
            # characters, not the intended 'config' OR 'live' alternation.
            max_list = re.findall(r"maximum.*(?:config|live).*%s\n" % vcpus_max,
                                  output)
            if len(max_list) != 2:
                test.fail("vcpucount maximum info is not correct.")

            if live_vcpus:
                crt_live_list = re.findall(r"current.*live.*%s" % live_vcpus, output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current live info is not correct.")
            elif config_vcpus:
                crt_cfg_list = re.findall(r"current.*config.*%s" % config_vcpus, output)
                logging.info("vcpucount crt_cfg_list: \n %s", crt_cfg_list)
                if len(crt_cfg_list) != 1:
                    test.fail("vcpucount: current config info is not correct.")
            else:
                crt_live_list = re.findall(r"current.*live.*%s" % vcpus_crt, output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current info is not correct.")

            # Check guest vcpu info
            ret = virsh.vcpuinfo(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            vcpu_lines = re.findall(r"VCPU:.*\n", output)
            logging.info("vcpuinfo vcpu_lines: \n %s", vcpu_lines)
            if len(vcpu_lines) != vcpus_crt:
                test.fail("vcpuinfo is not correct.")

            # Check cpu in guest
            if not utils_misc.check_if_vm_vcpu_match(vcpus_crt, vm):
                test.fail("cpu number in VM is not correct, it should be %s cpus" % vcpus_crt)

            # Check VM xml change for cold-plug/cold-unplug
            if config_vcpus:
                inactive_xml = virsh.dumpxml(vm_name, "--inactive").stdout.strip()
                crt_vcpus_xml = re.findall(r"vcpu.*current=.%s.*" %
                                           config_vcpus, inactive_xml)
                logging.info("dumpxml --inactive xml: \n %s", crt_vcpus_xml)
                if len(crt_vcpus_xml) != 1:
                    test.fail("Dumpxml with --inactive,"
                              "the vcpu current is not correct.")

            # Restart libvirtd
            libvirtd.restart()

            # Recheck VM xml
            re_dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            re_vcpu_items = re.findall(r"vcpu.*", re_dump_xml)
            if vcpu_items != re_vcpu_items:
                test.fail("After restarting libvirtd,"
                          "VM xml changed unexpectedly.")

            # Check cgroup info
            en_vcpu_list = re.findall(r"vcpu.*enabled=.yes.*", re_dump_xml)
            for vcpu_sn in range(len(en_vcpu_list)):
                vcpu_id = en_vcpu_list[vcpu_sn].split("=")[1].split()[0].strip('\'')
                cmd = ("lscgroup| grep cpuset| grep %s| grep vcpu%s" %
                       (vm_name[-3:], vcpu_id))
                ret = process.run(cmd, ignore_status=False, shell=True)
                if ret.exit_status != 0:
                    test.fail("Failed to find lines about enabled vcpu%s"
                              "in lscgroup info." % vcpu_id)
    finally:
        # Recover libvirtd configration
        if config_libvirtd and config is not None:
            config.restore()
            if config_path and os.path.exists(config_path):
                os.remove(config_path)

        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
def run(test, params, env):
    """
    Test command: virsh restore.

    Restore a domain from a saved state in a file
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Run virsh restore command with assigned option.
    4.Recover test environment.
    5.Confirm the test result.

    :param test: avocado-vt test object
    :param params: dict of test parameters
    :param env: test environment object
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    session = vm.wait_for_login()

    os_type = params.get("os_type")
    status_error = ("yes" == params.get("status_error"))
    libvirtd = params.get("libvirtd", "on")
    # Bug fix: default to "" so extra_param.count(...) below cannot raise
    # AttributeError when the parameter is absent.
    extra_param = params.get("restore_extra_param", "")
    pre_status = params.get("restore_pre_status")
    vm_ref = params.get("restore_vm_ref")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            # NOTE(review): this literal looks mangled ('******'); confirm
            # the intended placeholder user name against the original test.
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # run test
    if vm_ref == "" or vm_ref == "xyz":
        # Invalid references are passed straight to virsh restore.
        status = virsh.restore(vm_ref, extra_param, debug=True,
                               unprivileged_user=unprivileged_user,
                               uri=uri,
                               ignore_status=True).exit_status
    else:
        if os_type == "linux":
            cmd = "cat /proc/cpuinfo"
            try:
                status, output = session.cmd_status_output(cmd, timeout=10)
            finally:
                session.close()
            if not re.search("processor", output):
                raise error.TestFail("Unable to read /proc/cpuinfo")
        tmp_file = os.path.join(test.tmpdir, "save.file")
        virsh.save(vm_name, tmp_file)
        if vm_ref == "saved_file":
            vm_ref = tmp_file
        elif vm_ref == "empty_new_file":
            # An empty file exercises the invalid-save-image error path.
            tmp_file = os.path.join(test.tmpdir, "new.file")
            open(tmp_file, 'w').close()
            vm_ref = tmp_file
        if vm.is_alive():
            vm.destroy()
        if pre_status == "start":
            virsh.start(vm_name)
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()
        status = virsh.restore(vm_ref, extra_param, debug=True,
                               unprivileged_user=unprivileged_user,
                               uri=uri,
                               ignore_status=True).exit_status
    if not status_error:
        list_output = virsh.dom_list().stdout.strip()

    session.close()

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    try:
        if status_error:
            if not status:
                raise error.TestFail("Run successfully with wrong command!")
        else:
            if status:
                raise error.TestFail("Run failed with right command")
            if not re.search(vm_name, list_output):
                raise error.TestFail("Run failed with right command")
            if extra_param.count("paused"):
                if not vm.is_paused():
                    raise error.TestFail("Guest state should be"
                                         " paused after restore"
                                         " due to the option --paused")
            if extra_param.count("running"):
                if vm.is_dead() or vm.is_paused():
                    raise error.TestFail("Guest state should be"
                                         " running after restore"
                                         " due to the option --running")
    finally:
        if vm.is_paused():
            virsh.resume(vm_name)
# Example #43
def run(test, params, env):
    """
    Test the input virtual devices

    1. prepare a guest with different input devices
    2. check whether the guest can be started
    3. check the qemu cmd line

    :param test: avocado-vt test object
    :param params: dict of test parameters
    :param env: test environment object
    """
    def check_dumpxml():
        """
        Check whether the added devices are shown in the guest xml
        """
        pattern = "<input bus=\"%s\" type=\"%s\">" % (bus_type, input_type)
        if with_packed:
            # For packed-virtqueue cases only the driver element is checked.
            pattern = "<driver packed=\"%s\"" % (driver_packed)
        logging.debug('Searching for %s in vm xml', pattern)
        xml_after_adding_device = VMXML.new_from_dumpxml(vm_name)
        logging.debug('xml_after_adding_device:\n%s', xml_after_adding_device)
        if pattern not in str(xml_after_adding_device):
            test.fail("Can not find the %s input device xml "
                      "in the guest xml file." % input_type)

    def check_qemu_cmd_line():
        """
        Check whether the added devices are shown in the qemu cmd line
        """
        # if the tested input device is a keyboard or mouse with ps2 bus,
        # there is no keyboard or mouse in qemu cmd line
        if bus_type == "ps2" and input_type in ["keyboard", "mouse"]:
            return
        with open('/proc/%s/cmdline' % vm.get_pid(), 'r') as cmdline_file:
            cmdline = cmdline_file.read()
        if bus_type == "usb" and input_type == "keyboard":
            pattern = r"-device.*%s-kbd" % bus_type
        elif input_type == "passthrough":
            pattern = r"-device.*%s-input-host-pci" % bus_type
        else:
            pattern = r"-device.*%s-%s" % (bus_type, input_type)
        if not re.search(pattern, cmdline):
            test.fail("Can not find the %s input device "
                      "in qemu cmd line." % input_type)
        if with_packed:
            pattern = r"packed.*%s" % driver_packed
            if not re.search(pattern, cmdline):
                test.fail("Can not find the packed driver "
                          "in qemu cmd line")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    machine_type = params.get('machine_type', '')
    status_error = params.get("status_error", "no") == "yes"
    with_packed = params.get("with_packed", "no") == "yes"
    driver_packed = params.get("driver_packed", "on")
    bus_type = params.get("bus_type")
    input_type = params.get("input_type")

    check_preconditions(bus_type, input_type, with_packed, test)

    vm = env.get_vm(vm_name)
    vm_xml = VMXML.new_from_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    if vm.is_alive():
        vm.destroy()

    try:
        # ps2 keyboard and ps2 mouse are default, no need to re-add the xml,
        # unless it's machine_type is pseries
        if not (bus_type == "ps2" and input_type in ["keyboard", "mouse"]
                and machine_type != 'pseries'):
            vm_xml.remove_all_device_by_type('input')
            input_dev = Input(type_name=input_type)
            input_dev.input_bus = bus_type
            if input_type == "passthrough":
                kbd_dev_name = glob.glob('/dev/input/by-path/*kbd')
                if not kbd_dev_name:
                    test.cancel("There is no keyboard device on this host.")
                logging.debug("keyboard %s is going to be passthrough "
                              "to the host.", kbd_dev_name[0])
                input_dev.source_evdev = kbd_dev_name[0]
            if with_packed:
                input_dev.driver_packed = driver_packed
            vm_xml.add_device(input_dev)
            try:
                vm_xml.sync()
            except Exception as error:
                if not status_error:
                    test.fail("Failed to define the guest after adding the %s input "
                              "device xml. Details: %s " % (input_type, error))
                logging.debug("This is the expected failing in negative cases.")
                return

        res = virsh.start(vm_name)
        if res.exit_status:
            if not status_error:
                test.fail("Failed to start vm after adding the %s input "
                          "device xml. Details: %s " % (input_type, res.stderr))
            logging.debug("This is the expected failure in negative cases.")
            return
        if status_error:
            # test.fail raises, so no 'return' is needed after it.
            test.fail("Expected fail in negative cases but vm started successfully.")

        logging.debug("VM started successfully in positive cases.")
        check_dumpxml()
        check_qemu_cmd_line()
    finally:
        if vm.is_alive():
            virsh.destroy(vm_name)
        vm_xml_backup.sync()
# Example #44
def run(test, params, env):
    """
    Test virsh nwfilter-binding operations.

    1. prepare env
    2. check if nwfilter binding
    3. run test
    4. destroy vm and restore the status

    :param test: avocado test object
    :param params: test parameters dict
    :param env: test environment object
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    status_error = "yes" == params.get("status_error")
    vmxml_backup = libvirt_xml.vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    filter_name = params.get("filter_name")
    is_nwfilter_define = "yes" == params.get("is_nwfilter_define")
    vnet0_binding = os.path.join(data_dir.get_tmp_dir(), "vnet0_binding.xml")
    filter_binding_name = params.get("filter_binding_name")
    failed_msg = params.get("expected_failed")
    target_dev = params.get("target_dev")
    source_network = params.get("source_network")
    source_bridge = params.get("source_bridge")
    alias_name = params.get("alias_name")

    def set_env():
        """
        Prepare the vm interface xml.

        The same xml is used by both test scenarios (with only minor
        differences between them).

        :return: the new interface xml object added to the domain
        """
        vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        iface_xml = vmxml.get_devices('interface')[0]
        vmxml.del_device(iface_xml)
        new_iface = interface.Interface('network')
        new_iface.xml = iface_xml.xml
        new_iface.type_name = "network"
        iface_target = {'dev': target_dev}
        new_iface.target = iface_target
        source = {'network': source_network, 'bridge': source_bridge}
        new_iface.source = source
        # Attach the nwfilter reference under test to the interface.
        filterrefs_dict = {}
        filterrefs_dict['name'] = filter_name
        filterrefs_dict['parameters'] = []
        new_filterref = new_iface.new_filterref(**filterrefs_dict)
        new_iface.filterref = new_filterref
        alias_dict = {'name': alias_name}
        new_iface.alias = alias_dict
        vmxml.add_device(new_iface)
        logging.debug("new interface xml is: %s" % new_iface)
        vmxml.sync()
        return new_iface

    def check_binding_port(cmd_res, match, is_match=True):
        """
        Check the list of binding ports against an expected pattern.

        :param cmd_res: result of "virsh nwfilter-binding-list"
        :param match: regex pattern expected (or not) in the output
        :param is_match: True if the pattern must be present,
                         False if it must be absent
        """
        list_res = cmd_res.stdout_text.strip()
        if list_res and re.search(match, list_res):
            if not is_match:
                test.fail("expected not match %s" % match)
        elif is_match:
            test.fail("expected match %s but not match" % match)

    try:
        # set new interface env
        new_iface = set_env()
        # nwfilter configs ship with this package; make sure it is installed
        pkg_mgr = utils_package.package_manager(None, 'libvirt-daemon-config-nwfilter')
        pkg_mgr.install()
        virsh.start(vm_name, debug=True)
        # create binding dump file from the running vm's interface port
        ret = virsh.nwfilter_binding_dumpxml(new_iface.target['dev'],
                                             to_file=vnet0_binding,
                                             debug=True)
        utlv.check_exit_status(ret, status_error)
        binding = nwfilter_binding.NwfilterBinding()
        binding.xml = vnet0_binding
        filterrefs_dict = {}
        filterrefs_dict['name'] = filter_binding_name
        filterrefs_dict['parameters'] = [
            {'name': "MAC", 'value': new_iface.mac_address}]
        binding.filterref = binding.new_filterref(**filterrefs_dict)
        logging.debug("binding is %s" % binding)
        # list filter
        if not is_nwfilter_define:
            virsh.nwfilter_binding_delete(new_iface.target['dev'], debug=True)
        if is_nwfilter_define:
            ret = virsh.nwfilter_binding_list(debug=True)
            utlv.check_exit_status(ret, status_error)
            check_binding_port(ret, filter_name, is_match=True)

        ret_create = virsh.nwfilter_binding_create(binding.xml, debug=True)
        # two scenarios
        if is_nwfilter_define:
            # Scenario 1: creating a binding while one already exists for
            # the port is expected to fail with failed_msg.
            utlv.check_result(ret_create, failed_msg)

        elif not is_nwfilter_define:
            # get params for scenario 2
            check_cmd = params.get("check_cmd")
            expected_match = params.get("expected_match")
            filter_binding_copy = params.get("filter_binding_copy")

            ret = virsh.nwfilter_binding_list(debug=True)
            check_binding_port(ret, filter_binding_name, is_match=True)

            utlv.check_cmd_output(check_cmd, expected_match, True)
            # The binding must survive a libvirtd restart.
            utils_libvirtd.libvirtd_restart()
            ret = virsh.nwfilter_binding_list(debug=True)
            check_binding_port(ret, filter_binding_name, is_match=True)
            # use check command to check result
            utlv.check_cmd_output(check_cmd, expected_match, True)
            new_binding = nwfilter_binding.NwfilterBinding()
            new_binding.xml = binding.xml
            filterrefs_dict = {}
            filterrefs_dict['name'] = filter_binding_copy
            filterrefs_dict['parameters'] = [
                {'name': "MAC", 'value': new_iface.mac_address}]
            # BUGFIX: apply the copied filterref to new_binding. The
            # original code mutated `binding` here, so filter_binding_copy
            # was never used on the binding actually created below and the
            # debug log printed an unmodified new_binding.
            new_binding.filterref = new_binding.new_filterref(**filterrefs_dict)
            logging.debug("binding is %s" % new_binding)
            ret_create = virsh.nwfilter_binding_create(new_binding.xml,
                                                       debug=True)
            utlv.check_result(ret_create, failed_msg)

    finally:
        # Always clean up: stop the vm and restore the original xml.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
Example #45
0
def run(test, params, env):
    """
    Test interafce xml options.

    1.Prepare test environment,destroy or suspend a VM.
    2.Edit xml and start the domain.
    3.Perform test operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    host_arch = platform.machine()
    virsh_dargs = {'debug': True, 'ignore_status': False}

    if not utils_package.package_install(["lsof"]):
        test.cancel("Failed to install dependency package lsof" " on host")

    def create_iface_xml(iface_mac):
        """
        Build a new interface xml object carrying *iface_mac*.

        Source, model, driver and target settings are taken from the test
        parameters captured from the enclosing scope.

        :param iface_mac: MAC address to assign to the new interface
        :return: the populated interface xml object
        """
        new_iface = Interface(type_name=iface_type)
        src = ast.literal_eval(iface_source)
        if src:
            new_iface.source = src
        new_iface.model = iface_model or "virtio"
        new_iface.mac_address = iface_mac

        # Each driver sub-dict is optional; an empty string means "unset".
        def parse(option):
            return ast.literal_eval(option) if option else {}

        new_iface.driver = new_iface.new_driver(
            driver_attr=parse(iface_driver),
            driver_host=parse(iface_driver_host),
            driver_guest=parse(iface_driver_guest))
        if test_target:
            new_iface.target = {"dev": target_dev}
        logging.debug("Create new interface xml: %s", new_iface)
        return new_iface

    def modify_iface_xml(update, status_error=False):
        """
        Modify interface xml options

        :param update: True to apply the change via "virsh update-device",
                       False to redefine the whole domain xml
        :param status_error: whether the update-device call is expected
                             to fail (only used on the update path)
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        xml_devices = vmxml.devices
        # Operate on the first interface device of the domain.
        iface_index = xml_devices.index(
            xml_devices.by_device_tag("interface")[0])
        iface = xml_devices[iface_index]
        if iface_model:
            iface.model = iface_model
        else:
            del iface.model
        if iface_type:
            iface.type_name = iface_type
        # Drop the old source before installing the configured one.
        del iface.source
        source = ast.literal_eval(iface_source)
        if source:
            net_ifs = utils_net.get_net_if(state="UP")
            # Check source device is valid or not,
            # if it's not in host interface list, try to set
            # source device to first active interface of host
            if (iface.type_name == "direct" and 'dev' in source
                    and source['dev'] not in net_ifs):
                logging.warn(
                    "Source device %s is not a interface"
                    " of host, reset to %s", source['dev'], net_ifs[0])
                source['dev'] = net_ifs[0]
            iface.source = source
        backend = ast.literal_eval(iface_backend)
        if backend:
            iface.backend = backend
        # Build the <driver> element from the separate attr/host/guest
        # parameter dictionaries (each may be empty).
        driver_dict = {}
        driver_host = {}
        driver_guest = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        if iface_driver_host:
            driver_host = ast.literal_eval(iface_driver_host)
        if iface_driver_guest:
            driver_guest = ast.literal_eval(iface_driver_guest)
        iface.driver = iface.new_driver(driver_attr=driver_dict,
                                        driver_host=driver_host,
                                        driver_guest=driver_guest)
        if test_target:
            logging.debug("iface.target is %s" % target_dev)
            iface.target = {"dev": target_dev}
        # Remove any fixed device address so libvirt can assign one that
        # matches the (possibly changed) interface configuration.
        if iface.address:
            del iface.address
        if set_ip:
            iface.ips = [ast.literal_eval(x) for x in set_ips]
        logging.debug("New interface xml file: %s", iface)
        if unprivileged_user:
            # Create disk image for unprivileged user
            disk_index = xml_devices.index(
                xml_devices.by_device_tag("disk")[0])
            disk_xml = xml_devices[disk_index]
            logging.debug("source: %s", disk_xml.source)
            disk_source = disk_xml.source.attrs["file"]
            # Copy the image (preserving the SELinux context, -Z) and chown
            # it so the unprivileged user can access it.
            cmd = ("cp -fZ {0} {1} && chown {2}:{2} {1}"
                   "".format(disk_source, dst_disk, unprivileged_user))
            process.run(cmd, shell=True)
            disk_xml.source = disk_xml.new_disk_source(
                attrs={"file": dst_disk})
            vmxml.devices = xml_devices
            # Remove all channels to avoid of permission problem
            channels = vmxml.get_devices(device_type="channel")
            for channel in channels:
                vmxml.del_device(channel)
            logging.info("Unprivileged users can't use 'dac' security driver,"
                         " removing from domain xml if present...")
            vmxml.del_seclabel([('model', 'dac')])

            # Set vm memory to 2G if it's larger than 2G
            if vmxml.memory > 2097152:
                vmxml.memory = vmxml.current_mem = 2097152

            vmxml.xmltreefile.write()
            logging.debug("New VM xml: %s", vmxml)
            process.run("chmod a+rw %s" % vmxml.xml, shell=True)
            virsh.define(vmxml.xml, **virsh_dargs)
        # Try to modify interface xml by update-device or edit xml
        elif update:
            iface.xmltreefile.write()
            ret = virsh.update_device(vm_name, iface.xml, ignore_status=True)
            libvirt.check_exit_status(ret, status_error)
        else:
            # Redefine the whole domain; define_error tells us whether the
            # sync is expected to be rejected by libvirt.
            vmxml.devices = xml_devices
            vmxml.xmltreefile.write()
            try:
                vmxml.sync()
                if define_error:
                    test.fail("Define VM succeed, but it should fail")
            except xcepts.LibvirtXMLError as e:
                if not define_error:
                    test.fail("Define VM fail: %s" % e)

    def check_offloads_option(if_name, driver_options, session=None):
        """
        Verify NIC offload settings against "ethtool -k" output.

        :param if_name: interface name to query with ethtool
        :param driver_options: dict of driver options; keys known to the
                               offload map must show the expected state
        :param session: guest session; when None the check runs on the host
        """
        # Map driver option names to the feature labels ethtool prints.
        option_map = {
            "csum": "tx-checksumming",
            "tso4": "tcp-segmentation-offload",
            "tso6": "tx-tcp6-segmentation",
            "ecn": "tx-tcp-ecn-segmentation",
            "ufo": "udp-fragmentation-offload"
        }
        cmd = "ethtool -k %s | head -18" % if_name
        if session:
            status, output = session.cmd_status_output(cmd)
        else:
            result = process.run(cmd, shell=True)
            status, output = result.exit_status, result.stdout_text
        if status:
            test.fail("ethtool return error code")
        logging.debug("ethtool output: %s", output)
        for opt_name, opt_value in driver_options.items():
            if opt_name not in option_map:
                continue
            feature = option_map[opt_name]
            # Fail when the feature line exists but shows the wrong state.
            if output.count(feature) and not output.count(
                    "%s: %s" % (feature, opt_value)):
                test.fail("offloads option %s: %s isn't"
                          " correct in ethtool output" %
                          (feature, opt_value))

    def run_xml_test(iface_mac):
        """
        Test for interface options in vm xml

        :param iface_mac: MAC address identifying the interface to check
        """
        # Get the interface object according the mac address
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_devices = vmxml.get_devices(device_type="interface")
        iface = None
        for iface_dev in iface_devices:
            if iface_dev.mac_address == iface_mac:
                iface = iface_dev
        if not iface:
            test.fail("Can't find interface with mac"
                      " '%s' in vm xml" % iface_mac)
        # Every configured <driver> attribute must appear in the live xml.
        driver_dict = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        for driver_opt in list(driver_dict.keys()):
            if not driver_dict[driver_opt] == iface.driver.driver_attr[
                    driver_opt]:
                test.fail("Can't see driver option %s=%s in vm xml" %
                          (driver_opt, driver_dict[driver_opt]))
            else:
                logging.info("Find %s=%s in vm xml" %
                             (driver_opt, driver_dict[driver_opt]))
        if iface_target:
            if ("dev" not in iface.target
                    or not iface.target["dev"].startswith(iface_target)):
                test.fail("Can't see device target dev in vm xml")
            # Check macvtap mode by ip link command
            if iface_target == "macvtap" and "mode" in iface.source:
                cmd = "ip -d link show %s" % iface.target["dev"]
                output = process.run(cmd, shell=True).stdout_text
                logging.debug("ip link output: %s", output)
                mode = iface.source["mode"]
                # iproute2 abbreviates "passthrough" as "passthru"
                if mode == "passthrough":
                    mode = "passthru"
                if not re.search(r"macvtap\s+mode %s" % mode, output):
                    test.fail("Failed to verify macvtap mode")
        # Check if the "target dev" is set successfully
        # 1. Target dev name with prefix as "vnet" will always be override;
        # 2. Target dev name with prefix as "macvtap" or "macvlan" with direct
        # type interface will be override;
        # 3. Other scenarios, the target dev should be set successfully.
        if test_target:
            if target_dev != iface.target["dev"]:
                if target_dev.startswith("vnet") or \
                        (iface_type == "direct" and
                         (target_dev.startswith("macvtap") or
                          target_dev.startswith("macvlan"))):
                    logging.debug("target dev %s is override" % target_dev)
                else:
                    # BUGFIX: test.fail() takes a single message string, not
                    # logging-style (fmt, args) - format the message here.
                    test.fail("Failed to set target dev to %s" % target_dev)
            else:
                logging.debug("target dev set successfully to %s",
                              iface.target["dev"])

    def run_cmdline_test(iface_mac, host_arch):
        """
        Test qemu command line
        :param iface_mac: expected MAC
        :param host_arch: host architecture, e.g. x86_64
        :raise avocado.core.exceptions.TestError: if preconditions are not met
        :raise avocado.core.exceptions.TestFail: if commandline doesn't match
        :return: None
        """
        # Grab the running qemu process command line of this vm.
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        ret = process.run(cmd, shell=True)
        logging.debug("Command line %s", ret.stdout_text)
        if test_vhost_net:
            if not ret.stdout_text.count("vhost=on") and not rm_vhost_driver:
                test.fail("Can't see vhost options in"
                          " qemu-kvm command line")

        # Map the configured model to the qemu device option name
        # (virtio uses the ccw bus on s390x, pci elsewhere).
        if iface_model == "virtio":
            if host_arch == 's390x':
                model_option = "device virtio-net-ccw"
            else:
                model_option = "device virtio-net-pci"
        elif iface_model == 'rtl8139':
            model_option = "device rtl8139"
        else:
            test.error(
                "Don't know which device driver to expect on qemu cmdline"
                " for iface_model %s" % iface_model)
        # Extract the option string between the device model and the MAC.
        iface_cmdline = re.findall(
            r"%s,(.+),mac=%s" % (model_option, iface_mac), ret.stdout_text)
        if not iface_cmdline:
            test.fail("Can't see %s with mac %s in command"
                      " line" % (model_option, iface_mac))

        # Parse the comma-separated "key=value" pairs of the device option.
        cmd_opt = {}
        for opt in iface_cmdline[0].split(','):
            tmp = opt.rsplit("=")
            cmd_opt[tmp[0]] = tmp[1]
        logging.debug("Command line options %s", cmd_opt)

        # Translate the configured xml driver options into the key=value
        # form qemu is expected to show on its command line.
        driver_dict = {}
        # Test <driver> xml options.
        if iface_driver:
            iface_driver_dict = ast.literal_eval(iface_driver)
            for driver_opt in list(iface_driver_dict.keys()):
                if driver_opt == "name":
                    continue
                elif driver_opt == "txmode":
                    # txmode "iothread" shows up as tx=bh on the cmdline
                    if iface_driver_dict["txmode"] == "iothread":
                        driver_dict["tx"] = "bh"
                    else:
                        driver_dict["tx"] = iface_driver_dict["txmode"]
                elif driver_opt == "queues":
                    # multiqueue implies mq=on; for pci devices qemu is
                    # also expected to show vectors = 2*queues + 2
                    driver_dict["mq"] = "on"
                    if "pci" in model_option:
                        driver_dict["vectors"] = str(
                            int(iface_driver_dict["queues"]) * 2 + 2)
                else:
                    driver_dict[driver_opt] = iface_driver_dict[driver_opt]
        # Test <driver><host/><driver> xml options.
        if iface_driver_host:
            driver_dict.update(ast.literal_eval(iface_driver_host))
        # Test <driver><guest/><driver> xml options.
        if iface_driver_guest:
            driver_dict.update(ast.literal_eval(iface_driver_guest))

        # All expected driver options must appear verbatim on the cmdline.
        for driver_opt in list(driver_dict.keys()):
            if (driver_opt not in cmd_opt
                    or not cmd_opt[driver_opt] == driver_dict[driver_opt]):
                test.fail("Can't see option '%s=%s' in qemu-kvm "
                          " command line" %
                          (driver_opt, driver_dict[driver_opt]))
            logging.info("Find %s=%s in qemu-kvm command line" %
                         (driver_opt, driver_dict[driver_opt]))
        if test_backend:
            # Verify the qemu process holds the backend files open.
            # NOTE(review): assumes the second field of ps output is the
            # pid of the matched qemu process - confirm on the target OS.
            guest_pid = ret.stdout_text.rsplit()[1]
            cmd = "lsof %s | grep %s" % (backend["tap"], guest_pid)
            if process.system(cmd, ignore_status=True, shell=True):
                test.fail("Guest process didn't open backend file"
                          " %s" % backend["tap"])
            cmd = "lsof %s | grep %s" % (backend["vhost"], guest_pid)
            if process.system(cmd, ignore_status=True, shell=True):
                test.fail("Guest process didn't open backend file"
                          " %s" % backend["vhost"])

    def get_guest_ip(session, mac):
        """
        Return the guest IP address bound to *mac*.

        Guest networking is restarted first, then we wait up to 10s
        for the address to become available.

        :param session: guest session
        :param mac: MAC address of the interface to query
        :return: the IP address string, or None if none was obtained
        """
        def fetch_ip():
            return utils_net.get_guest_ip_addr(session, mac)

        utils_net.restart_guest_network(session, mac)
        # Wait for IP address is ready
        utils_misc.wait_for(fetch_ip, 10)
        return fetch_ip()

    def check_user_network(session):
        """
        Verify IP address, gateway and DNS of the user-mode network on guest.

        :param session: guest session
        """
        macs_to_check = [iface_mac_old]
        if attach_device:
            macs_to_check.append(iface_mac)
        guest_ips = [get_guest_ip(session, mac) for mac in macs_to_check]
        logging.debug("IP address on guest: %s", guest_ips)
        if len(set(guest_ips)) != len(guest_ips):
            logging.debug(
                "Duplicated IP address on guest. Check bug: "
                "https://bugzilla.redhat.com/show_bug.cgi?id=1147238")
        for addr in guest_ips:
            if not addr or addr != expect_ip:
                logging.debug("vm_ip is %s, expect_ip is %s", addr, expect_ip)
                test.fail("Found wrong IP address" " on guest")
        # Check gateway address
        gateway = str(utils_net.get_default_gateway(False, session))
        if expect_gw not in gateway:
            test.fail("The gateway on guest is %s, while expect is %s" %
                      (gateway, expect_gw))
        # Check dns server address
        ns_list = utils_net.get_guest_nameserver(session)
        if expect_ns not in ns_list:
            test.fail("The dns found is %s, which expect is %s" %
                      (ns_list, expect_ns))

    def check_mcast_network(session, add_session):
        """
        Check multicast ip address on guests

        :param session: vm session
        :param add_session: additional vm session
        """
        src_addr = ast.literal_eval(iface_source)['address']
        vms_sess_dict = {vm_name: session, additional_vm.name: add_session}

        # Check mcast address on host
        cmd = "netstat -g | grep %s" % src_addr
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            test.fail("Can't find multicast ip address" " on host")
        vms_ip_dict = {}
        # Get ip address on each guest
        for vms in list(vms_sess_dict.keys()):
            vm_mac = vm_xml.VMXML.get_first_mac_by_name(vms)
            vm_ip = get_guest_ip(vms_sess_dict[vms], vm_mac)
            if not vm_ip:
                test.fail("Can't get multicast ip" " address on guest")
            vms_ip_dict.update({vms: vm_ip})
        # Each guest must have obtained a distinct address.
        if len(set(vms_ip_dict.values())) != len(vms_sess_dict):
            test.fail("Got duplicated multicast ip address")
        logging.debug("Found ips on guest: %s", vms_ip_dict)

        # Run omping server on host
        if not utils_package.package_install(["omping"]):
            test.error("Failed to install omping" " on host")
        cmd = ("iptables -F;omping -m %s %s" %
               (src_addr,
                "192.168.122.1 %s" % ' '.join(list(vms_ip_dict.values()))))
        # Run a backgroup job waiting for connection of client
        bgjob = utils_misc.AsyncJob(cmd)

        # Run omping client on guests
        for vms in list(vms_sess_dict.keys()):
            # omping should be installed first
            if not utils_package.package_install(["omping"],
                                                 vms_sess_dict[vms]):
                test.error("Failed to install omping" " on guest")
            cmd = ("iptables -F; omping -c 5 -T 5 -m %s %s" %
                   (src_addr, "192.168.122.1 %s" % vms_ip_dict[vms]))
            ret, output = vms_sess_dict[vms].cmd_status_output(cmd)
            logging.debug("omping ret: %s, output: %s", ret, output)
            # Expect 0% loss for both multicast and unicast pings.
            if (not output.count('multicast, xmt/rcv/%loss = 5/5/0%')
                    or not output.count('unicast, xmt/rcv/%loss = 5/5/0%')):
                test.fail("omping failed on guest")
        # Kill the backgroup job
        bgjob.kill_func()

    def get_iface_model(iface_model, host_arch):
        """
        Resolve the effective interface model for the host architecture.

        On s390x only virtio models are used: any non-virtio model is
        replaced by the default 'virtio'.

        :param iface_model: value as by test configuration or default
        :param host_arch: host architecture, e.g. x86_64
        :return: iface_model to actually use
        """
        needs_virtio_default = ('s390x' == host_arch
                                and 'virtio' not in iface_model)
        return "virtio" if needs_virtio_default else iface_model

    def check_vhostuser_guests(session1, session2):
        """
        Check the vhostuser interface in guests

        param session1: Session of original guest
        param session2: Session of original additional guest
        """
        logging.debug("iface details is %s" %
                      libvirt.get_interface_details(vm_name))
        mac_of_vm1 = str(libvirt.get_interface_details(vm_name)[0]['mac'])
        mac_of_vm2 = str(libvirt.get_interface_details(add_vm_name)[0]['mac'])

        # Assign static addresses, then ping guest2 from guest1.
        utils_net.set_guest_ip_addr(session1, mac_of_vm1, guest1_ip)
        utils_net.set_guest_ip_addr(session2, mac_of_vm2, guest2_ip)
        status, ping_output = utils_net.ping(dest=guest2_ip,
                                             count='3',
                                             timeout=5,
                                             session=session1)
        logging.info("output:%s" % ping_output)
        ping_worked = (status == 0)
        if ping_worked and ping_expect_fail:
            test.fail("Ping guest2 successfully not expected")
        elif ping_worked:
            logging.info("Can ping guest2 from guest1")
        elif ping_expect_fail:
            logging.info("Can not ping guest2 as expected")
        else:
            test.fail("Can not ping guest2 from guest1")

    def get_ovs_statis(ovs):
        """
        Get ovs-vsctl interface statistics and format in dict

        :param ovs: openvswitch instance
        :return: dict mapping vhostuser iface name -> {stat_name: value}
        """
        ovs_statis_dict = {}
        ovs_iface_info = ovs.ovs_vsctl(["list",
                                        "interface"]).stdout_text.strip()
        # Raw strings: '\s'/'\S' in a plain string literal are invalid
        # escape sequences (DeprecationWarning on py3.6+).
        ovs_iface_list = re.findall(
            r'name\s+: (\S+)\n.*?statistics\s+: {(.*?)}\n', ovs_iface_info,
            re.S)
        logging.info("ovs iface list is %s", ovs_iface_list)
        # Dict of iface name and statistics
        for iface_name in vhostuser_names.split():
            for ovs_iface in ovs_iface_list:
                # ovs-vsctl quotes the interface name; literal_eval strips
                # the quotes safely instead of eval() on command output.
                if iface_name == ast.literal_eval(ovs_iface[0]):
                    format_statis = dict(
                        re.findall(r'(\S*?)=(\d*?),', ovs_iface[1]))
                    ovs_statis_dict[iface_name] = format_statis
                    break
        return ovs_statis_dict

    status_error = "yes" == params.get("status_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    unprivileged_user = params.get("unprivileged_user")

    # Interface specific attributes.
    iface_type = params.get("iface_type", "network")
    iface_source = params.get("iface_source", "{}")
    iface_driver = params.get("iface_driver")
    iface_model = get_iface_model(params.get("iface_model", "virtio"),
                                  host_arch)
    iface_target = params.get("iface_target")
    iface_backend = params.get("iface_backend", "{}")
    iface_driver_host = params.get("iface_driver_host")
    iface_driver_guest = params.get("iface_driver_guest")
    ovs_br_name = params.get("ovs_br_name")
    vhostuser_names = params.get("vhostuser_names")
    attach_device = params.get("attach_iface_device")
    expect_tx_size = params.get("expect_tx_size")
    guest1_ip = params.get("vhostuser_guest1_ip", "192.168.100.1")
    guest2_ip = params.get("vhostuser_guest2_ip", "192.168.100.2")
    change_option = "yes" == params.get("change_iface_options", "no")
    update_device = "yes" == params.get("update_iface_device", "no")
    additional_guest = "yes" == params.get("additional_guest", "no")
    serial_login = "******" == params.get("serial_login", "no")
    rm_vhost_driver = "yes" == params.get("rm_vhost_driver", "no")
    test_option_cmd = "yes" == params.get("test_iface_option_cmd", "no")
    test_option_xml = "yes" == params.get("test_iface_option_xml", "no")
    test_vhost_net = "yes" == params.get("test_vhost_net", "no")
    test_option_offloads = "yes" == params.get("test_option_offloads", "no")
    test_iface_user = "******" == params.get("test_iface_user", "no")
    test_iface_mcast = "yes" == params.get("test_iface_mcast", "no")
    test_libvirtd = "yes" == params.get("test_libvirtd", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    restart_vm = "yes" == params.get("restart_vm", "no")
    test_guest_ip = "yes" == params.get("test_guest_ip", "no")
    test_backend = "yes" == params.get("test_backend", "no")
    check_guest_trans = "yes" == params.get("check_guest_trans", "no")
    set_ip = "yes" == params.get("set_user_ip", "no")
    set_ips = params.get("set_ips", "").split()
    expect_ip = params.get("expect_ip")
    expect_gw = params.get("expect_gw")
    expect_ns = params.get("expect_ns")
    test_target = "yes" == params.get("test_target", "no")
    target_dev = params.get("target_dev", None)

    # test params for vhostuser test
    huge_page = ast.literal_eval(params.get("huge_page", "{}"))
    numa_cell = ast.literal_eval(params.get("numa_cell", "{}"))
    additional_iface_source = ast.literal_eval(
        params.get("additional_iface_source", "{}"))
    vcpu_num = params.get("vcpu_num")
    cpu_mode = params.get("cpu_mode")
    hugepage_num = params.get("hugepage_num")
    log_pattern = params.get("log_pattern")

    # judgement params for vhostuer test
    need_vhostuser_env = "yes" == params.get("need_vhostuser_env", "no")
    ping_expect_fail = "yes" == params.get("ping_expect_fail", "no")
    check_libvirtd_log = "yes" == params.get("check_libvirtd_log", "no")
    check_statistics = "yes" == params.get("check_statistics", "no")
    enable_multiqueue = "yes" == params.get("enable_multiqueue", "no")

    queue_size = None
    if iface_driver:
        driver_dict = ast.literal_eval(iface_driver)
        if "queues" in driver_dict:
            queue_size = int(driver_dict.get("queues"))

    if iface_driver_host or iface_driver_guest or test_backend:
        if not libvirt_version.version_compare(1, 2, 8):
            test.cancel("Offloading/backend options not "
                        "supported in this libvirt version")
    if iface_driver and "queues" in ast.literal_eval(iface_driver):
        if not libvirt_version.version_compare(1, 0, 6):
            test.cancel("Queues options not supported"
                        " in this libvirt version")

    if unprivileged_user:
        if not libvirt_version.version_compare(1, 1, 1):
            test.cancel("qemu-bridge-helper not supported" " on this host")
        virsh_dargs["unprivileged_user"] = unprivileged_user
        # Create unprivileged user if needed
        cmd = ("grep {0} /etc/passwd || "
               "useradd {0}".format(unprivileged_user))
        process.run(cmd, shell=True)
        # Need another disk image for unprivileged user to access
        dst_disk = "/tmp/%s.img" % unprivileged_user

    # Destroy VM first
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    iface_mac_old = vm_xml.VMXML.get_first_mac_by_name(vm_name)
    # iface_mac will update if attach a new interface
    iface_mac = iface_mac_old
    # Additional vm for test
    additional_vm = None
    libvirtd = utils_libvirtd.Libvirtd()

    libvirtd_log_path = None
    libvirtd_conf = None
    if check_libvirtd_log:
        libvirtd_log_path = os.path.join(test.tmpdir, "libvirtd.log")
        libvirtd_conf = utils_config.LibvirtdConfig()
        libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
        libvirtd.restart()

    # Prepare vhostuser
    ovs = None
    if need_vhostuser_env:
        # Reserve selinux status
        selinux_mode = utils_selinux.get_status()
        # Reserve orig page size
        orig_size = utils_memory.get_num_huge_pages()
        ovs_dir = data_dir.get_tmp_dir()
        ovs = utils_net.setup_ovs_vhostuser(hugepage_num, ovs_dir, ovs_br_name,
                                            vhostuser_names, queue_size)

    try:
        # Build the xml and run test.
        try:
            # Prepare interface backend files
            if test_backend:
                if not os.path.exists("/dev/vhost-net"):
                    process.run("modprobe vhost-net", shell=True)
                backend = ast.literal_eval(iface_backend)
                backend_tap = "/dev/net/tun"
                backend_vhost = "/dev/vhost-net"
                if not backend:
                    backend["tap"] = backend_tap
                    backend["vhost"] = backend_vhost
                if not start_error:
                    # Create backend files for normal test
                    if not os.path.exists(backend["tap"]):
                        os.rename(backend_tap, backend["tap"])
                    if not os.path.exists(backend["vhost"]):
                        os.rename(backend_vhost, backend["vhost"])
            # Edit the interface xml.
            if change_option:
                modify_iface_xml(update=False)
                if define_error:
                    return

            if test_target:
                logging.debug("Setting target device name to %s", target_dev)
                modify_iface_xml(update=False)

            if rm_vhost_driver:
                # remove vhost driver on host and
                # the character file /dev/vhost-net
                cmd = ("modprobe -r {0}; "
                       "rm -f /dev/vhost-net".format("vhost_net"))
                if process.system(cmd, ignore_status=True, shell=True):
                    test.error("Failed to remove vhost_net driver")
            else:
                # Load vhost_net driver by default
                cmd = "modprobe vhost_net"
                process.system(cmd, shell=True)

            # Attach a interface when vm is shutoff
            if attach_device == 'config':
                iface_mac = utils_net.generate_mac_address_simple()
                iface_xml_obj = create_iface_xml(iface_mac)
                iface_xml_obj.xmltreefile.write()
                ret = virsh.attach_device(vm_name,
                                          iface_xml_obj.xml,
                                          flagstr="--config",
                                          ignore_status=True)
                libvirt.check_exit_status(ret)

            # Add hugepage and update cpu for vhostuser testing
            if huge_page:
                vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                membacking = vm_xml.VMMemBackingXML()
                hugepages = vm_xml.VMHugepagesXML()
                pagexml = hugepages.PageXML()
                pagexml.update(huge_page)
                hugepages.pages = [pagexml]
                membacking.hugepages = hugepages
                vmxml.mb = membacking

                vmxml.vcpu = int(vcpu_num)
                cpu_xml = vm_xml.VMCPUXML()
                cpu_xml.xml = "<cpu><numa/></cpu>"
                cpu_xml.numa_cell = cpu_xml.dicts_to_cells([numa_cell])
                cpu_xml.mode = cpu_mode
                if cpu_mode == "custom":
                    vm_capability = capability_xml.CapabilityXML()
                    cpu_xml.model = vm_capability.model
                vmxml.cpu = cpu_xml

                vmxml.sync()
                logging.debug("xmltreefile:%s", vmxml.xmltreefile)

            # Clone additional vm
            if additional_guest:
                add_vm_name = "%s_%s" % (vm_name, '1')
                # Clone additional guest
                timeout = params.get("clone_timeout", 360)
                utils_libguestfs.virt_clone_cmd(vm_name,
                                                add_vm_name,
                                                True,
                                                timeout=timeout)
                additional_vm = vm.clone(add_vm_name)
                # Update iface source if needed
                if additional_iface_source:
                    add_vmxml = vm_xml.VMXML.new_from_dumpxml(add_vm_name)
                    add_xml_devices = add_vmxml.devices
                    add_iface_index = add_xml_devices.index(
                        add_xml_devices.by_device_tag("interface")[0])
                    add_iface = add_xml_devices[add_iface_index]
                    add_iface.source = additional_iface_source
                    add_vmxml.devices = add_xml_devices
                    add_vmxml.xmltreefile.write()
                    add_vmxml.sync()

                    logging.debug("add vm xmltreefile:%s",
                                  add_vmxml.xmltreefile)
                additional_vm.start()
                # additional_vm.wait_for_login()
                username = params.get("username")
                password = params.get("password")
                add_session = additional_vm.wait_for_serial_login(
                    username=username, password=password)

            # Start the VM.
            if unprivileged_user:
                virsh.start(vm_name, **virsh_dargs)
                cmd = ("su - %s -c 'virsh console %s'" %
                       (unprivileged_user, vm_name))
                session = aexpect.ShellSession(cmd)
                session.sendline()
                remote.handle_prompts(session, params.get("username"),
                                      params.get("password"), r"[\#\$]\s*$",
                                      60)
                # Get ip address on guest
                if not get_guest_ip(session, iface_mac):
                    test.error("Can't get ip address on guest")
            else:
                # Will raise VMStartError exception if start fails
                vm.start()
                if serial_login:
                    session = vm.wait_for_serial_login()
                else:
                    session = vm.wait_for_login()
            if start_error:
                test.fail("VM started unexpectedly")

            # Attach a interface when vm is running
            if attach_device == 'live':
                iface_mac = utils_net.generate_mac_address_simple()
                iface_xml_obj = create_iface_xml(iface_mac)
                iface_xml_obj.xmltreefile.write()
                ret = virsh.attach_device(vm_name,
                                          iface_xml_obj.xml,
                                          flagstr="--live",
                                          ignore_status=True,
                                          debug=True)
                libvirt.check_exit_status(ret, status_error)
                # Need sleep here for attachment take effect
                time.sleep(5)

            # Update a interface options
            if update_device:
                modify_iface_xml(update=True, status_error=status_error)

            # Run tests for qemu-kvm command line options
            if test_option_cmd:
                run_cmdline_test(iface_mac, host_arch)
            # Run tests for vm xml
            if test_option_xml:
                run_xml_test(iface_mac)
            # Run tests for offloads options
            if test_option_offloads:
                if iface_driver_host:
                    ifname_guest = utils_net.get_linux_ifname(
                        session, iface_mac)
                    check_offloads_option(ifname_guest,
                                          ast.literal_eval(iface_driver_host),
                                          session)
                if iface_driver_guest:
                    ifname_host = libvirt.get_ifname_host(vm_name, iface_mac)
                    check_offloads_option(ifname_host,
                                          ast.literal_eval(iface_driver_guest))

            if test_iface_user:
                # Test user type network
                check_user_network(session)
            if test_iface_mcast:
                # Test mcast type network
                check_mcast_network(session, add_session)
            # Check guest ip address
            if test_guest_ip:
                if not get_guest_ip(session, iface_mac):
                    test.fail("Guest can't get a" " valid ip address")
            # Check guest RX/TX ring
            if check_guest_trans:
                ifname_guest = utils_net.get_linux_ifname(session, iface_mac)
                ret, outp = session.cmd_status_output("ethtool -g %s" %
                                                      ifname_guest)
                if ret:
                    test.fail("ethtool return error code")
                logging.info("ethtool output is %s", outp)
                driver_dict = ast.literal_eval(iface_driver)
                if expect_tx_size:
                    driver_dict['tx_queue_size'] = expect_tx_size
                for outp_p in outp.split("Current hardware"):
                    if 'rx_queue_size' in driver_dict:
                        if re.search(
                                r"RX:\s*%s" % driver_dict['rx_queue_size'],
                                outp_p):
                            logging.info("Find RX setting RX:%s by ethtool",
                                         driver_dict['rx_queue_size'])
                        else:
                            test.fail("Cannot find matching rx setting")
                    if 'tx_queue_size' in driver_dict:
                        if re.search(
                                r"TX:\s*%s" % driver_dict['tx_queue_size'],
                                outp_p):
                            logging.info("Find TX settint TX:%s by ethtool",
                                         driver_dict['tx_queue_size'])
                        else:
                            test.fail("Cannot find matching tx setting")
            if test_target:
                logging.debug("Check if the target dev is set")
                run_xml_test(iface_mac)

            # Check vhostuser guest
            if additional_iface_source:
                check_vhostuser_guests(session, add_session)

            # Check libvirtd log
            if check_libvirtd_log:
                find = 0
                with open(libvirtd_log_path) as f:
                    lines = "".join(f.readlines())
                    if log_pattern in lines:
                        logging.info("Finding msg<%s> in libvirtd log",
                                     log_pattern)
                    else:
                        test.fail("Can not find msg:<%s> in libvirtd.log" %
                                  log_pattern)

            # Check statistics
            if check_statistics:
                session.sendline("ping %s" % guest2_ip)
                add_session.sendline("ping %s" % guest1_ip)
                time.sleep(5)
                vhost_name = vhostuser_names.split()[0]
                ovs_statis_dict = get_ovs_statis(ovs)[vhost_name]
                domif_info = {}
                domif_info = libvirt.get_interface_details(vm_name)
                virsh.domiflist(vm_name, debug=True)
                domif_stat_result = virsh.domifstat(vm_name, vhost_name)
                if domif_stat_result.exit_status != 0:
                    test.fail("domifstat cmd fail with msg:%s" %
                              domif_stat_result.stderr)
                else:
                    domif_stat = domif_stat_result.stdout.strip()
                logging.debug("vhost_name is %s, domif_stat is %s", vhost_name,
                              domif_stat)
                domif_stat_dict = dict(
                    re.findall("%s (\S*) (\d*)" % vhost_name, domif_stat))
                logging.debug("ovs_statis is %s, domif_stat is %s",
                              ovs_statis_dict, domif_stat_dict)
                ovs_cmp_dict = {
                    'tx_bytes': ovs_statis_dict['rx_bytes'],
                    'tx_drop': ovs_statis_dict['rx_dropped'],
                    'tx_errs': ovs_statis_dict['rx_errors'],
                    'tx_packets': ovs_statis_dict['rx_packets'],
                    'rx_bytes': ovs_statis_dict['tx_bytes'],
                    'rx_drop': ovs_statis_dict['tx_dropped']
                }
                logging.debug("ovs_cmp_dict is %s", ovs_cmp_dict)
                for dict_key in ovs_cmp_dict.keys():
                    if domif_stat_dict[dict_key] != ovs_cmp_dict[dict_key]:
                        test.fail(
                            "Find ovs %s result (%s) different with domifstate result (%s)"
                            % (dict_key, ovs_cmp_dict[dict_key],
                               domif_stat_dict[dict_key]))
                    else:
                        logging.info("ovs %s value %s is same with domifstate",
                                     dict_key, domif_stat_dict[dict_key])

            # Check multi_queue
            if enable_multiqueue:
                ifname_guest = utils_net.get_linux_ifname(session, iface_mac)
                for comb_size in (queue_size, queue_size - 1):
                    logging.info("Setting multiqueue size to %s" % comb_size)
                    session.cmd_status("ethtool -L %s combined %s" %
                                       (ifname_guest, comb_size))
                    ret, outp = session.cmd_status_output("ethtool -l %s" %
                                                          ifname_guest)
                    logging.debug("ethtool cmd output:%s" % outp)
                    if not ret:
                        pre_comb = re.search(
                            "Pre-set maximums:[\s\S]*?Combined:.*?(\d+)",
                            outp).group(1)
                        cur_comb = re.search(
                            "Current hardware settings:[\s\S]*?Combined:.*?(\d+)",
                            outp).group(1)
                        if int(pre_comb) != queue_size or int(cur_comb) != int(
                                comb_size):
                            test.fail(
                                "Fail to check the combined size: setting: %s,"
                                "Pre-set: %s, Current-set: %s, queue_size: %s"
                                % (comb_size, pre_comb, cur_comb, queue_size))
                        else:
                            logging.info(
                                "Getting correct Pre-set and Current set value"
                            )
                    else:
                        test.error("ethtool list fail: %s" % outp)

            session.close()
            if additional_guest:
                add_session.close()

            # Restart libvirtd and guest, then test again
            if restart_libvirtd:
                libvirtd.restart()

            if restart_vm:
                vm.destroy(gracefully=True)
                vm.start()
                if test_option_xml:
                    run_xml_test(iface_mac)

            # Detach hot/cold-plugged interface at last
            if attach_device and not status_error:
                ret = virsh.detach_device(vm_name,
                                          iface_xml_obj.xml,
                                          flagstr="",
                                          ignore_status=True,
                                          debug=True)
                libvirt.check_exit_status(ret)

        except virt_vm.VMStartError as e:
            logging.info(str(e))
            if not start_error:
                test.fail('VM failed to start\n%s' % e)

    finally:
        # Recover VM.
        logging.info("Restoring vm...")
        # Restore interface backend files
        if test_backend:
            if not os.path.exists(backend_tap):
                os.rename(backend["tap"], backend_tap)
            if not os.path.exists(backend_vhost):
                os.rename(backend["vhost"], backend_vhost)
        if rm_vhost_driver:
            # Restore vhost_net driver
            process.system("modprobe vhost_net", shell=True)
        if unprivileged_user:
            virsh.remove_domain(vm_name, **virsh_dargs)
            process.run('rm -f %s' % dst_disk, shell=True)
        if additional_vm:
            virsh.remove_domain(additional_vm.name, "--remove-all-storage")
            # Kill all omping server process on host
            process.system("pidof omping && killall omping",
                           ignore_status=True,
                           shell=True)
        if vm.is_alive():
            vm.destroy(gracefully=True)
        vmxml_backup.sync()

        if need_vhostuser_env:
            utils_net.clean_ovs_env(selinux_mode=selinux_mode,
                                    page_size=orig_size,
                                    clean_ovs=True)

        if libvirtd_conf:
            libvirtd_conf.restore()
            libvirtd.restart()

        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
Example #46
0
def run(test, params, env):
    """
    Test Boot OVMF Guest and Seabios Guest with options

    Steps:
    1) Edit VM xml with specified options
    2) For secure boot mode, boot OVMF Guest from
       cdrom first, enroll the key, then switch
       boot from hd
    3) For normal boot mode, directly boot Guest from given device
    4) Verify if Guest can boot as expected

    :param test: avocado test object (provides fail/error/virtdir).
    :param params: dict-like test parameters from the cfg file.
    :param env: test environment object used to look up the VM.
    """
    vm_name = params.get("main_vm", "")
    vm = env.get_vm(vm_name)
    username = params.get("username", "root")
    password = params.get("password", "redhat")
    test_cmd = params.get("test_cmd", "")
    expected_output = params.get("expected_output", "")
    check_point = params.get("checkpoint", "")
    status_error = "yes" == params.get("status_error", "no")
    boot_iso_file = os.path.join(data_dir.get_tmp_dir(), "boot.iso")
    non_release_os_url = params.get("non_release_os_url", "")
    download_file_path = os.path.join(data_dir.get_tmp_dir(),
                                      "non_released_os.qcow2")
    release_os_url = params.get("release_os_url", "")
    download_released_file_path = os.path.join(data_dir.get_tmp_dir(),
                                               "released_os.qcow2")
    uefi_iso = params.get("uefi_iso", "")
    custom_codes = params.get("uefi_custom_codes", "")
    uefi_target_dev = params.get("uefi_target_dev", "")
    uefi_device_bus = params.get("uefi_device_bus", "")
    with_boot = (params.get("with_boot", "no") == "yes")
    boot_ref = params.get("boot_ref", "dev")
    boot_order = params.get("boot_order", "1")
    boot_dev = params.get("boot_dev", "hd")
    target_dev = params.get("target_dev", "vdb")
    vol_name = params.get("vol_name")
    brick_path = os.path.join(test.virtdir, "gluster-pool")
    boot_type = params.get("boot_type", "seabios")
    boot_loadparm = params.get("boot_loadparm", None)
    # Cancel the test early if the required libvirt feature is missing
    libvirt_version.is_libvirt_feature_supported(params)

    # Prepare result checkpoint list (expected failure patterns)
    check_points = []
    if check_point:
        check_points.append(check_point)

    # Back up VM XML so the guest can be restored in the finally block
    vmxml_backup = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

    # Prepare blank values to decide whether the ceph configure files
    # need to be deleted at the end of the test
    ceph_cfg = ''
    keyring_file = ''
    try:
        # Create ceph config/keyring files if they don't exist
        ceph_cfg = ceph.create_config_file(params.get("mon_host"))
        keyring_file = ceph.create_keyring_file(params.get("client_name"),
                                                params.get('client_key'))
        setup_test_env(params, test)
        apply_boot_options(vmxml, params, test)
        blk_source = vm.get_first_disk_devices()['source']
        set_domain_disk(vmxml, blk_source, params, test)
        vmxml.remove_all_boots()
        if with_boot:
            boot_kwargs = {
                "boot_ref": boot_ref,
                "boot_dev": boot_dev,
                "boot_order": boot_order,
                "target_dev": target_dev,
                "loadparm": boot_loadparm
            }
            if "yes" == params.get("two_same_boot_dev", "no"):
                boot_kwargs.update({"two_same_boot_dev": True})
            set_boot_dev_or_boot_order(vmxml, **boot_kwargs)
        define_error = ("yes" == params.get("define_error", "no"))
        enable_normal_boot(vmxml, check_points, define_error, test)
        # Some negative cases failed at virsh.define
        if define_error:
            return

        # Start VM and check result
        # For boot from cdrom or non_released_os, just verify key words from serial console output
        # For boot from disk image, run 'test cmd' to verify if OS boot well
        if boot_dev == "cdrom" or non_release_os_url:
            if not vm.is_alive():
                vm.start()
                check_prompt = params.get("check_prompt", "")
                # Poll the serial console until the expected prompt shows up
                while True:
                    if boot_type == "ovmf":
                        match, text = vm.serial_console.read_until_any_line_matches(
                            [check_prompt], timeout=30.0, internal_timeout=0.5)
                    else:
                        match, text = read_until_any_line_matches(
                            vm.serial_console, [check_prompt],
                            timeout=30.0,
                            internal_timeout=0.5)
                    logging.debug("matches %s", check_prompt)
                    if match == -1:
                        logging.debug("Got check point as expected")
                        break
        elif boot_dev == "hd":
            ret = virsh.start(vm_name, timeout=60)
            utlv.check_result(ret, expected_fails=check_points)
            # For no boot options, further check if boot dev can be automatically added
            if not with_boot:
                if re.search(r"<boot dev='hd'/>",
                             virsh.dumpxml(vm_name).stdout.strip()):
                    logging.debug("OS boot dev added automatically")
                else:
                    test.fail("OS boot dev not added as expected")
            if not status_error:
                vm_ip = vm.wait_for_get_address(0, timeout=240)
                remote_session = remote.wait_for_login("ssh", vm_ip, "22",
                                                       username, password,
                                                       r"[\#\$]\s*$")
                if test_cmd:
                    status, output = remote_session.cmd_status_output(test_cmd)
                    logging.debug("CMD '%s' running result is:\n%s", test_cmd,
                                  output)
                    if expected_output:
                        if not re.search(expected_output, output):
                            test.fail("Expected '%s' to match '%s'"
                                      " but failed." %
                                      (output, expected_output))
                    if status:
                        test.fail("Failed to boot %s from %s" %
                                  (vm_name, vmxml.xml))
                remote_session.close()
        logging.debug("Succeed to boot %s" % vm_name)
    finally:
        # Remove ceph configure file if created.
        for a_file in [ceph_cfg, keyring_file]:
            if os.path.exists(a_file):
                os.remove(a_file)
        logging.debug("Start to cleanup")
        # BUGFIX: the original tested the bound method object
        # ("if vm.is_alive:"), which is always truthy, so destroy() ran
        # unconditionally; call the method to check the actual state.
        if vm.is_alive():
            vm.destroy()
        logging.debug("Restore the VM XML")
        vmxml_backup.sync(options="--nvram")
        # NOTE(review): the cleanup_* flags below are not assigned anywhere in
        # this function — presumably set at module level or lost in excerpting;
        # confirm they are defined before relying on this cleanup path.
        if cleanup_gluster:
            process.run("umount /mnt", ignore_status=True, shell=True)
            gluster.setup_or_cleanup_gluster(False,
                                             brick_path=brick_path,
                                             **params)
        if cleanup_iscsi:
            utlv.setup_or_cleanup_iscsi(False)
        if cleanup_iso_file:
            process.run("rm -rf %s" % boot_iso_file,
                        shell=True,
                        ignore_status=True)
        if cleanup_image_file:
            process.run("rm -rf %s" % download_file_path,
                        shell=True,
                        ignore_status=True)
        if cleanup_released_image_file:
            process.run("rm -rf %s" % download_released_file_path,
                        shell=True,
                        ignore_status=True)
Example #47
0
 elif checkpoint.startswith('floppy'):
     img_path = data_dir.get_tmp_dir() + '/floppy.img'
     utlv.create_local_disk('floppy', img_path)
     attach_removable_media('floppy', img_path, 'fda')
     if checkpoint == 'floppy_devmap':
         insert_floppy_devicemap()
 elif checkpoint.startswith('fstab'):
     if checkpoint == 'fstab_cdrom':
         img_path = data_dir.get_tmp_dir() + '/cdrom.iso'
         utlv.create_local_disk('iso', img_path)
         attach_removable_media('cdrom', img_path, 'hdc')
     elif checkpoint == 'fstab_virtio':
         change_disk_bus('virtio')
     specify_fstab_entry(checkpoint[6:])
 elif checkpoint == 'running':
     virsh.start(vm_name)
     logging.info('VM state: %s' %
                  virsh.domstate(vm_name).stdout.strip())
 elif checkpoint == 'paused':
     virsh.start(vm_name, '--paused')
     logging.info('VM state: %s' %
                  virsh.domstate(vm_name).stdout.strip())
 elif checkpoint == 'serial_terminal':
     grub_serial_terminal()
     check_boot()
 elif checkpoint == 'no_space':
     create_large_file()
 elif checkpoint == 'corrupt_rpmdb':
     corrupt_rpmdb()
 elif checkpoint == 'bogus_kernel':
     bogus_kernel()
Example #48
0
def run(test, params, env):
    """
    Test command: virsh cpu-compare.

    Compare host CPU with a CPU described by an XML file.
    1.Get all parameters from configuration.
    2.Prepare temp file saves of CPU information.
    3.Perform virsh cpu-compare operation.
    4.Confirm the result.

    :param test: avocado test object (provides tmpdir).
    :param params: dict-like test parameters from the cfg file.
    :param env: test environment object used to look up the VM.
    """

    def get_cpu_xml(target, mode):
        """
        Get CPU information and put it into a file.

        :param target: Test target, host or guest's cpu description.
        :param mode: Test mode, decides file's detail.
        :return: a VMCPUXML instance describing the requested CPU.
        """
        libvirtxml = vm_xml.VMCPUXML()
        if target == "host":
            # Mirror the host capability CPU: vendor, model and every
            # feature marked as required.
            cpu_feature_list = host_cpu_xml.get_feature_list()
            if cpu_match:
                libvirtxml['match'] = cpu_match
            libvirtxml['vendor'] = host_cpu_xml['vendor']
            libvirtxml['model'] = host_cpu_xml['model']
            for cpu_feature in cpu_feature_list:
                feature_name = cpu_feature.get('name')
                libvirtxml.add_feature(feature_name, "require")
        else:
            try:
                libvirtxml = vmxml['cpu']
            except LibvirtXMLNotFoundError:
                raise error.TestNAError("No <cpu> element in domain XML")

        # Optionally mutate one aspect of the XML to produce the
        # positive/negative comparison scenarios.
        if mode == "modify":
            if modify_target == "mode":
                libvirtxml['mode'] = modify_value
            elif modify_target == "model":
                libvirtxml['model'] = modify_value
            elif modify_target == "vendor":
                libvirtxml['vendor'] = modify_value
            elif modify_target == "feature_name":
                if modify_value == "REPEAT":
                    # Duplicate an existing feature to trigger the
                    # "more than once" error from libvirt.
                    feature_name = libvirtxml.get_feature_name(feature_num)
                    feature_policy = libvirtxml.get_feature_policy(0)
                    libvirtxml.add_feature(feature_name, feature_policy)
                else:
                    libvirtxml.set_feature(feature_num, name=modify_value)
            elif modify_target == "feature_policy":
                libvirtxml.set_feature(feature_num, policy=modify_value)
        elif mode == "delete":
            libvirtxml.remove_feature(feature_num)
        else:
            pass
        return libvirtxml

    # Get all parameters.
    ref = params.get("cpu_compare_ref")
    mode = params.get("cpu_compare_mode", "")
    modify_target = params.get("cpu_compare_modify_target", "")
    modify_value = params.get("cpu_compare_modify_value", "")
    feature_num = int(params.get("cpu_compare_feature_num", -1))
    target = params.get("cpu_compare_target", "host")
    extra = params.get("cpu_compare_extra", "")
    file_name = params.get("cpu_compare_file_name", "cpu.xml")
    cpu_match = params.get("cpu_compare_cpu_match", "")
    modify_invalid = "yes" == params.get("cpu_compare_modify_invalid", "no")
    check_vm_ps = "yes" == params.get("check_vm_ps", "no")
    check_vm_ps_value = params.get("check_vm_ps_value")
    tmp_file = os.path.join(test.tmpdir, file_name)

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if vm.is_alive():
        vm.destroy()
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    host_cpu_xml = capability_xml.CapabilityXML()

    try:
        # Add cpu element if it not present in VM XML
        if not vmxml.get('cpu'):
            new_cpu = vm_xml.VMCPUXML()
            new_cpu['model'] = host_cpu_xml['model']
            vmxml['cpu'] = new_cpu
        # Add cpu model element if it not present in VM XML
        if not vmxml['cpu'].get('model'):
            vmxml['cpu']['model'] = host_cpu_xml['model']
        # Prepare VM cpu feature if necessary
        if modify_target in ['feature_name', 'feature_policy', 'delete']:
            if len(vmxml['cpu'].get_feature_list()) == 0:
                # Add a host feature to VM for testing
                vmxml_cpu = vmxml['cpu'].copy()
                vmxml_cpu.add_feature(host_cpu_xml.get_feature_name('-1'))
                vmxml['cpu'] = vmxml_cpu
                vmxml.sync()

        # Prepare temp compare file.
        cpu_compare_xml = get_cpu_xml(target, mode)
        # BUGFIX: use a context manager so the file handle is released even
        # if writing/serializing the XML raises (the original leaked the
        # handle on any exception between open() and close()).
        with open(tmp_file, 'w+b') as cpu_compare_xml_f:
            if mode == "clear":
                cpu_compare_xml_f.truncate(0)
            else:
                cpu_compare_xml.xmltreefile.write(cpu_compare_xml_f)
            cpu_compare_xml_f.seek(0)
            logging.debug("CPU description XML:\n%s", cpu_compare_xml_f.read())

        # Expected possible result msg patterns and exit status
        msg_patterns = ""
        if not mode:
            if target == "host":
                msg_patterns = "identical"
            else:
                # As we don't know the <cpu> element in domain,
                # so just check command exit status
                pass
        elif mode == "delete":
            if cpu_match == "strict":
                msg_patterns = "incompatible"
            else:
                msg_patterns = "superset"
        elif mode == "modify":
            if modify_target == "mode":
                if modify_invalid:
                    msg_patterns = "Invalid mode"
            elif modify_target == "model":
                if modify_invalid:
                    msg_patterns = "Unknown CPU model"
            elif modify_target == "vendor":
                if modify_invalid:
                    msg_patterns = "incompatible"
            elif modify_target == "feature_name":
                if modify_value == "REPEAT":
                    msg_patterns = "more than once"
                elif modify_value == "ia64":
                    msg_patterns = "incompatible"
                elif modify_invalid:
                    msg_patterns = "Unknown"
            elif modify_target == "feature_policy":
                if modify_value == "forbid":
                    msg_patterns = "incompatible"
                else:
                    msg_patterns = "identical"
            else:
                raise error.TestNAError("Unsupport modify target %s in this "
                                        "test" % mode)
        elif mode == "clear":
            msg_patterns = "empty"
        elif mode == "invalid_test":
            msg_patterns = ""
        else:
            raise error.TestNAError("Unsupport modify mode %s in this "
                                    "test" % mode)
        status_error = params.get("status_error", "")
        if status_error == "yes":
            expected_status = 1
        elif status_error == "no":
            expected_status = 0
        else:
            # If exit status is not specified in cfg, using msg_patterns
            # to get expect exit status
            if msg_patterns in ['identical', 'superset']:
                expected_status = 0
            else:
                expected_status = 1
            # Default guest cpu compare should pass
            if not mode and target == "guest":
                expected_status = 0

        if ref == "file":
            ref = tmp_file
        ref = "%s %s" % (ref, extra)

        # Perform virsh cpu-compare operation.
        result = virsh.cpu_compare(ref, ignore_status=True, debug=True)

        if os.path.exists(tmp_file):
            os.remove(tmp_file)
        # Check result
        logging.debug("Expect command exit status: %s", expected_status)
        if result.exit_status != expected_status:
            raise error.TestFail("Exit status %s is not expected"
                                 % result.exit_status)
        if msg_patterns:
            logging.debug("Expect key word in command output: %s", msg_patterns)
            # cpu-compare may report on stdout or stderr depending on outcome
            if result.stdout.strip():
                output = result.stdout.strip()
            else:
                output = result.stderr.strip()
            if not output.count(msg_patterns):
                raise error.TestFail("Not find expect key word in command output")

        # Check VM for cpu 'mode' related cases
        if check_vm_ps:
            vmxml['cpu'] = cpu_compare_xml
            vmxml.sync()
            virsh.start(vm_name, ignore_status=True, debug=True)
            vm_pid = vm.get_pid()
            if vm_pid is None:
                raise error.TestError("Could not get VM pid")
            with open("/proc/%d/cmdline" % vm_pid) as vm_cmdline_file:
                vm_cmdline = vm_cmdline_file.read()
            vm_features_str = ""
            # cmdline file is always separated by NUL characters('\x00')
            for item in vm_cmdline.split('\x00-'):
                if item.count('cpu'):
                    vm_features_str = item
            logging.debug("VM cpu device: %s", vm_features_str)
            # Features qemu enables are passed as "+feature" flags
            vm_features = []
            for f in vm_features_str.split(','):
                if f.startswith('+'):
                    vm_features.append(f[1:])
            host_features = []
            if check_vm_ps_value == 'CAPABILITY':
                for feature in host_cpu_xml.get_feature_list():
                    host_features.append(feature.get('name'))
            else:
                host_features.append(check_vm_ps_value)
            for feature in vm_features:
                if feature not in host_features:
                    raise error.TestFail("Not find %s in host capability"
                                         % feature)
    finally:
        # Always restore the original domain XML
        vmxml_backup.sync()
Example #49
0
def run(test, params, env):
    """
    Test Live update the numatune nodeset and memory can spread to other node
    automatically.

    :param test: avocado test object
    :param params: test parameters dictionary
    :param env: test environment object
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    status_error = "yes" == params.get("status_error", "no")
    memory_mode = params.get("memory_mode")
    error_message = params.get("error_message")
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
    memhog_rt = {}

    try:
        # Prepare host and unpack the prepared test constants directly,
        # instead of indexing into the returned sequence one item at a time.
        (numa_memory, oversize, undersize, memory_to_eat, neighbour,
         nodeset_string) = prepare_host_for_test(params, test)
        # Prepare VM XML
        prepare_vm_xml_for_test(vm_name, numa_memory, oversize, undersize)
        # Start the VM
        virsh.start(vm_name, ignore_status=False, debug=True)
        session = vm.wait_for_login()
        # Check guest numatune status
        result = virsh.numatune(vm_name, debug=True)
        if result.exit_status:
            test.fail(
                "Something went wrong during the virsh numatune command.")
        # Change guest numatune status
        result = virsh.numatune(vm_name,
                                mode=memory_mode,
                                nodeset=nodeset_string,
                                debug=True)
        if result.exit_status:
            if status_error:
                # libvirt forbid memory migration for strict mode
                utils_test.libvirt.check_status_output(
                    result.exit_status,
                    result.stderr_text,
                    expected_fails=[error_message])
            else:
                test.fail(
                    "Something went wrong during the 'virsh numatune {} {}' "
                    "command.".format(memory_mode, nodeset_string))
        if not status_error:
            # Prepare Guest
            prepare_guest_for_test(vm_name, session, test, oversize,
                                   memory_to_eat)
            # And get the numastat prior the test
            total_prior = get_qemu_total_for_nodes()
            # Consume guest memory to make memory spread
            make_memory_spread(test, vm, session, memory_to_eat, memhog_rt)
            # Get the numastat after the test
            total_after = get_qemu_total_for_nodes()
            check_numa_memory(test, params, neighbour, total_prior,
                              total_after)

    finally:
        backup_xml.sync()
        # Re-enable swap in case the host preparation disabled it.
        process.run("swapon -a", shell=True)
Example #50
0
def run(test, params, env):
    """
    Verify a guest can start with different cpu mode settings on ppc,
    and optionally survive a managedsave/start cycle afterwards.

    :param test: avocado test object
    :param params: test parameters dictionary
    :param env: test environment object
    """
    if 'ppc64le' not in platform.machine().lower():
        test.cancel('This case is for ppc only')

    vm_name = params.get('main_vm')
    status_error = params.get('status_error', 'no') == 'yes'
    error_msg = params.get('error_msg', '')

    cpu_mode = params.get('cpu_mode', '')
    match = params.get('match', '')
    model = params.get('model', '')
    fallback = params.get('fallback', '')
    qemu_line = params.get('qemu_line', '')
    do_managedsave = params.get('do_managedsave', 'no') == 'yes'

    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        # With mode='custom' the exact cpu model name must be upper case,
        # and the qemu command line is then expected to carry '-cpu $MODEL'.
        if cpu_mode == 'custom':
            model = model.upper()
        if model.isupper():
            qemu_line = '-cpu %s' % model

        # Build the cpu element under test; only set attributes that were
        # actually provided by the test parameters.
        cpu_xml = vm_xml.VMCPUXML()
        cpu_xml.mode = cpu_mode
        for attr, value in (('model', model),
                            ('fallback', fallback),
                            ('match', match)):
            if value:
                setattr(cpu_xml, attr, value)
        logging.debug(cpu_xml)

        # Install the cpu element into the inactive vm definition
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml.cpu = cpu_xml
        vmxml.sync()
        logging.debug(vmxml)

        # Start the vm and compare the outcome with the expectation
        result = virsh.start(vm_name, debug=True)
        libvirt.check_exit_status(result, status_error)

        # Negative cases: verify the reported error message
        if status_error and error_msg:
            if model.lower() == 'power42':
                msg_list = error_msg.split(',')
                if model.isupper():
                    error_msg = msg_list[0]
                else:
                    error_msg = msg_list[1]
            libvirt.check_result(result, expected_fails=[error_msg])

        # Positive cases: qemu command line and managedsave checkpoints
        if not status_error:
            if qemu_line:
                logging.info('Search for "%s" in qemu command line', qemu_line)
                qemu_cmd_line = process.run('ps -ef|grep qemu',
                                            shell=True,
                                            verbose=True).stdout_text
                if qemu_line not in qemu_cmd_line:
                    test.fail('Fail: qemu command line check: %s not found' %
                              qemu_line)
                logging.info('PASS: qemu command line check')
            if do_managedsave:
                # Save the running guest, then make sure it starts back up
                for operation in (virsh.managedsave, virsh.start):
                    op_result = operation(vm_name, debug=True)
                    libvirt.check_exit_status(op_result)

    finally:
        bkxml.sync()
Example #51
0
    def set_condition(vm_name, condn, reset=False, guestbt=None):
        """
        Set domain to given state or reset it.

        :param vm_name: name of the domain to operate on
        :param condn: condition to apply/undo: "avocado_test", "stress",
                      "save", "managedsave", "suspend", "hotplug" or
                      "host_smt"; any other value is a logged no-op
        :param reset: False to apply the condition, True to undo it
        :param guestbt: background test handle from a prior "avocado_test"
                        call; joined when resetting that condition
        :return: background test handle for "avocado_test" apply path,
                 otherwise None
        """
        bt = None
        if not reset:
            if condn == "avocado_test":
                # Run an avocado test suite inside the guest as background load
                testlist = utils_test.get_avocadotestlist(params)
                bt = utils_test.run_avocado_bg(vm, params, test, testlist)
                if not bt:
                    test.cancel("guest stress failed to start")
                # Allow stress to start
                time.sleep(condn_sleep_sec)
                return bt
            elif condn == "stress":
                utils_test.load_stress("stress_in_vms",
                                       params=params,
                                       vms=[vm])
            elif condn in ["save", "managedsave"]:
                # No action
                pass
            elif condn == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "hotplug":
                # Hotplug vcpus up to max_vcpu, then verify the counts
                result = virsh.setvcpus(vm_name,
                                        max_vcpu,
                                        "--live",
                                        ignore_status=True,
                                        debug=True)
                libvirt.check_exit_status(result)
                # Expected vcpu counts after the live hotplug
                exp_vcpu = {
                    'max_config': max_vcpu,
                    'max_live': max_vcpu,
                    'cur_config': current_vcpu,
                    'cur_live': max_vcpu,
                    'guest_live': max_vcpu
                }
                result = cpu.check_vcpu_value(vm, exp_vcpu, option="--live")
            elif condn == "host_smt":
                # NOTE(review): only power9 hosts allow an SMT change while
                # the guest is live -- confirm intended smt value of 4 here.
                if cpuutil.get_cpu_vendor_name() == 'power9':
                    result = process.run("ppc64_cpu --smt=4", shell=True)
                else:
                    test.cancel(
                        "Host SMT changes not allowed during guest live")
            else:
                logging.debug("No operation for the domain")

        else:
            if condn == "save":
                # Save to a temp file, then restore and clean up the file
                save_file = os.path.join(data_dir.get_tmp_dir(),
                                         vm_name + ".save")
                result = virsh.save(vm_name,
                                    save_file,
                                    ignore_status=True,
                                    debug=True)
                libvirt.check_exit_status(result)
                time.sleep(condn_sleep_sec)
                if os.path.exists(save_file):
                    result = virsh.restore(save_file,
                                           ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif condn == "managedsave":
                result = virsh.managedsave(vm_name,
                                           ignore_status=True,
                                           debug=True)
                libvirt.check_exit_status(result)
                time.sleep(condn_sleep_sec)
                # Starting a managed-saved domain resumes it from the image
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "avocado_test":
                # Wait for the in-guest background test to finish
                guestbt.join()
            elif condn == "stress":
                utils_test.unload_stress("stress_in_vms",
                                         params=params,
                                         vms=[vm])
            elif condn == "hotplug":
                # Unplug vcpus back down to current_vcpu, then verify counts
                result = virsh.setvcpus(vm_name,
                                        current_vcpu,
                                        "--live",
                                        ignore_status=True,
                                        debug=True)
                libvirt.check_exit_status(result)
                # Expected vcpu counts after the live unplug
                exp_vcpu = {
                    'max_config': max_vcpu,
                    'max_live': current_vcpu,
                    'cur_config': current_vcpu,
                    'cur_live': current_vcpu,
                    'guest_live': current_vcpu
                }
                result = cpu.check_vcpu_value(vm, exp_vcpu, option="--live")
            elif condn == "host_smt":
                result = process.run("ppc64_cpu --smt=2", shell=True)
                # Change back the host smt
                result = process.run("ppc64_cpu --smt=4", shell=True)
                # Work around due to known cgroup issue after cpu hot(un)plug
                # sequence
                root_cpuset_path = utils_cgroup.get_cgroup_mountpoint("cpuset")
                machine_cpuset_paths = []
                if os.path.isdir(
                        os.path.join(root_cpuset_path, "machine.slice")):
                    machine_cpuset_paths.append(
                        os.path.join(root_cpuset_path, "machine.slice"))
                if os.path.isdir(os.path.join(root_cpuset_path, "machine")):
                    machine_cpuset_paths.append(
                        os.path.join(root_cpuset_path, "machine"))
                if not machine_cpuset_paths:
                    logging.warning("cgroup cpuset might not recover properly "
                                    "for guests after host smt changes, "
                                    "restore it manually")
                root_cpuset_cpus = os.path.join(root_cpuset_path,
                                                "cpuset.cpus")
                for path in machine_cpuset_paths:
                    machine_cpuset_cpus = os.path.join(path, "cpuset.cpus")
                    # check if file content differs
                    cmd = "diff %s %s" % (root_cpuset_cpus,
                                          machine_cpuset_cpus)
                    if process.system(cmd, verbose=True, ignore_status=True):
                        # Restore the machine cgroup cpuset from the root one
                        cmd = "cp %s %s" % (root_cpuset_cpus,
                                            machine_cpuset_cpus)
                        process.system(cmd, verbose=True)

            else:
                logging.debug("No need recover the domain")
        return bt
Example #52
0
def run(test, params, env):
    """
    Test command: virsh net-destroy.

    The command can forcefully stop a given network.
    1.Make sure the network exists.
    2.Prepare network status.
    3.Perform virsh net-destroy operation.
    4.Check if the network has been destroied.
    5.Recover network environment.
    6.Confirm the test result.

    :param test: avocado test object
    :param params: test parameters dictionary
    :param env: test environment object
    """

    net_ref = params.get("net_destroy_net_ref")
    extra = params.get("net_destroy_extra", "")
    network_name = params.get("net_destroy_network", "default")
    network_status = params.get("net_destroy_status", "active")
    status_error = params.get("status_error", "no")
    net_persistent = "yes" == params.get("net_persistent", "yes")
    net_cfg_file = params.get("net_cfg_file",
                              "/usr/share/libvirt/networks/default.xml")
    check_libvirtd = "yes" == params.get("check_libvirtd")
    vm_defined = "yes" == params.get("vm_defined")
    check_vm = "yes" == params.get("check_vm")

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            # NOTE(review): '******' looks like a redacted placeholder for
            # the real fallback test username -- confirm intended value.
            unprivileged_user = '******'

    output_all = virsh.net_list("--all").stdout.strip()
    # prepare the network status: active, persistent
    if not re.search(network_name, output_all):
        if net_persistent:
            virsh.net_define(net_cfg_file, ignore_status=False)
            virsh.net_start(network_name, ignore_status=False)
        else:
            virsh.create(net_cfg_file, ignore_status=False)
    # Backup the current network xml
    net_xml_bk = os.path.join(data_dir.get_tmp_dir(), "%s.xml" % network_name)
    virsh.net_dumpxml(network_name, to_file=net_xml_bk)
    # Align the network's persistence with the requested test scenario
    if net_persistent:
        if not virsh.net_state_dict()[network_name]['persistent']:
            logging.debug("make the network persistent...")
            virsh.net_define(net_xml_bk)
    else:
        if virsh.net_state_dict()[network_name]['persistent']:
            virsh.net_undefine(network_name, ignore_status=False)
    # Align the network's active state with the requested test scenario
    if not virsh.net_state_dict()[network_name]['active']:
        if network_status == "active":
            virsh.net_start(network_name, ignore_status=False)
    else:
        if network_status == "inactive":
            logging.debug(
                "destroy network as we need to test inactive network...")
            virsh.net_destroy(network_name, ignore_status=False)
    logging.debug("After prepare: %s" % virsh.net_state_dict())

    # Run test case
    if net_ref == "uuid":
        net_ref = virsh.net_uuid(network_name).stdout.strip()
    elif net_ref == "name":
        net_ref = network_name

    if check_libvirtd or check_vm:
        vm_name = params.get("main_vm")
        if virsh.is_alive(vm_name):
            virsh.destroy(vm_name)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml
        # make sure there is interface with source network as default
        iface_devices = vmxml.get_devices(device_type="interface")
        has_default_net = False
        for iface in iface_devices:
            source = iface.get_source()
            if 'network' in source.keys() and source['network'] == 'default':
                has_default_net = True
                break
            elif 'bridge' in source.keys() and source['bridge'] == 'virbr0':
                has_default_net = True
                break
        if not has_default_net:
            options = "network default --current"
            virsh.attach_interface(vm_name, options, ignore_status=False)
        try:
            if vm_defined:
                ret = virsh.start(vm_name)
            else:
                logging.debug("undefine the vm, then create the vm...")
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                virsh.undefine(vm_name)
                ret = virsh.create(vmxml.xml)
                logging.debug(ret.stdout)
            # check the create or start cmd status
            utils_test.libvirt.check_exit_status(
                ret, expect_error=(network_status != 'active'))
            # Default to failure; set to 0 only when the checks below pass
            status = 1

            if status_error != 'yes':
                libvirtd = utils_libvirtd.Libvirtd("virtqemud")
                daemon_name = libvirtd.service_name
                pid_before_run = utils_misc.get_pid(daemon_name)
                ret = virsh.net_destroy(net_ref,
                                        extra,
                                        uri=uri,
                                        debug=True,
                                        unprivileged_user=unprivileged_user,
                                        ignore_status=True)
                utils_test.libvirt.check_exit_status(ret, expect_error=False)
                # check_libvirtd pid no change
                pid_after_run = utils_misc.get_pid(daemon_name)
                if pid_after_run != pid_before_run:
                    test.fail("libvirtd crash after destroy network!")
                    status = 1
                else:
                    logging.debug(
                        "libvirtd do not crash after destroy network!")
                    status = 0
                if check_libvirtd:
                    # destroy vm, check libvirtd pid no change
                    ret = virsh.destroy(vm_name)
                    utils_test.libvirt.check_exit_status(ret,
                                                         expect_error=False)
                    pid_after_run2 = utils_misc.get_pid(daemon_name)
                    if pid_after_run2 != pid_before_run:
                        test.fail("libvirtd crash after destroy vm!")
                        status = 1
                    else:
                        logging.debug(
                            "libvirtd do not crash after destroy vm!")
                        status = 0
                elif check_vm:
                    # restart libvirtd and check vm is running
                    libvirtd = utils_libvirtd.Libvirtd()
                    libvirtd.restart()
                    if not virsh.is_alive(vm_name):
                        test.fail(
                            "vm shutdown when transient network destroyed then libvirtd restart"
                        )
                    else:
                        status = 0

        finally:
            # A transient vm was created above; re-define it before syncing
            if not vm_defined:
                vmxml_backup.define()
            vmxml_backup.sync()

    else:
        readonly = (params.get("net_destroy_readonly", "no") == "yes")
        status = virsh.net_destroy(net_ref,
                                   extra,
                                   uri=uri,
                                   readonly=readonly,
                                   debug=True,
                                   unprivileged_user=unprivileged_user,
                                   ignore_status=True).exit_status
        # Confirm the network has been destroyed.
        if net_persistent:
            if virsh.net_state_dict()[network_name]['active']:
                status = 1
        else:
            output_all = virsh.net_list("--all").stdout.strip()
            if re.search(network_name, output_all):
                status = 1
                logging.debug(
                    "transient network should not exists after destroy")

    # Recover network status to system default status
    try:
        if network_name not in virsh.net_state_dict():
            virsh.net_define(net_xml_bk, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['active']:
            virsh.net_start(network_name, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['persistent']:
            virsh.net_define(net_xml_bk, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['autostart']:
            virsh.net_autostart(network_name, ignore_status=False)
    except process.CmdError:
        test.error("Recover network status failed!")

    # Clean up the backup network xml file
    if os.path.isfile(net_xml_bk):
        data_dir.clean_tmp_files()
        logging.debug("Cleaning up the network backup xml")

    # Check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command")
    else:
        test.error("The status_error must be 'yes' or 'no'!")
Example #53
0
def run(test, params, env):
    """
    Test vcpu hotpluggable item in xml

    1. Set the libvirtd log filter/level/file
    2. Restart libvirtd
    3. Start vm by xml with vcpu hotpluggable
    4. Check the qemu command line
    5. Check the libvirtd log
    6. Restart libvrtd
    7. Check the vm xml

    :param test: avocado test object
    :param params: test parameters dictionary
    :param env: test environment object
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vcpus_placement = params.get("vcpus_placement", "static")
    vcpus_crt = int(params.get("vcpus_current", "4"))
    vcpus_max = int(params.get("vcpus_max", "8"))
    vcpus_enabled = params.get("vcpus_enabled", "")
    vcpus_hotplug = params.get("vcpus_hotpluggable", "")
    vcpus_order = params.get("vcpus_order")
    err_msg = params.get("err_msg", "")
    config_libvirtd = params.get("config_libvirtd", "yes") == "yes"
    log_file = params.get("log_file", "libvirtd.log")
    live_vcpus = params.get("set_live_vcpus", "")
    config_vcpus = params.get("set_config_vcpus", "")
    enable_vcpu = params.get("set_enable_vcpu", "")
    disable_vcpu = params.get("set_disable_vcpu", "")
    # Install cgroup utils
    cgutils = "libcgroup-tools"
    # NOTE(review): platform.dist() was deprecated in Python 3.5 and removed
    # in 3.8 -- this will raise AttributeError on newer interpreters;
    # consider the 'distro' package when the framework allows it.
    if "ubuntu" in platform.dist()[0].lower():
        cgutils = "cgroup-tools"
    sm = SoftwareManager()
    if not sm.check_installed(cgutils) and not sm.install(cgutils):
        test.cancel("cgroup utils package install failed")
    # Backup domain XML
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        # Configure libvirtd log
        if config_libvirtd:
            config_path = os.path.join(data_dir.get_tmp_dir(), log_file)
            # Touch the log file so libvirtd can append to it immediately
            with open(config_path, 'a'):
                pass
            config = utils_config.LibvirtdConfig()
            log_outputs = "1:file:%s" % config_path
            config.log_outputs = log_outputs
            config.log_level = 1
            config.log_filters = "1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event"

            # Restart libvirtd to make the changes take effect in libvirt
            libvirtd.restart()

        # Set vcpu: placement,current,max vcpu
        vmxml.placement = vcpus_placement
        vmxml.vcpu = vcpus_max
        vmxml.current_vcpu = vcpus_crt
        del vmxml.cpuset

        # Create vcpu xml with vcpu hotpluggable and order
        vcpu_list = []
        vcpu = {}
        en_list = vcpus_enabled.split(",")
        hotplug_list = vcpus_hotplug.split(",")
        order_dict = ast.literal_eval(vcpus_order)

        for vcpu_id in range(vcpus_max):
            vcpu['id'] = str(vcpu_id)
            if str(vcpu_id) in en_list:
                vcpu['enabled'] = 'yes'
                if str(vcpu_id) in order_dict:
                    vcpu['order'] = order_dict[str(vcpu_id)]
            else:
                vcpu['enabled'] = 'no'
            if str(vcpu_id) in hotplug_list:
                vcpu['hotpluggable'] = 'yes'
            else:
                vcpu['hotpluggable'] = 'no'
            vcpu_list.append(copy.copy(vcpu))
            vcpu = {}

        vcpus_xml = vm_xml.VMVCPUSXML()
        vcpus_xml.vcpu = vcpu_list

        vmxml.vcpus = vcpus_xml

        # Remove influence from topology setting
        try:
            logging.info('Remove influence from topology setting')
            cpuxml = vmxml.cpu
            del cpuxml.topology
            vmxml.cpu = cpuxml
        except Exception as e:
            # Best-effort: the domain may have no <cpu>/<topology> element;
            # log instead of silently discarding the exception.
            logging.debug("Ignoring topology removal failure: %s", e)

        vmxml.sync()

        # Start VM
        logging.info("Start VM with vcpu hotpluggable and order...")
        ret = virsh.start(vm_name, ignore_status=True)

        if err_msg:
            libvirt.check_result(ret, err_msg)
        else:
            # Wait for domain
            vm.wait_for_login()

            if enable_vcpu:
                ret = virsh.setvcpu(vm_name,
                                    enable_vcpu,
                                    "--enable",
                                    ignore_status=False,
                                    debug=True)
                vcpus_crt += 1
            if disable_vcpu:
                # Fixed typo: was 'ingnore_status=False', which was swallowed
                # by the kwargs and left failures unraised.
                ret = virsh.setvcpu(vm_name,
                                    disable_vcpu,
                                    "--disable",
                                    ignore_status=False,
                                    debug=True)
                vcpus_crt -= 1
            if live_vcpus:
                ret = virsh.setvcpus(vm_name,
                                     live_vcpus,
                                     ignore_status=False,
                                     debug=True)
                vcpus_crt = int(live_vcpus)
            if config_vcpus:
                ret = virsh.setvcpus(vm_name,
                                     config_vcpus,
                                     "--config",
                                     ignore_status=False,
                                     debug=True)

            # Check QEMU command line
            cmd = ("ps -ef| grep %s| grep 'maxcpus=%s'" % (vm_name, vcpus_max))
            ret = process.run(cmd, ignore_status=False, shell=True)
            if ret.exit_status != 0:
                logging.error("Maxcpus in QEMU command line is wrong!")

            # Check libvirtd log
            if config_libvirtd:
                for vcpu in vcpu_list:
                    if vcpu['enabled'] == 'yes' and vcpu[
                            'hotpluggable'] == 'yes':
                        cmd = (
                            "cat %s| grep device_add| grep qemuMonitorIOWrite"
                            "| grep 'vcpu%s'" % (config_path, vcpu['id']))
                        ret = process.run(cmd, ignore_status=False, shell=True)
                        if ret.exit_status != 0:
                            logging.error(
                                "Failed to find lines about enabled vcpu%s"
                                "in libvirtd log.", vcpu['id'])

            # Dumpxml
            dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            vcpu_items = re.findall(r"vcpu.*", dump_xml)

            # Check guest vcpu count
            ret = virsh.vcpucount(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            # Fixed regex: '[config|live]' was a character class matching any
            # single char of 'configlve|'; '(?:config|live)' is the intended
            # alternation.
            max_list = re.findall(
                r"maximum.*(?:config|live).*%s\n" % vcpus_max, output)
            if len(max_list) != 2:
                test.fail("vcpucount maximum info is not correct.")

            if live_vcpus:
                crt_live_list = re.findall(r"current.*live.*%s" % live_vcpus,
                                           output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current live info is not correct.")
            elif config_vcpus:
                crt_cfg_list = re.findall(
                    r"current.*config.*%s" % config_vcpus, output)
                logging.info("vcpucount crt_cfg_list: \n %s", crt_cfg_list)
                if len(crt_cfg_list) != 1:
                    test.fail("vcpucount: current config info is not correct.")
            else:
                crt_live_list = re.findall(r"current.*live.*%s" % vcpus_crt,
                                           output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current info is not correct.")

            # Check guest vcpu info
            ret = virsh.vcpuinfo(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            vcpu_lines = re.findall(r"VCPU:.*\n", output)
            logging.info("vcpuinfo vcpu_lines: \n %s", vcpu_lines)
            if len(vcpu_lines) != vcpus_crt:
                test.fail("vcpuinfo is not correct.")

            # Check cpu in guest
            if not cpu.check_if_vm_vcpu_match(vcpus_crt, vm):
                test.fail(
                    "cpu number in VM is not correct, it should be %s cpus" %
                    vcpus_crt)

            # Check VM xml change for cold-plug/cold-unplug
            if config_vcpus:
                inactive_xml = virsh.dumpxml(vm_name,
                                             "--inactive").stdout.strip()
                crt_vcpus_xml = re.findall(
                    r"vcpu.*current=.%s.*" % config_vcpus, inactive_xml)
                logging.info("dumpxml --inactive xml: \n %s", crt_vcpus_xml)
                if len(crt_vcpus_xml) != 1:
                    test.fail("Dumpxml with --inactive,"
                              "the vcpu current is not correct.")

            # Restart libvirtd
            libvirtd.restart()

            # Recheck VM xml
            re_dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            re_vcpu_items = re.findall(r"vcpu.*", re_dump_xml)
            if vcpu_items != re_vcpu_items:
                test.fail("After restarting libvirtd,"
                          "VM xml changed unexpectedly.")

            # Check cgroup info
            en_vcpu_list = re.findall(r"vcpu.*enabled=.yes.*", re_dump_xml)
            for vcpu_sn in range(len(en_vcpu_list)):
                vcpu_id = en_vcpu_list[vcpu_sn].split("=")[1].split()[0].strip(
                    '\'')
                cmd = ("lscgroup| grep cpuset| grep %s| grep vcpu%s" %
                       (vm_name[-3:], vcpu_id))
                ret = process.run(cmd, ignore_status=False, shell=True)
                if ret.exit_status != 0:
                    test.fail("Failed to find lines about enabled vcpu%s"
                              "in lscgroup info." % vcpu_id)
    finally:
        # Recover libvirtd configration
        if config_libvirtd:
            config.restore()
            if os.path.exists(config_path):
                os.remove(config_path)

        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
Example #54
0
def run(test, params, env):
    """
    Test command: virsh setvcpus.

    The command can change the number of virtual CPUs in the guest domain.
    1.Prepare test environment,destroy or suspend a VM.
    2.Perform virsh setvcpus operation.
    3.Recover test environment.
    4.Confirm the test result.

    :param test: test object providing tmpdir and result reporting.
    :param params: dict of test parameters from the configuration.
    :param env: test environment holding the VM objects.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("setvcpus_pre_vm_state")
    command = params.get("setvcpus_command", "setvcpus")
    options = params.get("setvcpus_options")
    vm_ref = params.get("setvcpus_vm_ref", "name")
    count = params.get("setvcpus_count", "")
    convert_err = "Can't convert {0} to integer type"
    try:
        count = int(count)
    except ValueError:
        # 'count' may deliberately be a non-numeric value in negative
        # tests, so a conversion failure is only logged, not fatal.
        logging.debug(convert_err.format(count))
    # BUG FIX: int() used to be applied *before* entering the try block,
    # so a malformed parameter raised an unhandled ValueError and the
    # except clause below could never fire. Convert inside the guard so
    # the intended TestError is raised instead.
    current_vcpu = params.get("setvcpus_current", "1")
    try:
        current_vcpu = int(current_vcpu)
    except ValueError:
        raise error.TestError(convert_err.format(current_vcpu))
    max_vcpu = params.get("setvcpus_max", "4")
    try:
        max_vcpu = int(max_vcpu)
    except ValueError:
        raise error.TestError(convert_err.format(max_vcpu))
    extra_param = params.get("setvcpus_extra_param")
    count_option = "%s %s" % (count, extra_param)
    status_error = params.get("status_error")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "")
    remote_prompt = params.get("remote_prompt", "#")
    tmpxml = os.path.join(test.tmpdir, 'tmp.xml')
    set_topology = "yes" == params.get("set_topology", "no")
    sockets = params.get("topology_sockets")
    cores = params.get("topology_cores")
    threads = params.get("topology_threads")
    start_vm_after_set = "yes" == params.get("start_vm_after_set", "no")
    start_vm_expect_fail = "yes" == params.get("start_vm_expect_fail", "no")
    remove_vm_feature = params.get("remove_vm_feature", "")

    # Early death: remote tests require real IP addresses to be configured.
    if vm_ref == "remote" and (remote_ip.count("EXAMPLE.COM")
                               or local_ip.count("EXAMPLE.COM")):
        raise error.TestNAError("remote/local ip parameters not set.")

    # Save original configuration so it can be restored after the test.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    # Normal processing of the test is to set the maximum vcpu count to 4,
    # and set the current vcpu count to 1, then adjust the 'count' value to
    # plug or unplug vcpus.
    #
    # This is generally fine when the guest is not running; however, the
    # hotswap functionality hasn't always worked very well and is under
    # going lots of change from using the hmp "cpu_set" command in 1.5
    # to a new qmp "cpu-add" added in 1.6 where the "cpu-set" command
    # seems to have been deprecated making things very messy.
    #
    # To further muddy the waters, the "cpu-add" functionality is supported
    # for specific machine type versions. For the purposes of this test that
    # would be "pc-i440fx-1.5" or "pc-q35-1.5" or later type machines (from
    # guest XML "<os> <type ... machine=''/type> </os>"). Depending on which
    # version of qemu/kvm was used to initially create/generate the XML for
    # the machine this could result in a newer qemu still using 1.4 or earlier
    # for the machine type.
    #

    # NOTE(review): the except/finally for this try block does not appear in
    # this chunk of the file — the source looks truncated/concatenated.
    # Verify against the original test module before relying on cleanup.
    try:
        # Start from a powered-off guest so XML edits below take effect.
        if vm.is_alive():
            vm.destroy()

        # Set maximum vcpus, so we can run all kinds of normal tests without
        # encounter requested vcpus greater than max allowable vcpus error
        vmxml.set_vm_vcpus(vm_name, max_vcpu, current_vcpu)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Pre-test xml is %s", vmxml.xmltreefile)

        # Get the number of cpus, current value if set, and machine type
        orig_count, orig_current, mtype = get_xmldata(vm_name, tmpxml, options)
        logging.debug(
            "Before run setvcpus: cpu_count=%d, cpu_current=%d,"
            " mtype=%s", orig_count, orig_current, mtype)

        # Set cpu topology in the guest XML when requested by parameters.
        if set_topology:
            vmcpu_xml = vm_xml.VMCPUXML()
            vmcpu_xml['topology'] = {
                'sockets': sockets,
                'cores': cores,
                'threads': threads
            }
            vmxml['cpu'] = vmcpu_xml
            vmxml.sync()

        # Remove vm features
        if remove_vm_feature:
            try:
                vmfeature_xml = vmxml['features']
            # NOTE(review): Python 2-only 'except X, e' syntax; needs
            # 'except X as e' under Python 3 — confirm target interpreter.
            except xcepts.LibvirtXMLNotFoundError, e:
                logging.debug("features not found in xml\n%s", e)
            else:
                vmfeature_xml.remove_feature(remove_vm_feature)
                vmxml['features'] = vmfeature_xml
                vmxml.sync()
                logging.debug("xml after remove feature is:\n%s",
                              vmxml.xmltreefile)

        # Restart, unless that's not our test
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()

        if orig_count == 1 and count == 1:
            logging.debug(
                "Original vCPU count is 1, just checking if setvcpus "
                "can still set current.")

        domid = vm.get_id()  # only valid for running
        domuuid = vm.get_uuid()

        # Put the guest into the requested pre-test state.
        if pre_vm_state == "paused":
            vm.pause()
        elif pre_vm_state == "shut off" and vm.is_alive():
            vm.destroy()

        # Run test
        if vm_ref == "remote":
            # remote_test drives setvcpus over a remote connection and may
            # adjust status_error; defined elsewhere in the original module.
            (setvcpu_exit_status, status_error,
             setvcpu_exit_stderr) = remote_test(remote_ip, local_ip,
                                                remote_pwd, remote_prompt,
                                                vm_name, status_error)
        else:
            # Resolve the domain reference (name/id/uuid or a literal value,
            # possibly intentionally invalid for negative tests).
            if vm_ref == "name":
                dom_option = vm_name
            elif vm_ref == "id":
                dom_option = domid
                if params.get("setvcpus_hex_id") is not None:
                    dom_option = hex(int(domid))
                elif params.get("setvcpus_invalid_id") is not None:
                    dom_option = params.get("setvcpus_invalid_id")
            elif vm_ref == "uuid":
                dom_option = domuuid
                if params.get("setvcpus_invalid_uuid") is not None:
                    dom_option = params.get("setvcpus_invalid_uuid")
            else:
                dom_option = vm_ref

            # Skip the test when the installed libvirt lacks any option.
            option_list = options.split(" ")
            for item in option_list:
                if virsh.has_command_help_match(command, item) is None:
                    raise error.TestNAError("The current libvirt version"
                                            " doesn't support '%s' option" %
                                            item)
            status = virsh.setvcpus(dom_option,
                                    count_option,
                                    options,
                                    ignore_status=True,
                                    debug=True)
            setvcpu_exit_status = status.exit_status
            setvcpu_exit_stderr = status.stderr.strip()

            # Start VM after set vcpu
            if start_vm_after_set:
                if vm.is_alive():
                    logging.debug("VM already started")
                else:
                    result = virsh.start(vm_name,
                                         ignore_status=True,
                                         debug=True)
                    libvirt.check_exit_status(result, start_vm_expect_fail)
        # NOTE(review): this nested function closes over many names that do
        # not appear in this chunk (output_mode, checkpoint, vmchecker,
        # skip_vm_check, error_list, ...). It appears to belong to a v2v
        # conversion test, not the setvcpus test above — the file looks like
        # a concatenation of separate examples; verify before editing.
        def vm_check(status_error):
            """
            Run post-conversion checks on the converted VM.

            :param status_error: truthy when the conversion was expected to
                                 fail; in that case no checking is done.
            """
            if status_error:
                return

            # Output-format-only modes: validate the output and stop early.
            if output_mode == 'json' and not check_json_output(params):
                test.fail('check json output failed')
            if output_mode == 'local' and not check_local_output(params):
                test.fail('check local output failed')
            if output_mode in ['null', 'json', 'local']:
                return

            # vmchecker must be put before skip_vm_check in order to clean up
            # the VM.
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if skip_vm_check == 'yes':
                logging.info('Skip checking vm after conversion: %s' %
                             skip_reason)
                return

            # Make the converted guest runnable on the target platform.
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                virsh.start(vm_name, debug=True)

            # Check guest following the checkpoint document after conversion
            logging.info('Checking common checkpoints for v2v')
            if 'ogac' in checkpoint:
                # windows guests will reboot at any time after qemu-ga is
                # installed. The process cannot be controlled. In order to
                # don't break vmchecker.run() process, It's better to put
                # check_windows_ogac before vmchecker.run(). Because in
                # check_windows_ogac, it waits until rebooting completes.
                vmchecker.checker.create_session()
                if os_type == 'windows':
                    services = ['qemu-ga']
                    if not utils_v2v.multiple_versions_compare(
                            V2V_UNSUPPORT_RHEV_APT_VER):
                        services.append('rhev-apt')
                    if 'rhv-guest-tools' in os.getenv('VIRTIO_WIN'):
                        services.append('spice-ga')
                    for ser in services:
                        check_windows_service(vmchecker.checker, ser)
                else:
                    check_linux_ogac(vmchecker.checker)
            if 'mac_ip' in checkpoint:
                check_static_ip_conf(vmchecker.checker)
            ret = vmchecker.run()
            if len(ret) == 0:
                logging.info("All common checkpoints passed")
            # Check specific checkpoints
            if 'ogac' in checkpoint and 'signature' in checkpoint:
                if not utils_v2v.multiple_versions_compare(
                        V2V_UNSUPPORT_RHEV_APT_VER):
                    check_windows_signature(vmchecker.checker,
                                            r'c:\rhev-apt.exe')
            if 'cdrom' in checkpoint and "device='cdrom'" not in vmchecker.vmxml:
                test.fail('CDROM no longer exists')
            if 'vmtools' in checkpoint:
                check_vmtools(vmchecker.checker, checkpoint)
            if 'modprobe' in checkpoint:
                check_modprobe(vmchecker.checker)
            if 'device_map' in checkpoint:
                check_device_map(vmchecker.checker)
            if 'resume_swap' in checkpoint:
                check_resume_swap(vmchecker.checker)
            if 'rhev_file' in checkpoint:
                check_rhev_file_exist(vmchecker.checker)
            if 'file_architecture' in checkpoint:
                check_file_architecture(vmchecker.checker)
            if 'ubuntu_tools' in checkpoint:
                check_ubuntools(vmchecker.checker)
            if 'vmware_tools' in checkpoint:
                check_windows_vmware_tools(vmchecker.checker)
            if 'without_default_net' in checkpoint:
                if virsh.net_state_dict()[net_name]['active']:
                    log_fail("Bridge virbr0 already started during conversion")
            if 'rhsrvany_checksum' in checkpoint:
                check_rhsrvany_checksums(vmchecker.checker)
            if 'block_dev' in checkpoint and not os.path.exists(blk_dev_link):
                test.fail("checkpoint '%s' failed" % checkpoint)
            # Merge 2 error lists
            error_list.extend(vmchecker.errors)
            # Virtio drivers will not be installed without virtio-win setup
            if 'virtio_win_unset' in checkpoint:
                missing_list = params.get('missing').split(',')
                expect_errors = ['Not find driver: ' + x for x in missing_list]
                logging.debug('Expect errors: %s' % expect_errors)
                logging.debug('Actual errors: %s' % error_list)
                if set(error_list) == set(expect_errors):
                    error_list[:] = []
                else:
                    logging.error('Virtio drivers not meet expectation')
Example #56
0
def run(test, params, env):
    """
    Test for basic serial character device function.

    1) Define the VM with specified serial device and define result meets
       expectation.
    2) Test whether defined XML meets expectation
    3) Start the guest and check if start result meets expectation
    4) Test the function of started serial device
    5) Shutdown the VM and check whether cleaned up properly
    6) Clean up environment

    :param test: test object used for cancel/fail reporting.
    :param params: dict of test parameters from the configuration.
    :param env: test environment providing the VM under test.
    """
    def set_targets(serial):
        """
        Fill in the target model/type of a serial device XML based on the
        host architecture and the test parameters.
        """
        arch = platform.machine()
        if "ppc" in arch:
            model, ttype = 'spapr-vty', 'spapr-vio-serial'
        elif "aarch" in arch:
            model, ttype = 'pl011', 'system-serial'
        elif "s390x" in arch and 'sclp' not in serial_type:
            # Prefer the explicitly requested model, else the s390x default.
            model = target_model if target_model else 'sclpconsole'
            ttype = 'sclp-serial'
        else:
            # Generic (x86-like) case: model mirrors the requested type.
            model, ttype = target_type, target_type
        serial.target_model = model
        serial.target_type = ttype

    def prepare_spice_graphics_device():
        """
        Build a spice graphics device XML with auto-allocated ports.
        """
        spice_dev = Graphics(type_name='spice')
        for attr, value in (('autoport', "yes"),
                            ('port', "-1"),
                            ('tlsPort', "-1")):
            setattr(spice_dev, attr, value)
        return spice_dev

    def prepare_serial_device():
        """
        Create the serial device XML under test from the 'serial_sources'
        parameter. A TLS device is modelled as a tcp chardev.
        """
        dev_type = 'tcp' if serial_type == 'tls' else serial_type
        serial = librarian.get('serial')(dev_type)
        serial.target_port = "0"
        set_targets(serial)

        # 'serial_sources' is whitespace-separated source specs, each a
        # comma-separated list of key:value attributes.
        logging.debug(sources_str)
        sources = []
        for chunk in sources_str.split():
            pairs = (item.split(':') for item in chunk.split(','))
            sources.append({key: val for key, val in pairs})
        serial.sources = sources
        return serial

    def prepare_console_device():
        """
        Create the console device XML under test from the test parameters.
        A TLS console is modelled as a tcp chardev.
        """
        dev_type = 'tcp' if serial_type == 'tls' else serial_type
        console = librarian.get('console')(dev_type)
        console.target_type = console_target_type
        console.target_port = console_target_port

        # 'serial_sources' is whitespace-separated source specs, each a
        # comma-separated list of key:value attributes.
        logging.debug(sources_str)
        sources = []
        for chunk in sources_str.split():
            pairs = (item.split(':') for item in chunk.split(','))
            sources.append({key: val for key, val in pairs})
        console.sources = sources
        return console

    def define_and_check():
        """
        Predict the define-time error messages for the device under test,
        then define the guest and compare the result against the prediction.

        :return: True when the guest was defined successfully.
        """
        def source_lacks(attr):
            # True when no <source> of the tested device carries 'attr'.
            return not any(attr in src for src in serial_dev.sources)

        fail_patts = []
        if serial_type in ('dev', 'file', 'pipe', 'unix') and source_lacks('path'):
            fail_patts.append(r"Missing source path attribute for char device")
        if serial_type == 'tcp' and source_lacks('host'):
            fail_patts.append(r"Missing source host attribute for char device")
        if serial_type in ('tcp', 'udp') and source_lacks('service'):
            fail_patts.append(r"Missing source service attribute for char "
                              "device")
        if serial_type == 'spiceport' and source_lacks('channel'):
            fail_patts.append(r"Missing source channel attribute for char "
                              "device")
        if serial_type == 'nmdm':
            if source_lacks('master'):
                fail_patts.append(r"Missing master path attribute for nmdm device")
            if source_lacks('slave'):
                fail_patts.append(r"Missing slave path attribute for nmdm device")
        if serial_type == 'spicevmc':
            fail_patts.append(r"spicevmc device type only supports virtio")

        vm_xml.undefine()
        res = vm_xml.virsh.define(vm_xml.xml)
        libvirt.check_result(res, expected_fails=fail_patts)
        return not res.exit_status

    def check_xml():
        """
        Predict the result serial device and generated console device
        and check the result domain XML against expectation
        """
        console_cls = librarian.get('console')

        local_serial_type = serial_type

        # TLS devices appear as tcp chardevs in the domain XML.
        if serial_type == 'tls':
            local_serial_type = 'tcp'
        # Predict expected serial and console XML
        expected_console = console_cls(local_serial_type)

        # libvirt fills in mode='connect' for udp sources that only give a
        # service; mirror that in the expectation.
        if local_serial_type == 'udp':
            sources = []
            for source in serial_dev.sources:
                if 'service' in source and 'mode' not in source:
                    source['mode'] = 'connect'
                sources.append(source)
        else:
            sources = serial_dev.sources

        expected_console.sources = sources

        if local_serial_type == 'tcp':
            # NOTE(review): this is a substring test against the string
            # 'tcp', which is always False, so the "raw" default is always
            # taken; probably meant to check serial_dev — confirm intent.
            if 'protocol_type' in local_serial_type:
                expected_console.protocol_type = serial_dev.protocol_type
            else:
                expected_console.protocol_type = "raw"

        expected_console.target_port = serial_dev.target_port
        if 'target_type' in serial_dev:
            expected_console.target_type = serial_dev.target_type
        # NOTE(review): unconditionally overwrites the value assigned just
        # above, making that branch a dead store — confirm intent.
        expected_console.target_type = console_target_type
        logging.debug("Expected console XML is:\n%s", expected_console)

        # Get current serial and console XML
        current_xml = VMXML.new_from_dumpxml(vm_name)
        serial_elem = current_xml.xmltreefile.find('devices/serial')
        console_elem = current_xml.xmltreefile.find('devices/console')
        if console_elem is None:
            test.fail("Expect generate console automatically, "
                      "but found none.")
        if serial_elem and console_target_type != 'serial':
            test.fail("Don't Expect exist serial device, "
                      "but found:\n%s" % serial_elem)

        cur_console = console_cls.new_from_element(console_elem)
        logging.debug("Current console XML is:\n%s", cur_console)
        # Compare current serial and console with oracle.
        if not expected_console == cur_console:
            # "==" has been override
            test.fail("Expect generate console:\n%s\nBut got:\n%s" %
                      (expected_console, cur_console))

        if console_target_type == 'serial':
            serial_cls = librarian.get('serial')
            expected_serial = serial_cls(local_serial_type)
            expected_serial.sources = sources

            set_targets(expected_serial)

            if local_serial_type == 'tcp':
                # NOTE(review): same always-False substring test as above.
                if 'protocol_type' in local_serial_type:
                    expected_serial.protocol_type = serial_dev.protocol_type
                else:
                    expected_serial.protocol_type = "raw"
            expected_serial.target_port = serial_dev.target_port
            if serial_elem is None:
                test.fail("Expect exist serial device, " "but found none.")
            cur_serial = serial_cls.new_from_element(serial_elem)
            # pci-serial devices must have a PCI address assigned.
            if target_type == 'pci-serial':
                if cur_serial.address is None:
                    test.fail("Expect serial device address is not assigned")
                else:
                    logging.debug("Serial address is: %s", cur_serial.address)

            logging.debug("Expected serial XML is:\n%s", expected_serial)
            logging.debug("Current serial XML is:\n%s", cur_serial)
            # Compare current serial and console with oracle.
            if (target_type != 'pci-serial' and machine_type != 'pseries'
                    and not expected_serial == cur_serial):
                # "==" has been override
                test.fail("Expect serial device:\n%s\nBut got:\n "
                          "%s" % (expected_serial, cur_serial))

    def prepare_serial_console():
        """
        Build a console helper matching the tested serial type, or None
        when the type has no console test support yet.
        """
        is_server = console_type == 'server'
        # Types whose console is backed by a filesystem path.
        # (The unix socket path must match the SELinux label.)
        path_consoles = {
            'unix': '/var/lib/libvirt/qemu/virt-test',
            'file': '/var/log/libvirt/virt-test',
            'pipe': '/tmp/virt-test',
        }
        if serial_type in path_consoles:
            return Console(serial_type, path_consoles[serial_type], is_server)
        if serial_type == 'tls':
            return Console('tls', ('127.0.0.1', 5556), is_server,
                           custom_pki_path)
        if serial_type in ('tcp', 'udp'):
            return Console(serial_type, ('127.0.0.1', 2445), is_server)
        logging.debug("Serial type %s don't support console test yet.",
                      serial_type)
        return None

    def check_qemu_cmdline():
        """
        Check if VM's qemu command line meets expectation.

        Builds the expected -chardev/-device option strings from the tested
        XML and matches them (as regexes) against /proc/<pid>/cmdline.
        """
        # BUG FIX: open the proc file with a context manager so the file
        # handle is closed instead of leaked.
        with open('/proc/%s/cmdline' % vm.get_pid()) as cmdline_fp:
            cmdline = cmdline_fp.read()
        logging.debug('Qemu command line: %s', cmdline)
        options = cmdline.split('\x00')
        ser_opt = None
        con_opt = None
        ser_dev = None
        con_dev = None
        exp_ser_opt = None
        exp_ser_dev = None
        exp_con_opt = None
        exp_con_dev = None
        # Get serial and console options from qemu command line
        for idx, opt in enumerate(options):
            if opt == '-chardev':
                if 'id=charserial' in options[idx + 1]:
                    ser_opt = options[idx + 1]
                    ser_dev = options[idx + 3]
                    logging.debug('Serial option is: %s', ser_opt)
                    logging.debug('Serial device option is: %s', ser_dev)
                if 'id=charconsole' in options[idx + 1]:
                    con_opt = options[idx + 1]
                    con_dev = options[idx + 3]
                    logging.debug('Console option is: %s', con_opt)
                    logging.debug('Console device option is: %s', con_dev)

        # Get expected serial and console options from params
        if serial_type == 'dev':
            ser_type = 'tty'
        elif serial_type in ['unix', 'tcp', 'tls']:
            ser_type = 'socket'
        else:
            ser_type = serial_type

        # NOTE(review): 'path'/'host'/'port'/'mode' below may be unbound if
        # the configured sources lack the expected keys; preserved as-is.
        exp_ser_opts = [ser_type, 'id=charserial0']
        if serial_type in ['dev', 'file', 'pipe', 'unix']:
            for source in serial_dev.sources:
                if 'path' in source:
                    path = source['path']
            if serial_type == 'file':
                # Use re to make this fdset number flexible
                exp_ser_opts.append(r'path=/dev/fdset/\d+')
                exp_ser_opts.append('append=on')
            elif serial_type == 'unix':
                # Use re to make this fd number flexible
                exp_ser_opts.append(r'fd=\d+')
            else:
                exp_ser_opts.append('path=%s' % path)
        elif serial_type in ['tcp', 'tls']:
            for source in serial_dev.sources:
                if 'host' in source:
                    host = source['host']
                if 'service' in source:
                    port = source['service']
            exp_ser_opts.append('host=%s' % host)
            exp_ser_opts.append('port=%s' % port)
        elif serial_type in ['udp']:
            localaddr = ''
            localport = '0'
            for source in serial_dev.sources:
                if source['mode'] == 'connect':
                    if 'host' in source:
                        host = source['host']
                    if 'service' in source:
                        port = source['service']
                else:
                    if 'host' in source:
                        localaddr = source['host']
                    if 'service' in source:
                        localport = source['service']
            exp_ser_opts.append('host=%s' % host)
            exp_ser_opts.append('port=%s' % port)
            exp_ser_opts.append('localaddr=%s' % localaddr)
            exp_ser_opts.append('localport=%s' % localport)

        if serial_type in ['unix', 'tcp', 'udp', 'tls']:
            for source in serial_dev.sources:
                if 'mode' in source:
                    mode = source['mode']
            if mode == 'bind':
                exp_ser_opts.append('server')
                exp_ser_opts.append('nowait')

        if serial_type == 'tls':
            exp_ser_opts.append('tls-creds=objcharserial0_tls0')

        exp_ser_opt = ','.join(exp_ser_opts)

        # Expected -device option depends on the host architecture.
        arch = platform.machine()
        if "ppc" in arch:
            exp_ser_devs = [
                'spapr-vty', 'chardev=charserial0', 'reg=0x30000000'
            ]
            if libvirt_version.version_compare(3, 9, 0):
                exp_ser_devs.insert(2, 'id=serial0')
        elif "s390x" in arch:
            dev_type = 'sclpconsole'
            if target_model:
                dev_type = target_model
            exp_ser_devs = [dev_type, 'chardev=charserial0', 'id=serial0']
        else:
            logging.debug('target_type: %s', target_type)
            if target_type == 'pci-serial':
                exp_ser_devs = [
                    'pci-serial', 'chardev=charserial0', 'id=serial0',
                    r'bus=pci.\d+', r'addr=0x\d+'
                ]
            else:
                exp_ser_devs = [
                    'isa-serial', 'chardev=charserial0', 'id=serial0'
                ]
        exp_ser_dev = ','.join(exp_ser_devs)

        # Non-serial consoles (and spicevmc) produce no serial chardev.
        if console_target_type != 'serial' or serial_type == 'spicevmc':
            exp_ser_opt = None
            exp_ser_dev = None
        logging.debug("exp_ser_opt: %s", exp_ser_opt)
        logging.debug("ser_opt: %s", ser_opt)

        # Check options against expectation
        if exp_ser_opt is not None and re.match(exp_ser_opt, ser_opt) is None:
            test.fail('Expect get qemu command serial option "%s", '
                      'but got "%s"' % (exp_ser_opt, ser_opt))
        if exp_ser_dev is not None and ser_dev is not None \
           and re.match(exp_ser_dev, ser_dev) is None:
            test.fail('Expect get qemu command serial device option "%s", '
                      'but got "%s"' % (exp_ser_dev, ser_dev))

        if console_target_type == 'virtio':
            exp_con_opts = [serial_type, 'id=charconsole1']
            exp_con_opt = ','.join(exp_con_opts)

            exp_con_devs = []
            if console_target_type == 'virtio':
                exp_con_devs.append('virtconsole')
            exp_con_devs += ['chardev=charconsole1', 'id=console1']
            exp_con_dev = ','.join(exp_con_devs)
            if con_opt != exp_con_opt:
                test.fail('Expect get qemu command console option "%s", '
                          'but got "%s"' % (exp_con_opt, con_opt))
            if con_dev != exp_con_dev:
                test.fail(
                    'Expect get qemu command console device option "%s", '
                    'but got "%s"' % (exp_con_dev, con_dev))

    def check_serial_console(console, username, password):
        """
        Verify the console works: read-only console types (file/tls) only
        need to show a login prompt; read-write consoles log in and run a
        command.
        """
        if serial_type in ('file', 'tls'):
            _, output = console.read_until_output_matches(['.*[Ll]ogin:'])
            return
        console.wait_for_login(username, password)
        status, output = console.cmd_status_output('pwd')
        logging.debug("Command status: %s", status)
        logging.debug("Command output: %s", output)

    def cleanup(objs_list):
        """
        Remove test artifacts and let TLS helper objects recover the
        environment on teardown.
        """
        log_file = '/var/log/libvirt/virt-test'
        if serial_type == 'file' and os.path.exists(log_file):
            os.remove(log_file)

        # Mark helper objects so their destructors restore the environment.
        for helper in objs_list:
            helper.auto_recover = True
            del helper

    def get_console_type():
        """
        Decide whether the test console acts as a server (started before
        the guest) or a client (started after); None for types without
        console test support.
        """
        if serial_type in ('tcp', 'unix', 'udp', 'tls'):
            # If the guest side connects, our console must be the server.
            guest_connects = any(src.get('mode') == 'connect'
                                 for src in serial_dev.sources)
            return 'server' if guest_connects else 'client'
        return {'file': 'client', 'pipe': 'server'}.get(serial_type)

    serial_type = params.get('serial_dev_type', 'pty')
    sources_str = params.get('serial_sources', '')
    username = params.get('username')
    password = params.get('password')
    console_target_type = params.get('console_target_type', 'serial')
    target_type = params.get('target_type', 'isa-serial')
    target_model = params.get('target_model', '')
    console_target_port = params.get('console_target_port', '0')
    second_serial_console = params.get('second_serial_console', 'no') == 'yes'
    custom_pki_path = params.get('custom_pki_path', '/etc/pki/libvirt-chardev')
    auto_recover = params.get('auto_recover', 'no')
    client_pwd = params.get('client_pwd', None)
    server_pwd = params.get('server_pwd', None)
    machine_type = params.get('machine_type', '')
    remove_devices = params.get('remove_devices', 'serial,console').split(',')

    args_list = [client_pwd, server_pwd]

    for arg in args_list:
        if arg and arg.count("ENTER.YOUR."):
            raise test.fail("Please assign a value for %s!" % arg)

    vm_name = params.get('main_vm', 'virt-tests-vm1')
    vm = env.get_vm(vm_name)

    # it's used to clean up TLS objs later
    objs_list = []

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    try:
        for device_type in remove_devices:
            vm_xml.remove_all_device_by_type(device_type)
        if serial_type == "tls":
            test_dict = dict(params)
            tls_obj = TLSConnection(test_dict)
            if auto_recover == "yes":
                objs_list.append(tls_obj)
            tls_obj.conn_setup(False, True, True)

        serial_dev = prepare_serial_device()
        if console_target_type == 'serial' or second_serial_console:
            logging.debug('Serial device:\n%s', serial_dev)
            vm_xml.add_device(serial_dev)
            if serial_type == "spiceport":
                spice_graphics_dev = prepare_spice_graphics_device()
                logging.debug('Spice graphics device:\n%s', spice_graphics_dev)
                vm_xml.add_device(spice_graphics_dev)

        console_dev = prepare_console_device()
        logging.debug('Console device:\n%s', console_dev)
        vm_xml.add_device(console_dev)
        if second_serial_console:
            console_target_type = 'serial'
            console_target_port = '1'
            console_dev = prepare_console_device()
            logging.debug('Console device:\n%s', console_dev)
            vm_xml.add_device(console_dev)
            vm_xml.undefine()
            res = virsh.define(vm_xml.xml)
            if res.stderr.find(r'Only the first console can be a serial port'):
                logging.debug("Test only the first console can be a serial"
                              "succeeded")
                return
            test.fail("Test only the first console can be serial failed.")

        console_type = get_console_type()

        if not define_and_check():
            logging.debug("Can't define the VM, exiting.")
            return
        if console_target_type != 'virtio':
            check_xml()

        expected_fails = []
        if serial_type == 'nmdm' and platform.system() != 'FreeBSD':
            expected_fails.append("unsupported chardev 'nmdm'")

        # Prepare console before start when console is server
        if console_type == 'server':
            console = prepare_serial_console()

        res = virsh.start(vm_name)
        libvirt.check_result(res, expected_fails, [])
        if res.exit_status:
            logging.debug("Can't start the VM, exiting.")
            return
        check_qemu_cmdline()
        # Prepare console after start when console is client
        if console_type == 'client':
            console = prepare_serial_console()

        if (console_type is not None and serial_type != 'tls'
                and console_type != 'server'):
            check_serial_console(console, username, password)

        vm.destroy()

    finally:
        cleanup(objs_list)
        vm_xml_backup.sync()
Example #57
0
def run(test, params, env):
    """
    Test hpt resizing.

    1. Set the <hpt resizing=.../> feature in the guest xml.
    2. On ppc64le hosts, start the guest and verify the hpt order
       (and, when resizing is enabled, grow it via memory hotplug
       while checking device aliases and guest dmesg).
    3. On other hosts, only verify that defining the xml succeeds or
       fails as the configuration expects.
    """
    resizing = params.get('resizing')
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)
    status_error = 'yes' == params.get('status_error', 'no')
    error_msg = params.get('error_msg', '')
    hpt_order_path = params.get('hpt_order_path', '')
    qemu_check = params.get('qemu_check', '')

    def set_hpt(resizing, vmxml, sync=True):
        """
        Set resizing value to vm xml.

        :param resizing: hpt resizing value to set (e.g. 'enabled', 'disabled')
        :param vmxml: VMXML instance to modify
        :param sync: whether to sync the xml back to the defined domain
        """
        features_xml = vm_xml.VMFeaturesXML()
        features_xml.hpt_resizing = resizing
        vmxml.features = features_xml
        if sync:
            vmxml.sync()

    def set_memory(vmxml):
        """
        Set maximum/numa memory attributes in vm xml so memory devices
        can be hotplugged later.
        """
        vmxml.max_mem_rt = int(params.get('max_mem_rt', 30670848))
        vmxml.max_mem_rt_slots = int(params.get('max_mem_rt_slots', 16))
        vmxml.max_mem_rt_unit = params.get('max_mem_rt_unit', 'KiB')

        cpu = vm_xml.VMCPUXML()
        cpu.xml = "<cpu><numa/></cpu>"

        # NOTE(review): numa_cell comes from the test configuration, which is
        # trusted input; eval() would be unsafe on untrusted data.
        numa_cell = eval(params.get('numa_cell'))
        logging.debug(numa_cell)

        # vcpu count must cover the highest cpu id referenced by the cells
        vmxml.vcpu = max([int(cell['cpus'][-1]) for cell in numa_cell]) + 1

        cpu.numa_cell = numa_cell
        vmxml.cpu = cpu
        vmxml.sync()

    def check_hpt_order(session, resizing=''):
        """
        Return htp order in hpt_order file by default.
        If 'resizing' is disabled, test updating htp_order.
        """
        if not hpt_order_path:
            test.cancel('No hpt order path provided.')
        hpt_order = session.cmd_output('cat %s' % hpt_order_path).strip()
        hpt_order = int(hpt_order)
        logging.info('Current hpt_order is %d', hpt_order)
        if resizing == 'disabled':
            # With resizing disabled, writing a larger order must fail
            # with the configured error_msg
            cmd_result = session.cmd_status_output(
                'echo %d > %s' % (hpt_order + 1, hpt_order_path))
            result = process.CmdResult(stderr=cmd_result[1],
                                       exit_status=cmd_result[0])
            libvirt.check_exit_status(result, True)
            libvirt.check_result(result, [error_msg])
        return hpt_order

    bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        arch = platform.machine()
        new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Test on ppc64le hosts
        if arch.lower() == 'ppc64le':
            set_hpt(resizing, new_xml)
            if resizing == 'enabled':
                set_memory(new_xml)

            # Start vm and check if start succeeds
            libvirt.check_exit_status(virsh.start(vm_name, debug=True))
            libvirt.check_qemu_cmd_line(qemu_check)
            session = vm.wait_for_login()
            hpt_order = check_hpt_order(session, resizing)

            if resizing == 'enabled':
                mem_xml = utils_hotplug.create_mem_xml(
                    tg_size=int(params.get('mem_size', 2048000)),
                    tg_sizeunit=params.get('size_unit', 'KiB'),
                    tg_node=int(params.get('mem_node', 0)),
                    mem_model=params.get('mem_model', 'dimm'))
                logging.debug(mem_xml)

                # Attach memory device to the guest for 12 times
                # that will reach the maximum memory limitation
                for i in range(12):
                    virsh.attach_device(vm_name,
                                        mem_xml.xml,
                                        debug=True,
                                        ignore_status=False)
                xml_after_attach = vm_xml.VMXML.new_from_dumpxml(vm_name)
                logging.debug(xml_after_attach)

                # Check dumpxml of the guest,
                # check if each device has its alias
                for i in range(12):
                    # BUGFIX: use a raw string so '\s' is a regex escape
                    # rather than an invalid python escape sequence
                    pattern = r"alias\s+name=['\"]dimm%d['\"]" % i
                    logging.debug('Searching for %s', pattern)
                    if not re.search(pattern, str(
                            xml_after_attach.xmltreefile)):
                        test.fail('Missing memory alias: %s' % pattern)

                # Log in the guest and check dmesg
                dmesg = session.cmd('dmesg')
                logging.debug(dmesg)
                dmesg_content = params.get('dmesg_content', '').split('|')
                # hpt_order is expected to have grown by 1 and 2
                for order in range(1, 3):
                    order += hpt_order
                    for content in dmesg_content:
                        if content % order not in dmesg:
                            test.fail('Missing dmesg: %s' % (content % order))

        # Test on non-ppc64le hosts
        else:
            set_hpt(resizing, new_xml, sync=False)
            result = virsh.define(new_xml.xml)
            libvirt.check_exit_status(result, status_error)
            libvirt.check_result(result, [error_msg])

    finally:
        bk_xml.sync()
# Example #58 (score: 0)
def run(test, params, env):
    """
    Sriov basic test:

    1.create max vfs;
    2.Check the nodedev info;
    3.Start a guest with vf;
    4.Reboot a guest with vf;
    5.suspend/resume a guest with vf
    """
    def find_pf():
        """
        Return the sysfs path of the first candidate PF whose net
        interface is up, or False when none is found.
        """
        found_path = ""
        # pci_dirs is prepared by the enclosing test setup
        for pci_path in pci_dirs:
            iface = os.listdir("%s/net" % pci_path)[0]
            if utils_net.get_net_if_operstate(iface) == "up":
                pf_iface_name = iface  # kept for parity with original (unused)
                found_path = pci_path
                break
        return found_path if found_path else False

    def create_address_dict(pci_id):
        """
            Build an <address> attrs dict from a pci id (dddd:bb:ss.f),
            prefixing every component with '0x'.
        """
        fields = pci_id.split(':')
        slot = fields[-1].split('.')[0]
        function = pci_id.split('.')[-1]
        return {
            'type': 'pci',
            'domain': "0x%s" % fields[0],
            'slot': "0x%s" % slot,
            'bus': "0x%s" % fields[1],
            'function': "0x%s" % function
        }

    def addr_to_pci(addr):
        """
            Convert an address attrs dict (0x-prefixed fields) back to a
            pci id string: dddd:bb:ss.f.
        """
        stripped = [re.findall(r"0x(.+)", addr[key])[0]
                    for key in ('domain', 'bus', 'slot', 'function')]
        return "%s:%s:%s.%s" % tuple(stripped)

    def create_hostdev_interface(pci_id, managed, model):
        """
            Create hostdev type interface xml.

            :param pci_id: pci id string (dddd:bb:ss.f) of the vf
            :param managed: 'yes'/'no' value for the managed attribute
            :param model: optional nic model; skipped when empty
            :return: the populated Interface object
        """
        attrs = create_address_dict(pci_id)
        new_iface = Interface('hostdev')
        new_iface.managed = managed
        if model != "":
            new_iface.model = model
        new_iface.mac_address = utils_net.generate_mac_address_simple()
        new_iface.hostdev_address = new_iface.new_iface_address(
            **{"attrs": attrs})
        chars = string.ascii_letters + string.digits + '-_'
        # User-defined aliases must carry the 'ua-' prefix.
        # (range() is directly iterable; the original wrapped it in list())
        alias_name = 'ua-' + ''.join(
            random.choice(chars) for _ in range(64))
        new_iface.alias = {'name': alias_name}
        return new_iface

    def create_vfs(vf_num):
        """
            Create max vfs.

            :param vf_num: number of vfs to create on the pf
        """
        net_device = []
        net_name = []
        # cleanup env and create vfs
        cmd = "echo 0 > %s/sriov_numvfs" % pci_address
        if driver == "mlx4_core":
            cmd = "modprobe -r mlx4_en ; modprobe -r mlx4_ib ; modprobe -r mlx4_core"
        process.run(cmd, shell=True)
        pci_list = virsh.nodedev_list(cap='pci').stdout.strip().splitlines()
        net_list = virsh.nodedev_list(cap='net').stdout.strip().splitlines()
        pci_list_before = set(pci_list)
        net_list_before = set(net_list)
        cmd = "echo %d > %s/sriov_numvfs" % (vf_num, pci_address)
        if driver == "mlx4_core":
            cmd = "modprobe -v mlx4_core num_vfs=%d port_type_array=2,2 probe_vf=%d" \
                    % (vf_num, vf_num)
        test_res = process.run(cmd, shell=True)
        if test_res.exit_status != 0:
            test.fail("Fail to create vfs")

        def _vf_init_completed():
            # Poll until the expected number of new net nodedevs appears
            try:
                net_list_sriov = virsh.nodedev_list(
                    cap='net').stdout.strip().splitlines()
                net_list_sriov = set(net_list_sriov)
                net_diff = list(net_list_sriov.difference(net_list_before))
                net_count = len(net_diff)
                if ((driver != "mlx4_core" and net_count != vf_num) or
                    (driver == "mlx4_core" and net_count != 2 * (vf_num + 1))):
                    net_diff = []
                    return False
                return net_diff
            except process.CmdError:
                # BUGFIX: test.fail() raises by itself; the original
                # 'raise test.fail(...)' would try to raise its return value.
                test.fail("Get net list with 'virsh nodedev-list' failed\n")

        net_diff = utils_misc.wait_for(_vf_init_completed, timeout=300)
        pci_list_sriov = virsh.nodedev_list(
            cap='pci').stdout.strip().splitlines()
        pci_list_sriov = set(pci_list_sriov)
        pci_diff = list(pci_list_sriov.difference(pci_list_before))
        if not net_diff:
            test.fail("Get net list with 'virsh nodedev-list' failed\n")
        for net in net_diff:
            # nodedev net names look like net_<ifname parts>_<6 mac bytes>
            parts = net.split('_')
            # BUGFIX: take the mac from the split list; the original re-bound
            # 'net' to the joined string before slicing, so the mac was built
            # from characters of that string instead of the address bytes.
            mac = ':'.join(parts[-6:])  # currently unused, kept for clarity
            net = '_'.join(parts[1:len(parts) - 6])
            net_name.append(net)
        for pci_addr in pci_diff:
            temp_addr = pci_addr.split("_")
            pci_addr = ':'.join(temp_addr[1:4]) + '.' + temp_addr[4]
            vf_net_name = os.listdir("%s/%s/net" %
                                     (pci_device_dir, pci_addr))[0]
            net_device.append(vf_net_name)
        logging.debug(sorted(net_name))
        logging.debug(sorted(net_device))
        if driver != "mlx4_core" and sorted(net_name) != sorted(net_device):
            test.fail("The net name get from nodedev-list is wrong\n")

    def get_ip_by_mac(mac_addr, timeout=120):
        """
        Get interface IP address by given MAC address.

        :param mac_addr: MAC address of the guest interface to query
        :param timeout: accepted but currently unused; serial login always
                        waits 240s and the ip poll waits 20s — TODO confirm
                        whether callers expect it to be honored
        :return: the ip address string, "" on error, or None when the
                 20s wait_for poll expires without an address
        """
        # Recreate the serial console so the guest stays reachable even when
        # the vf takes over its primary network interface
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)

        def get_ip():
            # Poll helper for wait_for below
            return utils_net.get_guest_ip_addr(session, mac_addr)

        try:
            ip_addr = ""
            iface_name = utils_net.get_linux_ifname(session, mac_addr)
            if iface_name is None:
                test.fail("no interface with MAC address %s found" % mac_addr)
            # Force a fresh dhcp lease on the vf interface
            session.cmd("pkill -9 dhclient", ignore_all_errors=True)
            session.cmd("dhclient %s " % iface_name, ignore_all_errors=True)
            ip_addr = utils_misc.wait_for(get_ip, 20)
            logging.debug("The ip addr is %s", ip_addr)
        except Exception:
            # Best-effort: an interface without an ip is only warned about,
            # not treated as a failure
            logging.warning("Find %s with MAC address %s but no ip for it" %
                            (iface_name, mac_addr))
        finally:
            session.close()
        return ip_addr

    def create_nodedev_pci(pci_address):
        """
            Convert a pci id (dddd:bb:ss.f) to the libvirt nodedev name
            form pci_dddd_bb_ss_f.
        """
        fields = pci_address.split(':')
        slot_function = fields[2].split('.')
        return "_".join(
            ["pci", fields[0], fields[1], slot_function[0], slot_function[1]])

    def create_network_interface(name):
        """
            Build a network-type interface xml pointing at network 'name',
            using the virtio model and a freshly generated mac.
        """
        iface = Interface('network')
        iface.source = {'network': name}
        iface.model = "virtio"
        iface.mac_address = utils_net.generate_mac_address_simple()
        return iface

    def create_hostdev_network():
        """
            Build a hostdev-forward network xml, backed either by an
            explicit vf address pool or by the whole pf.
        """
        netxml = network_xml.NetworkXML()
        if vf_pool_source == "vf_list":
            netxml.driver = {'name': 'vfio'}
            netxml.forward = {"mode": "hostdev", "managed": managed}
            netxml.vf_list = [
                netxml.new_vf_address(**{'attrs': create_address_dict(vf)})
                for vf in vf_list
            ]
        else:
            netxml.pf = {"dev": pf_name}
            netxml.forward = {"mode": "hostdev", "managed": managed}
        netxml.name = net_name
        logging.debug(netxml)
        return netxml

    def create_macvtap_network():
        """
        Build a passthrough (macvtap) network xml spanning all of the
        vf interfaces, forwarding via the first one.
        """
        netxml = network_xml.NetworkXML()
        netxml.name = net_name
        netxml.forward = {'dev': vf_name_list[0], 'mode': 'passthrough'}
        netxml.forward_interface = [{'dev': ifname} for ifname in vf_name_list]
        logging.debug(netxml)
        return netxml

    def do_operation():
        """
            Do operation in guest os with vf and check the os behavior after operation.
        """
        if operation == "resume_suspend":
            try:
                virsh.suspend(vm.name, debug=True, ignore_status=False)
                # BUGFIX: was 'ignore_statue=False' — the typo was silently
                # swallowed by virsh's **dargs, so resume failures never raised
                virsh.resume(vm.name, debug=True, ignore_status=False)
                get_ip_by_mac(mac_addr, timeout=120)
            except process.CmdError as detail:
                err_msg = "Suspend-Resume %s with vf failed: %s" % (vm_name,
                                                                    detail)
                test.fail(err_msg)
        if operation == "reboot":
            try:
                # Recreate the serial console so login works after reboot
                if vm.serial_console is not None:
                    vm.cleanup_serial_console()
                    vm.create_serial_console()
                virsh.reboot(vm.name, ignore_status=False)
                get_ip_by_mac(mac_addr, timeout=120)
            except process.CmdError as detail:
                err_msg = "Reboot %s with vf failed: %s" % (vm_name, detail)
                test.fail(err_msg)
        if operation == "save":
            # managedsave with a passthrough vf is expected to fail
            result = virsh.managedsave(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=True)

        if operation == "restart_libvirtd":
            # the vf must be detached across the libvirtd restart,
            # then reattached afterwards
            detach_interface()
            utils_libvirtd.libvirtd_restart()
            interface = attach_interface()

    def check_info():
        """
            Check the pf or vf info after create vfs.

            Depending on info_type, validates nodedev-dumpxml output for
            the pf (product string and maxCount), the ordering of the vf
            address list, or a single vf (product string and parent pf
            address) against lspci output.
        """
        if info_type == "pf_info" or info_type == "vf_order":
            nodedev_pci = create_nodedev_pci(pci_address.split("/")[-1])
            xml = NodedevXML.new_from_dumpxml(nodedev_pci)
            if info_type == "pf_info":
                product_info = xml.cap.product_info
                max_count = xml.max_count
                # the product string from lspci output (pci_info) must
                # contain the one reported by nodedev-dumpxml
                if pci_info.find(product_info) == -1:
                    test.fail(
                        "The product_info show in nodedev-dumpxml is wrong\n")
                if int(max_count) != max_vfs:
                    test.fail(
                        "The maxCount show in nodedev-dumpxml is wrong\n")
            if info_type == "vf_order":
                vf_addr_list = xml.cap.virt_functions
                if len(vf_addr_list) != max_vfs:
                    test.fail(
                        "The num of vf list show in nodedev-dumpxml is wrong\n"
                    )
                addr_list = []
                for vf_addr in vf_addr_list:
                    addr = vf_addr.domain + ":" + vf_addr.bus + ":" + vf_addr.slot + "." + vf_addr.function
                    addr_list.append(addr)
                logging.debug(
                    "The vf addr list show in nodedev-dumpxml is %s\n",
                    addr_list)
                # the vf addresses are expected to already be in sorted order
                if sorted(addr_list) != addr_list:
                    test.fail(
                        "The vf addr list show in nodedev-dumpxml is not sorted correctly\n"
                    )
        elif info_type == "vf_info":
            vf_addr = vf_list[0]
            nodedev_pci = create_nodedev_pci(vf_addr)
            vf_xml = NodedevXML.new_from_dumpxml(nodedev_pci)
            # compare the nodedev xml against lspci output for the same vf
            vf_bus_slot = ':'.join(vf_addr.split(':')[1:])
            res = process.run("lspci -s %s -vv" % vf_bus_slot)
            vf_pci_info = res.stdout_text
            vf_product_info = vf_xml.cap.product_info
            if vf_pci_info.find(vf_product_info) == -1:
                test.fail(
                    "The product_info show in nodedev-dumpxml is wrong\n")
            # NOTE(review): virt_functions[0] of a vf appears to hold the
            # parent pf address here — confirm against NodedevXML semantics
            pf_addr = vf_xml.cap.virt_functions[0]
            pf_addr_domain = re.findall(r"0x(.+)", pf_addr.domain)[0]
            pf_addr_bus = re.findall(r"0x(.+)", pf_addr.bus)[0]
            pf_addr_slot = re.findall(r"0x(.+)", pf_addr.slot)[0]
            pf_addr_function = re.findall(r"0x(.+)", pf_addr.function)[0]
            pf_pci = pf_addr_domain + ":" + pf_addr_bus + ":" + pf_addr_slot + "." + pf_addr_function
            if pf_pci != pci_id:
                test.fail(
                    "The pf address show in vf nodedev-dumpxml is wrong\n")

    def create_interface():
        """
            Dispatch to the right interface builder according to vf_type
            and return the resulting interface xml object.
        """
        new_iface = Interface('network')
        if vf_type == "vf":
            new_iface = create_hostdev_interface(vf_addr, managed, model)
        elif vf_type == "vf_pool":
            netxml = create_hostdev_network()
            virsh.net_define(netxml.xml, ignore_status=True)
            if not inactive_pool:
                virsh.net_start(netxml.name)
            new_iface = create_network_interface(netxml.name)
        elif vf_type == "macvtap":
            new_iface = Interface('direct')
            new_iface.source = {"dev": vf_name, "mode": "passthrough"}
            new_iface.mac_address = utils_net.generate_mac_address_simple()
            new_iface.model = "virtio"
        elif vf_type == "macvtap_network":
            netxml = create_macvtap_network()
            virsh.net_define(netxml.xml, ignore_status=True)
            virsh.net_start(netxml.name)
            new_iface = create_network_interface(netxml.name)
        return new_iface

    def detach_interface():
        """
            Detach interface:

            1.Detach interface from xml;
            2.Check the live xml after detach interface;
            3.Check the vf driver after detach interface.
        """
        def _detach_completed():
            # Detach has completed once the mac no longer appears in domiflist
            result = virsh.domiflist(vm_name, "", ignore_status=True)
            return result.stdout.find(mac_addr) == -1

        def check_addr_attrs():
            # True once no hostdev in the live xml still carries vf_addr_attrs
            live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            device = live_xml.devices
            hostdev_list = device.by_device_tag("hostdev")
            for hostdev in hostdev_list:
                addr = hostdev.source.untyped_address
                hostdev_addr_attrs = {
                    "domain": addr.domain,
                    "bus": addr.bus,
                    "slot": addr.slot,
                    "function": addr.function
                }
                if hostdev_addr_attrs == vf_addr_attrs:
                    return False
            return True

        result = virsh.detach_device(vm_name, new_iface.xml)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        if vf_type == "hostdev":
            check_ret = utils_misc.wait_for(check_addr_attrs, timeout=60)
            if not check_ret:
                test.fail("The hostdev device detach failed from xml\n")
        else:
            utils_misc.wait_for(_detach_completed, timeout=60)
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        device = live_xml.devices
        logging.debug("Domain xml after detach interface:\n %s", live_xml)

        if vf_type == "vf" or vf_type == "vf_pool":
            # The hostdev interface must be gone from the live xml
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "hostdev":
                    if interface.hostdev_address.attrs == vf_addr_attrs:
                        test.fail(
                            "The hostdev interface still in the guest xml after detach\n"
                        )
                    break
            # After detach the vf should be re-bound on the host side
            driver = os.readlink(
                os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1]
            logging.debug("The driver after vf detached from guest is %s\n",
                          driver)
            if managed == "no":
                # managed='no': vf stays on vfio-pci until reattached manually
                if driver != "vfio-pci":
                    test.fail(
                        "The vf pci driver is not vfio-pci after detached from guest with managed as no\n"
                    )
                result = virsh.nodedev_reattach(nodedev_pci_addr)
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            elif driver != origin_driver:
                test.fail(
                    "The vf pci driver is not reset to the origin driver after detach from guest: %s vs %s\n"
                    % (driver, origin_driver))
        else:
            # macvtap cases: the direct-type interface must be gone
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "direct":
                    if interface.source["dev"] == vf_name:
                        test.fail(
                            "The macvtap interface still exist in the guest xml after detach\n"
                        )
                    break

    def attach_interface():
        """
            Attach interface:

            1.Attach interface from xml;
            2.Check the vf driver after attach interface;
            3.Check the live xml after attach interface;

            :return: the interface object found in the live xml
        """
        if managed == "no":
            # managed='no': the host must detach the device itself first
            result = virsh.nodedev_detach(nodedev_pci_addr)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        logging.debug("attach interface xml:\n %s", new_iface)
        result = virsh.attach_device(vm_name,
                                     new_iface.xml,
                                     flagstr=option,
                                     debug=True)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        if option == "--config":
            result = virsh.start(vm_name)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        # For option == "--persistent", after VM destroyed and then start, the device should still be there.
        if option == "--persistent":
            virsh.destroy(vm_name)
            result = virsh.start(vm_name, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(live_xml)

        if vf_type != "hostdev":
            get_ip_by_mac(mac_addr, timeout=60)

        device = live_xml.devices

        if vf_type == "hostdev":
            hostdev_list = device.by_device_tag("hostdev")
            if len(hostdev_list) == 0:
                test.fail("The hostdev device attach failed from xml\n")
            else:
                for hostdev in hostdev_list:
                    if hostdev.type == "pci":
                        break
                interface = hostdev

        if vf_type == "vf" or vf_type == "vf_pool":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "hostdev":
                    if interface.driver.driver_attr['name'] != 'vfio':
                        test.fail(
                            "The driver of the hostdev interface is not vfio\n"
                        )
                    break
            vf_addr_attrs = interface.hostdev_address.attrs
            pci_addr = addr_to_pci(vf_addr_attrs)
            # the vf must be bound to vfio-pci while attached to the guest
            nic_driver = os.readlink(
                os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1]
            if nic_driver != "vfio-pci":
                test.fail("The driver of the hostdev interface is not vfio\n")
        elif vf_type == "macvtap" or vf_type == "macvtap_network":
            # BUGFIX: 'match' was read before assignment (NameError) whenever
            # the first interface was not the expected direct-type one, and
            # the failure check ran per-iteration; initialize it and only
            # fail after scanning all interfaces.
            match = "no"
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "direct":
                    if vf_type == "macvtap":
                        if interface.source["dev"] == new_iface.source["dev"]:
                            match = "yes"
                            vf_name = interface.source["dev"]
                    elif interface.source['dev'] in vf_name_list:
                        match = "yes"
                        vf_name = interface.source["dev"]
            if match != "yes":
                test.fail(
                    "The dev name or mode of macvtap interface is wrong after attach\n"
                )
        return interface

    def setup_controller(nic_num, controller_index, ctl_models):
        """
        Create controllers bond to numa node in the guest xml

        :param nic_num: number of nic card bond to numa node
        :param controller_index: index num used to create controllers
        :param ctl_models: contoller topo for numa bond
        :return: index of the last controller created
        """
        index = controller_index
        if nic_num == 2:
            # A second nic requires a pcie switch with two downstream ports
            ctl_models.append('pcie-switch-upstream-port')
            ctl_models.append('pcie-switch-downstream-port')
            ctl_models.append('pcie-switch-downstream-port')
        # Fill indexes 0..index-1 with a pcie-root plus pcie-root-ports
        for i in range(index):
            controller = Controller("controller")
            controller.type = "pci"
            controller.index = i
            if i == 0:
                controller.model = 'pcie-root'
            else:
                controller.model = 'pcie-root-port'
            vmxml.add_device(controller)
        set_address = False
        for model in ctl_models:
            controller = Controller("controller")
            controller.type = "pci"
            controller.index = index
            controller.model = model
            # Chain a controller following a pcie-expander-bus (and any
            # upstream port) onto the bus of the previous controller
            if set_address or model == "pcie-switch-upstream-port":
                attrs = {
                    'type': 'pci',
                    'domain': '0',
                    'slot': '0',
                    'bus': index - 1,
                    'function': '0'
                }
                controller.address = controller.new_controller_address(
                    **{"attrs": attrs})
                logging.debug(controller)
            if controller.model == "pcie-expander-bus":
                # Bind the expander bus to numa node 0
                controller.node = "0"
                controller.target = {'busNr': '100'}
                set_address = True
            else:
                set_address = False
            logging.debug(controller)
            vmxml.add_device(controller)
            index += 1
        return index - 1

    def add_numa(vmxml):
        """
        Add a numa <cpu> topology to the guest xml, splitting vcpus and
        memory across two cells when there is more than one vcpu.

        :param vmxml: The instance of VMXML class
        """
        vcpu = vmxml.vcpu
        max_mem = vmxml.max_mem
        max_mem_unit = vmxml.max_mem_unit
        cells = []
        if vcpu == 1:
            # Single vcpu: one cell owning everything
            cells.append({'id': '0',
                          'cpus': '0',
                          'memory': str(max_mem),
                          'unit': str(max_mem_unit)})
        else:
            # Two cells, each with half the memory and its share of cpus
            for node in range(2):
                cell = {'id': str(node),
                        'memory': str(max_mem // 2),
                        'unit': str(max_mem_unit)}
                if vcpu == 2:
                    cell['cpus'] = str(node)
                elif node == 0:
                    if vcpu == 3:
                        cell['cpus'] = str(node)
                    if vcpu > 3:
                        cell['cpus'] = "%s-%s" % (node, vcpu // 2 - 1)
                else:
                    cell['cpus'] = "%s-%s" % (vcpu // 2, str(vcpu - 1))
                cells.append(cell)
        # Attach the cpu element with the numa cells to the domain xml
        cpu_xml = vm_xml.VMCPUXML()
        cpu_xml.xml = "<cpu><numa/></cpu>"
        cpu_xml.numa_cell = cells
        vmxml.cpu = cpu_xml

    def create_iface_list(bus_id, nic_num, vf_list):
        """
            Create hostdev interface list bond to numa node

            :param bus_id: bus_id in pci address which decides the controller attached to
            :param nic_num: number of nic card bond to numa node
            :param vf_list: sriov vf list
        """
        iface_list = []
        for num in range(nic_num):
            iface = create_hostdev_interface(vf_list[num], managed, model)
            # step down through the bus numbers handed out by setup_controller
            bus_id -= num
            addr_attrs = {
                'type': 'pci',
                'domain': '0',
                'slot': '0',
                'bus': bus_id,
                'function': '0'
            }
            iface.address = iface.new_iface_address(**{"attrs": addr_attrs})
            iface_list.append(iface)
        return iface_list

    def check_guestos(iface_list):
        """
            Verify that every hostdev interface in the list obtains an ip
            inside the guest os.

            :param iface_list: hostdev interface list
        """
        for hostdev_iface in iface_list:
            get_ip_by_mac(hostdev_iface.mac_address, timeout=60)

    def check_numa(vf_driver):
        """
        Check whether the vf is bound to the correct numa node in the
        guest os by reading sysfs through the serial console.

        :param vf_driver: vf driver
        """
        if vm.serial_console:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)
        driver_dir = "/sys/bus/pci/drivers/%s" % vf_driver
        bound_devs = session.cmd_output("ls -d %s/00*" %
                                        driver_dir).strip().split('\n')
        for dev in bound_devs:
            node = session.cmd_output('cat %s/numa_node' %
                                      dev).strip().split('\n')[-1]
            logging.debug("The vf is attached to numa node %s\n", node)
            if node != "0":
                test.fail("The vf is not attached to numa node 0\n")
        session.close()

    def remove_devices(vmxml, device_type):
        """
        Remove all addresses for all devices who has one.

        :param vmxml: The VM XML to be modified
        :param device_type: The device type for removing

        :return: True if success, otherwise, False
        """
        if device_type not in ('address', 'usb'):
            return
        xpath = {'address': '/devices/*/address',
                 'usb': '/devices/*'}[device_type]
        try:
            for node in vmxml.xmltreefile.findall(xpath):
                # For 'usb' only drop elements on the usb bus; for
                # 'address' drop every matched address element
                if device_type != 'usb' or node.get('bus') == 'usb':
                    vmxml.xmltreefile.remove(node)
        except (AttributeError, TypeError) as details:
            test.error("Fail to remove '%s': %s" % (device_type, details))
        vmxml.xmltreefile.write()

    # Read the test parameters from the cartesian config.
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    machine_type = params.get("machine_type", "pc")
    operation = params.get("operation")
    driver = params.get("driver", "ixgbe")
    status_error = params.get("status_error", "no") == "yes"
    model = params.get("model", "")
    managed = params.get("managed", "yes")
    attach = params.get("attach", "")
    option = params.get("option", "")
    vf_type = params.get("vf_type", "")
    dev_type = params.get("dev_type", "")
    info_check = params.get("info_check", "no")
    info_type = params.get("info_type", "")
    vf_pool_source = params.get("vf_pool_source", "vf_list")
    loop_times = int(params.get("loop_times", "1"))
    start_vm = "yes" == params.get("start_vm", "yes")
    including_pf = "yes" == params.get("including_pf", "no")
    max_vfs_attached = "yes" == params.get("max_vfs_attached", "no")
    inactive_pool = "yes" == params.get("inactive_pool", "no")
    duplicate_vf = "yes" == params.get("duplicate_vf", "no")
    expected_error = params.get("error_msg", "")
    nic_num = int(params.get("nic_num", "1"))
    nfv = params.get("nfv", "no") == "yes"
    ctl_models = params.get("ctl_models", "").split(' ')
    controller_index = int(params.get("controller_index", "12"))

    # Snapshot the inactive definition so it can be restored in the
    # cleanup path, then drop all interfaces for a clean starting point.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    vmxml.remove_all_device_by_type('interface')
    vmxml.sync()
    if max_vfs_attached:
        # Attaching the maximum number of VFs needs a pci-bridge (or
        # pcie-root-port) controller; add one only if none exists.
        controller_devices = vmxml.get_devices("controller")
        pci_bridge_controllers = []
        for device in controller_devices:
            logging.debug(device)
            if device.type == 'pci' and device.model in ("pci-bridge",
                                                         "pcie-root-port"):
                pci_bridge_controllers.append(device)
        if not pci_bridge_controllers:
            pci_bridge_controller = Controller("controller")
            pci_bridge_controller.type = "pci"
            pci_bridge_controller.index = "1"
            pci_bridge_controller.model = "pci-bridge"
            vmxml.add_device(pci_bridge_controller)
            vmxml.sync()

    # Bring the guest into the state the test variant expects: freshly
    # booted (with a verified serial login) or shut down.
    if start_vm:
        if not vm.is_dead():
            vm.destroy()
        vm.start()
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)
        session.close()
    else:
        if not vm.is_dead():
            vm.destroy()
    # Host-side sysfs locations for the PF driver and PCI devices.
    driver_dir = "/sys/bus/pci/drivers/%s" % driver
    pci_dirs = glob.glob("%s/000*" % driver_dir)
    pci_device_dir = "/sys/bus/pci/devices"
    pci_address = ""
    net_name = "test-net"

    # Prepare interface xml
    try:
        pf_iface_name = ""
        # Locate an up PF on the host; cancel (not fail) when none exists.
        pci_address = utils_misc.wait_for(find_pf, timeout=60)
        if not pci_address:
            test.cancel("no up pf found in the test machine")
        pci_id = pci_address.split("/")[-1]
        pf_name = os.listdir('%s/net' % pci_address)[0]
        bus_slot = ':'.join(pci_address.split(':')[1:])
        pci_info = process.run("lspci -s %s -vv" % bus_slot).stdout_text
        logging.debug("The pci info of the sriov card is:\n %s", pci_info)
        # "Total VFs" from lspci, minus one kept in reserve.
        max_vfs = int(re.findall(r"Total VFs: (.+?),", pci_info)[0]) - 1
        if info_check == 'yes' or max_vfs < 32:
            vf_num = max_vfs
            create_vfs(vf_num)
        else:
            # For large cards only create a bit more than half the VFs.
            vf_num = int(max_vfs // 2 + 1)
            create_vfs(vf_num)

        # Collect PCI address, netdev name and MAC for every created VF.
        vf_list = []
        vf_name_list = []
        vf_mac_list = []

        for i in range(vf_num):
            vf = os.readlink("%s/virtfn%s" % (pci_address, str(i)))
            vf = os.path.split(vf)[1]
            vf_list.append(vf)
            vf_name = os.listdir('%s/%s/net' % (pci_device_dir, vf))[0]
            with open('%s/%s/net/%s/address' % (pci_device_dir, vf, vf_name),
                      'r') as f:
                vf_mac = f.readline().strip()
            vf_name_list.append(vf_name)
            vf_mac_list.append(vf_mac)

        if attach == "yes" and not nfv:
            vf_addr = vf_list[0]
            if dev_type:
                # hostdev-style attachment uses the VF's PCI address.
                mac_addr = vf_mac_list[0]
                new_iface = utils_test.libvirt.create_hostdev_xml(
                    vf_addr, managed=managed, xmlfile=False)
            else:
                new_iface = create_interface()
                mac_addr = new_iface.mac_address
            if inactive_pool:
                # Negative case: attaching from an inactive pool must fail
                # with expected_error.
                result = virsh.attach_device(vm_name,
                                             new_iface.xml,
                                             flagstr=option,
                                             ignore_status=True,
                                             debug=True)
                utils_test.libvirt.check_exit_status(result, expected_error)
            else:
                nodedev_pci_addr = create_nodedev_pci(vf_addr)
                origin_driver = os.readlink(
                    os.path.join(pci_device_dir, vf_addr,
                                 "driver")).split('/')[-1]
                logging.debug(
                    "The driver of vf before attaching to guest is %s\n",
                    origin_driver)
                # Repeat attach/operate/detach loop_times to catch
                # instabilities across cycles.
                count = 0
                while count < loop_times:
                    interface = attach_interface()
                    if vf_type in ["vf", "vf_pool"]:
                        vf_addr_attrs = interface.hostdev_address.attrs
                    if vf_type == "hostdev":
                        addr = interface.source.untyped_address
                        vf_addr_attrs = {
                            "domain": addr.domain,
                            "bus": addr.bus,
                            "slot": addr.slot,
                            "function": addr.function
                        }
                    if operation != "":
                        do_operation()
                    detach_interface()
                    count += 1
                if max_vfs_attached:
                    # Attach every VF at once, then detach them all again.
                    interface_list = []
                    for vf_addr in vf_list:
                        new_iface = create_interface()
                        mac_addr = new_iface.mac_address
                        nodedev_pci_addr = create_nodedev_pci(vf_addr)
                        attach_interface()
                        interface_list.append(new_iface)
                    count = 0
                    for new_iface in interface_list:
                        vf_addr = vf_list[count]
                        vf_addr_attrs = new_iface.hostdev_address.attrs
                        detach_interface()
                        count += 1
        if info_check == "yes":
            check_info()
        if including_pf:
            # Negative case: a hostdev network containing the PF address
            # must be rejected with expected_error.
            vf_list = []
            pf_addr = pci_id
            vf_list.append(pf_addr)
            netxml = create_hostdev_network()
            result = virsh.net_define(netxml.xml,
                                      ignore_status=True,
                                      debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
        if duplicate_vf:
            # Negative case: duplicated VF addresses in the network must be
            # rejected by both net-define and net-create.
            vf_list.append(vf_list[0])
            netxml = create_hostdev_network()
            result = virsh.net_define(netxml.xml,
                                      ignore_status=True,
                                      debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
            result = virsh.net_create(netxml.xml,
                                      ignore_status=True,
                                      debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
        if nfv:
            # NFV scenario: rebuild the guest as q35 with NUMA, custom
            # controllers, and one interface per VF, then verify placement.
            vf_driver = os.readlink(
                os.path.join(pci_device_dir, vf_list[0],
                             "driver")).split('/')[-1]
            vmxml.remove_all_device_by_type('controller')
            remove_devices(vmxml, 'address')
            remove_devices(vmxml, 'usb')
            osxml = vmxml.os
            if "i440fx" in vmxml.os.machine:
                osxml.machine = "q35"
                vmxml.os = osxml
            add_numa(vmxml)
            bus_id = setup_controller(nic_num, controller_index, ctl_models)
            vmxml.sync()
            logging.debug(vmxml)
            iface_list = create_iface_list(bus_id, nic_num, vf_list)
            for iface in iface_list:
                # NOTE(review): result of this cat is discarded -- it only
                # echoes the interface xml into the job log.
                process.run("cat %s" % iface.xml, shell=True).stdout_text
                result = virsh.attach_device(vm_name,
                                             iface.xml,
                                             flagstr=option,
                                             ignore_status=True,
                                             debug=True)
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            result = virsh.start(vm_name, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
            live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug(live_xml)
            check_guestos(iface_list)
            check_numa(vf_driver)
    finally:
        # Restore the guest definition and undo host-side VF/network setup.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if driver == "mlx4_core":
            # Reload mlx4 driver to default setting
            process.run(
                "modprobe -r mlx4_en ; modprobe -r mlx4_ib ; modprobe -r mlx4_core",
                shell=True)
            process.run(
                "modprobe mlx4_core; modprobe mlx4_ib;  modprobe mlx4_en",
                shell=True)
        else:
            process.run("echo 0 > %s/sriov_numvfs" % pci_address, shell=True)
        if vf_type == "vf_pool" or vf_type == "macvtap_network":
            virsh.net_destroy(net_name)
            virsh.net_undefine(net_name, ignore_status=True)
Example #59
0
 def trigger_events(events_list=None):
     """
     Trigger various events in events_list and collect the messages that
     "virsh event" is expected to report for them.

     :param events_list: list of event names to trigger (defaults to none)
     :return: list of expected event message patterns; '%s' placeholders
              are filled in by the caller with the domain name
     :raise error.TestError: if an unsupported event name is given
     """
     # Avoid the mutable-default-argument pitfall: a shared list default
     # would accumulate state across calls.
     if events_list is None:
         events_list = []
     expected_events_list = []
     tmpdir = data_dir.get_tmp_dir()
     save_path = os.path.join(tmpdir, "vm_event.save")
     new_disk = os.path.join(tmpdir, "new_disk.img")
     try:
         for event in events_list:
             # "start"/"restore" need an inactive guest; every other event
             # needs a running, logged-in one.
             if event in ["start", "restore"]:
                 if vm.is_alive():
                     vm.destroy()
             else:
                 if not vm.is_alive():
                     vm.start()
                     vm.wait_for_login().close()
             if event == "start":
                 virsh.start(vm_name, **virsh_dargs)
                 expected_events_list.append("'lifecycle' for %s:" " Started Booted")
                 vm.wait_for_login().close()
             elif event == "save":
                 virsh.save(vm_name, save_path, **virsh_dargs)
                 expected_events_list.append("'lifecycle' for %s:" " Stopped Saved")
             elif event == "restore":
                 if not os.path.exists(save_path):
                     logging.error("%s not exist", save_path)
                 else:
                     virsh.restore(save_path, **virsh_dargs)
                     expected_events_list.append("'lifecycle' for %s:" " Started Restored")
             elif event == "destroy":
                 virsh.destroy(vm_name, **virsh_dargs)
                 expected_events_list.append("'lifecycle' for %s:" " Stopped Destroyed")
             elif event == "reset":
                 virsh.reset(vm_name, **virsh_dargs)
                 expected_events_list.append("'reboot' for %s")
             elif event == "vcpupin":
                 virsh.vcpupin(vm_name, "0", "0", **virsh_dargs)
                 expected_events_list.append("'tunable' for %s:" "\n\tcputune.vcpupin0: 0")
             elif event == "emulatorpin":
                 virsh.emulatorpin(vm_name, "0", **virsh_dargs)
                 expected_events_list.append("'tunable' for %s:" "\n\tcputune.emulatorpin: 0")
             elif event == "setmem":
                 virsh.setmem(vm_name, 1048576, **virsh_dargs)
                 expected_events_list.append("'balloon-change' for %s:")
             elif event == "detach-disk":
                 if not os.path.exists(new_disk):
                     open(new_disk, "a").close()
                 # Attach disk firstly, this event will not be catched
                 virsh.attach_disk(vm_name, new_disk, "vdb", **virsh_dargs)
                 virsh.detach_disk(vm_name, "vdb", **virsh_dargs)
                 expected_events_list.append("'device-removed' for %s:" " virtio-disk1")
             else:
                 raise error.TestError("Unsupported event: %s" % event)
             # Event may not received immediately
             time.sleep(3)
     finally:
         # Only clean up here; returning inside 'finally' (as the old code
         # did) silently swallows any in-flight exception.
         if os.path.exists(save_path):
             os.unlink(save_path)
         if os.path.exists(new_disk):
             os.unlink(new_disk)
     return expected_events_list
Example #60
0
def run(test, params, env):
    """
    Test command: virsh domstats.

    1.Prepare vm state.
    2.Perform virsh domstats operation.
    3.Confirm the test result.
    4.Recover test environment.

    :param test: test object (unused directly; failures raised via error.*)
    :param params: cartesian config parameters
    :param env: test environment holding the vm objects
    """
    default_vm_name = params.get("main_vm", "virt-tests-vm1")
    default_vm = env.get_vm(default_vm_name)
    vm_list = params.get("vm_list", "")
    vm_state = params.get("vm_state", "")
    # Default to "" so the "--enforce"/"--raw" concatenation below cannot
    # raise TypeError when the option is absent from the config.
    domstats_option = params.get("domstats_option", "")
    raw_print = "yes" == params.get("raw_print", "no")
    enforce_command = "yes" == params.get("enforce_command", "no")
    status_error = (params.get("status_error", "no") == "yes")

    # All vms under test: the default one plus any extras named in
    # vm_list (skipping a duplicate of the default).
    vms = [default_vm]
    if vm_list:
        for name in vm_list.split():
            if name != default_vm_name:
                vms.append(env.get_vm(name))
    backup_xml_list = []
    try:
        if not status_error:
            for vm in vms:
                # Back up xml file
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
                backup_xml_list.append(vmxml.copy())
                if vm_state == "crash":
                    if vm.is_alive():
                        vm.destroy(gracefully=False)
                    # Keep the crashed state around so domstats can see it.
                    vmxml.on_crash = "preserve"
                    # Add <panic> device to domain
                    panic_dev = Panic()
                    panic_dev.addr_type = "isa"
                    panic_dev.addr_iobase = "0x505"
                    vmxml.add_device(panic_dev)
                    vmxml.sync()
                    virsh.start(vm.name, ignore_status=False)
                    vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm.name)
                    # Skip this test if no panic device find
                    if not vmxml_new.xmltreefile.find('devices').findall('panic'):
                        raise error.TestNAError("No 'panic' device in the guest,"
                                                " maybe your libvirt version"
                                                " doesn't support it.")
                prepare_vm_state(vm, vm_state)
        if enforce_command:
            domstats_option += " --enforce"
        if raw_print:
            domstats_option += " --raw"
        # Run virsh command
        result = virsh.domstats(vm_list, domstats_option, ignore_status=True,
                                debug=True)
        status = result.exit_status
        output = result.stdout.strip()

        # check status_error
        if status_error:
            if not status:
                # Unsupported flags mean the libvirt build lacks the
                # feature -> not applicable rather than a failure.
                if "unsupported flags" in result.stderr:
                    raise error.TestNAError(result.stderr)
                raise error.TestFail("Run successfully with wrong command!")
        else:
            if status:
                raise error.TestFail("Run failed with right command")
            else:
                # Validate the command output for every vm under test.
                for vm in vms:
                    if not check_output(output, vm, vm_state, domstats_option):
                        raise error.TestFail("Check command output failed")
    finally:
        try:
            for vm in vms:
                vm.destroy(gracefully=False)
        except AttributeError:
            # env.get_vm may have returned None for an unknown name.
            pass
        for backup_xml in backup_xml_list:
            backup_xml.sync()