Example 1
 def do_operation():
     """
         Perform the given operation on the guest OS with a VF attached
         and check the OS behavior afterwards.
     """
     if operation == "resume_suspend":
         try:
             virsh.suspend(vm.name, debug=True, ignore_status=False)
             virsh.resume(vm.name, debug=True, ignore_status=False)
             get_ip_by_mac(mac_addr, timeout=120)
         except process.CmdError as detail:
             err_msg = "Suspend-Resume %s with vf failed: %s" % (vm_name, detail)
             test.fail(err_msg)
     if operation == "reboot":
         try:
             if vm.serial_console is not None:
                 vm.cleanup_serial_console()
                 vm.create_serial_console()
             virsh.reboot(vm.name, ignore_status=False)
             get_ip_by_mac(mac_addr, timeout=120)
         except process.CmdError as detail:
             err_msg = "Reboot %s with vf failed: %s" % (vm_name, detail)
             test.fail(err_msg)
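     # managedsave is expected to fail while the VF hostdev is attached,
     # hence the result is checked with expect_error=True below.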
     if operation == "save":
         result = virsh.managedsave(vm_name, ignore_status=True, debug=True)
         utils_test.libvirt.check_exit_status(result, expect_error=True)
Example 2
    def edit_vcpu(source, guest_name):
        """
        Modify the VM's cpu information with the virsh edit command.

        :param source: option passed to virsh edit.
        :param guest_name: vm's name.
        :return: True if the edit succeeded, False if it failed.
        """
        dic_mode = {
            "edit": r":%s /[0-9]*<\/vcpu>/" + expected_vcpu + r"<\/vcpu>",
            "recover": r":%s /[0-9]*<\/vcpu>/" + original_vcpu + r"<\/vcpu>"}
        status = modify_vcpu(source, dic_mode["edit"])
        if not status:
            return status
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(guest_name, ignore_status=True)
            virsh.destroy(guest_name)
        elif params.get("start_vm") == "yes":
            virsh.destroy(guest_name)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vcpus = str(vmxml.vcpu)
        # Recover cpuinfo
        # Use name rather than source, since source could be domid
        status = modify_vcpu(guest_name, dic_mode["recover"])
        if status and vcpus != expected_vcpu:
            return False
        return status
Example 3
    def do_operation():
        """
            Perform the given operation on the guest OS with a VF attached
            and check the OS behavior afterwards.
        """
        if operation == "resume_suspend":
            try:
                virsh.suspend(vm.name, debug=True, ignore_status=False)
                virsh.resume(vm.name, debug=True, ignore_status=False)
                get_ip_by_mac(mac_addr, timeout=120)
            except process.CmdError as detail:
                err_msg = "Suspend-Resume %s with vf failed: %s" % (vm_name,
                                                                    detail)
                test.fail(err_msg)
        if operation == "reboot":
            try:
                if vm.serial_console is not None:
                    vm.cleanup_serial_console()
                    vm.create_serial_console()
                virsh.reboot(vm.name, ignore_status=False)
                get_ip_by_mac(mac_addr, timeout=120)
            except process.CmdError as detail:
                err_msg = "Reboot %s with vf failed: %s" % (vm_name, detail)
                test.fail(err_msg)
        if operation == "save":
            result = virsh.managedsave(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=True)

        if operation == "restart_libvirtd":
            detach_interface()
            utils_libvirtd.libvirtd_restart()
            interface = attach_interface()
Example 4
    def edit_vcpu(source, guest_name):
        """
        Modify the VM's cpu information with the virsh edit command.

        :param source: option passed to virsh edit.
        :param guest_name: vm's name.
        :return: True if the edit succeeded, False if it failed.
        """
        dic_mode = {
            "edit": r":%s /[0-9]*<\/vcpu>/" + expected_vcpu + r"<\/vcpu>",
            "recover": r":%s /[0-9]*<\/vcpu>/" + original_vcpu + r"<\/vcpu>"
        }
        status = modify_vcpu(source, dic_mode["edit"])
        if not status:
            return status
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(guest_name, ignore_status=True)
            virsh.destroy(guest_name)
        elif params.get("start_vm") == "yes":
            virsh.destroy(guest_name)
        vcpus = vm.dominfo()["CPU(s)"]
        # Recover cpuinfo
        # Use name rather than source, since source could be domid
        status = modify_vcpu(guest_name, dic_mode["recover"])
        if status and vcpus != expected_vcpu:
            return False
        return status
Example 5
    def edit_vcpu(source, guest_name):
        """
        Modify the VM's cpu information with the virsh edit command.

        :param source: option passed to virsh edit.
        :param guest_name: vm's name.
        :return: True if the edit succeeded, False if it failed.
        """
        dic_mode = {
            "edit": ":%s /[0-9]*<\/vcpu>/" + expected_vcpu + "<\/vcpu>",
                    "recover": ":%s /[0-9]*<\/vcpu>/" + original_vcpu + "<\/vcpu>"}
        status = modify_vcpu(source, dic_mode["edit"])
        if not status:
            return status
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(guest_name, ignore_status=True)
            virsh.destroy(guest_name)
        elif params.get("start_vm") == "yes":
            virsh.destroy(guest_name)
        vcpus = vm.dominfo()["CPU(s)"]
        # Recover cpuinfo
        status = modify_vcpu(source, dic_mode["recover"])
        if status and vcpus != expected_vcpu:
            return False
        return status
Example 6
    def edit_vcpu(source, guest_name):
        """
        Modify the VM's cpu information with the virsh edit command.

        @param source: virsh edit's option.
        @param guest_name: vm's name.
        @return: True if edit succeeded, False if edit failed.
        """
        dic_mode = {
            "edit": ":%s /1<\/vcpu>/2<\/vcpu>",
            "recover": ":%s /2<\/vcpu>/1<\/vcpu>"
        }
        status = modify_vcpu(source, dic_mode["edit"])
        if not status:
            return status
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(guest_name, ignore_status=True)
            virsh.destroy(guest_name)
        elif params.get("start_vm") == "yes":
            virsh.destroy(guest_name)
        vcpus = vm.dominfo()["CPU(s)"]
        # Recover cpuinfo
        status = modify_vcpu(source, dic_mode["recover"])
        if status and vcpus != '2':
            return False
        return status
Example 7
    def edit_vcpu(source):
        """
        Modify the VM's cpu information with the virsh edit command.

        :param source: option passed to virsh edit.
        :return: True if the edit succeeded, False if it failed.
        """
        vcpucount_result = virsh.vcpucount(vm_name,
                                           options="--config --maximum")
        if vcpucount_result.exit_status:
            # Fall back to the libvirt_xml way to read the vcpu count.
            original_vcpu = str(vmxml.vcpu)
        else:
            original_vcpu = vcpucount_result.stdout.strip()

        expected_vcpu = str(int(original_vcpu) + 1)
        if status_error != "yes":
            # check if topology is defined and change vcpu accordingly
            try:
                vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(source)
                topology = vmxml_backup.get_cpu_topology()
                sockets = str(int(topology['sockets']) + 1)
                cores = topology['cores']
                threads = topology['threads']
                vmcpu_xml = vm_xml.VMCPUXML()
                vmcpu_xml['topology'] = {
                    'sockets': sockets,
                    'cores': cores,
                    'threads': threads
                }
                vmxml_backup['cpu'] = vmcpu_xml
                vmxml_backup.sync()
                expected_vcpu = str(int(sockets) * int(cores) * int(threads))
            except Exception:
                expected_vcpu = str(int(original_vcpu) + 1)
        dic_mode = {
            "edit": r":%s /[0-9]*<\/vcpu>/" + expected_vcpu + r"<\/vcpu>",
            "recover": r":%s /[0-9]*<\/vcpu>/" + original_vcpu + r"<\/vcpu>"
        }
        status = libvirt.exec_virsh_edit(source, [dic_mode["edit"]])
        logging.info(status)
        if not status:
            vmxml.sync()
            return status
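        # A successful edit is unexpected while libvirtd is stopped.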
        if libvirtd_stat == "off":
            return False
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(vm_name, ignore_status=True)
            virsh.destroy(vm_name)
        elif params.get("start_vm") == "yes":
            virsh.destroy(vm_name)
        new_vcpus = str(vm_xml.VMXML.new_from_inactive_dumpxml(vm_name).vcpu)
        # Recover cpuinfo
        # Use name rather than source, since source could be domid
        status = libvirt.exec_virsh_edit(vm_name, [dic_mode["recover"]])
        vmxml.sync()
        if status and new_vcpus != expected_vcpu:
            return False
        return status
Example 8
def run(test, params, env):

    vm_name = params.get("main_vm")
    status_error = ("yes" == params.get("status_error", "no"))
    start_ga = ("yes" == params.get("start_ga", "yes"))
    prepare_channel = ("yes" == params.get("prepare_channel", "yes"))
    src_path = params.get("src_path")
    tgt_name = params.get("tgt_name", "org.qemu.guest_agent.0")
    restart_libvirtd = ("yes" == params.get("restart_libvirtd"))
    suspend_resume_guest = ("yes" == params.get("suspend_resume_guest"))
    hotunplug_ga = ("yes" == params.get("hotunplug_ga"))
    label = params.get("con_label")
    vm = env.get_vm(vm_name)

    if src_path:
        socket_file_dir = os.path.dirname(src_path)
        if not os.path.exists(socket_file_dir):
            os.mkdir(socket_file_dir)
        shutil.chown(socket_file_dir, "qemu", "qemu")
        utils_selinux.set_context_of_file(filename=socket_file_dir,
                                          context=label)

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
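    # Remove any existing guest-agent channels so the test starts from a
    # clean domain configuration.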
    vmxml.remove_agent_channels()
    vmxml.sync()

    try:
        if prepare_channel:
            vm.prepare_guest_agent(start=start_ga,
                                   channel=True,
                                   source_path=src_path)

        if restart_libvirtd:
            utils_libvirtd.libvirtd_restart()

        if suspend_resume_guest:
            virsh.suspend(vm_name, debug=True)
            virsh.resume(vm_name, debug=True)

        if hotunplug_ga:
            ga_xml = get_ga_xml(vm, vm_name)
            result = virsh.detach_device(vm_name, ga_xml)
            if result.exit_status:
                test.fail("hotunplug guest agent device failed, %s" % result)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            if vmxml.get_agent_channels():
                test.fail("hotunplug guest agent device failed as "
                          "guest agent xml still exists")
        else:
            if start_ga != check_ga_state(vm, vm_name):
                test.fail("guest agent device is not in correct state")

        check_ga_function(vm_name, status_error, hotunplug_ga)
    finally:
        vm.destroy()
        backup_xml.sync()
Example 9
    def edit_vcpu(source):
        """
        Modify the VM's cpu information with the virsh edit command.

        :param source: option passed to virsh edit.
        :return: True if the edit succeeded, False if it failed.
        """
        vcpucount_result = virsh.vcpucount(vm_name,
                                           options="--config --maximum")
        if vcpucount_result.exit_status:
            # Fall back to the libvirt_xml way to read the vcpu count.
            original_vcpu = str(vmxml.vcpu)
        else:
            original_vcpu = vcpucount_result.stdout.strip()

        expected_vcpu = str(int(original_vcpu) + 1)
        if status_error != "yes":
            # check if topology is defined and change vcpu accordingly
            try:
                vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(source)
                topology = vmxml_backup.get_cpu_topology()
                sockets = str(int(topology['sockets']) + 1)
                cores = topology['cores']
                threads = topology['threads']
                vmcpu_xml = vm_xml.VMCPUXML()
                vmcpu_xml['topology'] = {'sockets': sockets, 'cores': cores,
                                         'threads': threads}
                vmxml_backup['cpu'] = vmcpu_xml
                vmxml_backup.sync()
                expected_vcpu = str(int(sockets) * int(cores) * int(threads))
            except Exception:
                expected_vcpu = str(int(original_vcpu) + 1)
        dic_mode = {
            "edit": r":%s /[0-9]*<\/vcpu>/" + expected_vcpu + r"<\/vcpu>",
            "recover": r":%s /[0-9]*<\/vcpu>/" + original_vcpu + r"<\/vcpu>"}
        status = libvirt.exec_virsh_edit(source, [dic_mode["edit"]])
        logging.info(status)
        if not status:
            vmxml.sync()
            return status
        if libvirtd_stat == "off":
            return False
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(vm_name, ignore_status=True)
            virsh.destroy(vm_name)
        elif params.get("start_vm") == "yes":
            virsh.destroy(vm_name)
        new_vcpus = str(vm_xml.VMXML.new_from_inactive_dumpxml(vm_name).vcpu)
        # Recover cpuinfo
        # Use name rather than source, since source could be domid
        status = libvirt.exec_virsh_edit(vm_name, [dic_mode["recover"]])
        vmxml.sync()
        if status and new_vcpus != expected_vcpu:
            return False
        return status
Example 10
 def set_condn(action, recover=False):
     """
     Set/reset guest state/action
     :param action: Guest state change/action
     :param recover: whether to recover the given state (default: False)
     """
     if not recover:
         if action == "pin_vcpu":
             for i in range(cur_vcpu):
                 virsh.vcpupin(vm_name, i, hmi_cpu, "--live",
                               ignore_status=False, debug=True)
                 virsh.emulatorpin(vm_name, hmi_cpu, "live",
                                   ignore_status=False, debug=True)
         elif action == "filetrans":
             utils_test.run_file_transfer(test, params, env)
         elif action == "save":
             save_file = os.path.join(data_dir.get_tmp_dir(),
                                      vm_name + ".save")
             result = virsh.save(vm_name, save_file, ignore_status=True,
                                 debug=True)
             utils_test.libvirt.check_exit_status(result)
             time.sleep(10)
             if os.path.exists(save_file):
                 result = virsh.restore(save_file, ignore_status=True,
                                        debug=True)
                 utils_test.libvirt.check_exit_status(result)
                 os.remove(save_file)
         elif action == "suspend":
             result = virsh.suspend(vm_name, ignore_status=True, debug=True)
             utils_test.libvirt.check_exit_status(result)
             time.sleep(10)
             result = virsh.resume(vm_name, ignore_status=True, debug=True)
             utils_test.libvirt.check_exit_status(result)
     return
Example 11
    def vm_recover_check(guest_name):
        """
        Check if the vm can be recovered correctly.

        :param guest_name : Checked vm's name.
        """
        ret = virsh.dom_list()
        # This time vm should not be in the list
        if re.search(guest_name, ret.stdout):
            raise error.TestFail("virsh list output invalid")
        virsh.start(guest_name)
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(guest_name, ignore_status=True)
        # This time vm should be in the list
        ret = virsh.dom_list()
        if not re.search(guest_name, ret.stdout):
            raise error.TestFail("virsh list output invalid")
Example 12
    def vm_recover_check(guest_name):
        """
        Check if the vm can be recovered correctly.

        @param guest_name: Checked vm's name.
        """
        ret = virsh.dom_list()
        # This time vm should not be in the list
        if re.search(guest_name, ret.stdout):
            raise error.TestFail("virsh list output invalid")
        virsh.start(guest_name)
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(guest_name)
        # This time vm should be in the list
        ret = virsh.dom_list()
        if not re.search(guest_name, ret.stdout):
            raise error.TestFail("virsh list output invalid")
Example 13
    def edit_vcpu(source):
        """
        Modify the VM's cpu information with the virsh edit command.

        :param source: option passed to virsh edit.
        :return: True if the edit succeeded, False if it failed.
        """
        vcpucount_result = virsh.vcpucount(vm_name,
                                           options="--config --maximum")
        if vcpucount_result.exit_status:
            # Fall back to the libvirt_xml way to read the vcpu count.
            original_vcpu = str(vmxml.vcpu)
        else:
            original_vcpu = vcpucount_result.stdout.strip()

        expected_vcpu = str(int(original_vcpu) + 1)
        dic_mode = {
            "edit": r":%s /[0-9]*<\/vcpu>/" + expected_vcpu + r"<\/vcpu>",
            "recover": r":%s /[0-9]*<\/vcpu>/" + original_vcpu + r"<\/vcpu>"
        }
        status = libvirt.exec_virsh_edit(source, [dic_mode["edit"]])
        logging.info(status)
        if not status:
            return status
        if libvirtd_stat == "off":
            return False
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(vm_name, ignore_status=True)
            virsh.destroy(vm_name)
        elif params.get("start_vm") == "yes":
            virsh.destroy(vm_name)
        new_vcpus = str(vm_xml.VMXML.new_from_inactive_dumpxml(vm_name).vcpu)
        # Recover cpuinfo
        # Use name rather than source, since source could be domid
        status = libvirt.exec_virsh_edit(vm_name, [dic_mode["recover"]])
        if status and new_vcpus != expected_vcpu:
            return False
        return status
Example 14
    def edit_vcpu(source):
        """
        Modify the VM's cpu information with the virsh edit command.

        :param source: option passed to virsh edit.
        :return: True if the edit succeeded, False if it failed.
        """
        vcpucount_result = virsh.vcpucount(vm_name,
                                           options="--config --maximum")
        if vcpucount_result.exit_status:
            # Fall back to the libvirt_xml way to read the vcpu count.
            original_vcpu = str(vmxml.vcpu)
        else:
            original_vcpu = vcpucount_result.stdout.strip()

        expected_vcpu = str(int(original_vcpu) + 1)
        dic_mode = {
            "edit": r":%s /[0-9]*<\/vcpu>/" + expected_vcpu + r"<\/vcpu>",
            "recover": r":%s /[0-9]*<\/vcpu>/" + original_vcpu + r"<\/vcpu>"}
        status = exec_edit(source, [dic_mode["edit"]])
        logging.info(status)
        if not status:
            return status
        if libvirtd_stat == "off":
            return False
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(vm_name, ignore_status=True)
            virsh.destroy(vm_name)
        elif params.get("start_vm") == "yes":
            virsh.destroy(vm_name)
        new_vcpus = str(vm_xml.VMXML.new_from_inactive_dumpxml(vm_name).vcpu)
        # Recover cpuinfo
        # Use name rather than source, since source could be domid
        status = exec_edit(vm_name, [dic_mode["recover"]])
        if status and new_vcpus != expected_vcpu:
            return False
        return status
Example 15
    def edit_vcpu(source, guest_name):
        """
        Modify the VM's cpu information with the virsh edit command.

        @param source: virsh edit's option.
        @param guest_name: vm's name.
        @return: True if edit succeeded, False if edit failed.
        """
        dic_mode = {"edit" : ":%s /1<\/vcpu>/2<\/vcpu>",
                    "recover" : ":%s /2<\/vcpu>/1<\/vcpu>"}
        status = modify_vcpu(source, dic_mode["edit"])
        if not status:
            return status
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(guest_name)
            virsh.destroy(guest_name)
        elif params.get("start_vm") == "yes":
            virsh.destroy(guest_name)
        vcpus = vm.dominfo()["CPU(s)"]
        # Recover cpuinfo
        status = modify_vcpu(source, dic_mode["recover"])
        if status and vcpus != '2':
            return False
        return status
Example 16
 def pause_resume_vm(vm_name):
     """
     Pause the guest with a QEMU monitor "stop" command, then resume it
     with virsh resume.
     :param vm_name: string, name of the vm
     :return: None
     """
     cmd_ = '''virsh qemu-monitor-command %s '{"execute":"stop"}' ''' % vm_name
     out = process.run(cmd_, shell=True).stdout_text.strip()
     logging.debug("The out of the qemu-monitor-command is %s", out)
     vm = env.get_vm(vm_name)
     if not vm.is_paused():
         test.error("VM %s is not paused by qemu-monitor-command!" %
                    vm_name)
     res = virsh.resume(vm_name, debug=True)
     libvirt.check_exit_status(res)
Example 18
 def test_suspend():
     # Suspend
     result = virsh.suspend(vm_name, ignore_status=True, debug=True)
     libvirt.check_exit_status(result)
     cmd = "virsh domstate %s" % vm_name
     if "paused" not in virsh.domstate(vm_name, **dargs).stdout:
         test.fail("suspend vm failed")
     # Resume
     result = virsh.resume(vm_name, ignore_status=True, debug=True)
     libvirt.check_exit_status(result)
     if "running" not in virsh.domstate(vm_name, **dargs).stdout:
         test.fail("resume vm failed")
     if check_attach_pci():
         logging.debug("adapter found after suspend/resume")
     else:
         test.fail("passthroughed adapter not found after suspend/resume")
Example 19
 def guest_lifecycle():
     if operation == "suspend":
         # Suspend
         logging.info("Performing VM Suspend with device pass-through")
         result = virsh.suspend(vm_name, ignore_status=True, debug=True)
         libvirt.check_exit_status(result)
         libvirt.check_vm_state(vm_name, 'paused')
         time.sleep(10)
         # Resume
         logging.info("Performing VM Resume with device pass-through")
         result = virsh.resume(vm_name, ignore_status=True, debug=True)
         libvirt.check_exit_status(result)
         libvirt.check_vm_state(vm_name, 'running')
     elif operation == "shutdown":
         # Shutdown and Start the VM
         try:
             logging.info("Performing VM Shutdown with device pass-through")
             vm.shutdown()
             vm.wait_for_shutdown()
             libvirt.check_vm_state(vm_name, 'shut off')
             logging.info("Performing VM Start with device pass-through")
             vm.start()
             libvirt.check_vm_state(vm_name, 'running')
             vm.wait_for_login().close()
         except virt_vm.VMStartError as detail:
             test.fail("VM failed to start."
                       "Error: %s" % str(detail))
     elif operation == "reboot":
         # Reboot
         logging.info("Performing VM Reboot with device pass-through")
         result = virsh.reboot(vm_name, ignore_status=True, debug=True)
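         # Reboot may be unsupported in some configurations; tolerate that.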
         if supported_err in result.stderr.strip():
             logging.info("Reboot is not supported")
         else:
             libvirt.check_exit_status(result)
     else:
         logging.debug("No operation for the domain")
     if sorted(vm.get_pci_devices()) != sorted(nic_list_before):
         logging.debug("Adapter found after lifecycle operation")
     else:
         test.fail("Passthroughed adapter not found after lifecycle operation")
Example 20
def run(test, params, env):
    """
    Test command: virsh dompmsuspend <domain> <target>
    The command suspends a running domain using guest OS's power management.
    """

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_state = params.get("vm_state", "running")
    suspend_target = params.get("pm_suspend_target", "mem")
    pm_enabled = params.get("pm_enabled", "not_set")
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_suspend_resume = "yes" == params.get("test_suspend_resume", "no")
    pmsuspend_error = 'yes' == params.get("pmsuspend_error", 'no')

    # Libvirt acl test related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # A backup of original vm
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    # Expected possible fail patterns.
    # Error output should match one of these patterns.
    # An empty list means the test should succeed.
    fail_pat = []
    virsh_dargs = {'debug': True, 'ignore_status': True}
    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs_copy = virsh_dargs.copy()
        virsh_dargs_copy['uri'] = uri
        virsh_dargs_copy['unprivileged_user'] = unprivileged_user
        if pmsuspend_error:
            fail_pat.append('access denied')

    # Setup possible failure patterns
    if pm_enabled == 'not_set':
        fail_pat.append('not supported')
    if pm_enabled == 'no':
        fail_pat.append('disabled')

    if vm_state == 'paused':
        fail_pat.append('not responding')
    elif vm_state == 'shutoff':
        fail_pat.append('not running')

    try:
        if vm.is_alive():
            vm.destroy()

        # Set pm tag in domain's XML if needed.
        if pm_enabled == 'not_set':
            try:
                if vmxml.pm:
                    del vmxml.pm
            except xcepts.LibvirtXMLNotFoundError:
                pass
        else:
            pm_xml = vm_xml.VMPMXML()
            if suspend_target == 'mem':
                pm_xml.mem_enabled = pm_enabled
            elif suspend_target == 'disk':
                pm_xml.disk_enabled = pm_enabled
            elif suspend_target == 'hybrid':
                pm_xml.mem_enabled = pm_enabled
                pm_xml.disk_enabled = pm_enabled
            vmxml.pm = pm_xml
        vmxml.sync()

        vm.prepare_guest_agent()

        # Create a swap partition/file if necessary: suspend to disk (or
        # hybrid) requires swap space inside the guest.
        need_mkswap = False
        if suspend_target in ['disk', 'hybrid']:
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition.")
            vm.create_swap_partition()

        try:
            libvirtd = utils_libvirtd.Libvirtd()
            savefile = os.path.join(test.tmpdir, "%s.save" % vm_name)
            session = vm.wait_for_login()
            # Touch a file on guest to test managed save command.
            if test_managedsave:
                session.cmd_status("touch pmtest")

            # Set vm state
            if vm_state == "paused":
                vm.pause()
            elif vm_state == "shutoff":
                vm.destroy()

            # Run test case
            result = virsh.dompmsuspend(vm_name, suspend_target, debug=True,
                                        uri=uri,
                                        unprivileged_user=unprivileged_user)
            if result.exit_status == 0:
                if fail_pat:
                    raise error.TestFail("Expected failed with %s, but run succeed"
                                         ":\n%s" % (fail_pat, result))
            else:
                if not fail_pat:
                    raise error.TestFail("Expected success, but run failed:\n%s"
                                         % result)
                if not any(p in result.stderr for p in fail_pat):
                    raise error.TestFail("Expected failed with one of %s, but "
                                         "failed with:\n%s" % (fail_pat, result))
            if test_managedsave:
                ret = virsh.managedsave(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # Dompmwakeup should return false here
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret, True)
                ret = virsh.start(vm_name)
                libvirt.check_exit_status(ret)
                if not vm.is_paused():
                    raise error.TestFail("Vm status is not paused before pm wakeup")
                if params.get('setup_libvirt_polkit') == 'yes':
                    ret = virsh.dompmwakeup(vm_name, **virsh_dargs_copy)
                else:
                    ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                if not vm.is_paused():
                    raise error.TestFail("Vm status is not paused after pm wakeup")
                ret = virsh.resume(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                sess = vm.wait_for_login()
                if sess.cmd_status("ls pmtest && rm -f pmtest"):
                    raise error.TestFail("Check managed save failed on guest")
                sess.close()
            if test_save_restore:
                # Run a series of operations to check libvirtd status.
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # Wait for vm is started
                vm.wait_for_login()
                ret = virsh.save(vm_name, savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.restore(savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # Wait for vm is started
                vm.wait_for_login()
                # run pmsuspend again
                ret = virsh.dompmsuspend(vm_name, suspend_target, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # save and restore the guest again.
                ret = virsh.save(vm_name, savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.restore(savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.destroy(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                if not libvirtd.is_running():
                    raise error.TestFail("libvirtd crashed")
            if test_suspend_resume:
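                # A pmsuspended domain rejects plain suspend/resume; both
                # calls must fail and leave the state unchanged.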
                ret = virsh.suspend(vm_name)
                libvirt.check_exit_status(ret, expect_error=True)
                if vm.state() != 'pmsuspended':
                    raise error.TestFail("VM state should be pmsuspended")
                ret = virsh.resume(vm_name)
                libvirt.check_exit_status(ret, expect_error=True)
                if vm.state() != 'pmsuspended':
                    raise error.TestFail("VM state should be pmsuspended")
        finally:
            libvirtd.restart()
            # Remove the tmp file
            if os.path.exists(savefile):
                os.remove(savefile)
            # Restore VM state
            if vm_state == "paused":
                vm.resume()

            if suspend_target in ['mem', 'hybrid']:
                if vm.state() == "pmsuspended":
                    virsh.dompmwakeup(vm_name)
            else:
                if vm.state() == "in shutdown":
                    vm.wait_for_shutdown()
                if vm.is_dead():
                    vm.start()

            # Cleanup
            session.close()

            if need_mkswap:
                vm.cleanup_swap()

    finally:
        # Destroy the vm.
        if vm.is_alive():
            vm.destroy()
        # Recover xml of vm.
        vmxml_backup.sync()
Example 21
def run(test, params, env):
    """
    Test command: virsh resume.

    1) Start vm, prepare options such as id, uuid
    2) Prepare vm state for test, default is paused.
    3) Prepare other environment
    4) Run command, get result.
    5) Check result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # Get parameters
    vm_ref = params.get("resume_vm_ref", "domname")
    vm_state = params.get("resume_vm_state", "paused")
    option_suffix = params.get("resume_option_suffix")
    status_error = params.get("status_error", "no")
    readonly = params.get("readonly", "no") == 'yes'

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    # Prepare vm state
    if vm_state == "paused":
        logging.info("Pausing VM %s", vm_name)
        vm.pause()
    elif vm_state == "shutoff":
        logging.info("Shutting off VM %s", vm_name)
        vm.destroy()

    # Prepare options
    if vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domid":
        vm_ref = domid
    elif vm_ref == "domuuid":
        vm_ref = domuuid
    elif domid and vm_ref == "hex_id":
        if domid == "-":
            vm_ref = domid
        else:
            vm_ref = hex(int(domid))

    if option_suffix:
        vm_ref = "%s %s" % (vm_ref, option_suffix)

    try:
        # Run resume command
        result = virsh.resume(vm_ref,
                              readonly=readonly,
                              ignore_status=True,
                              debug=True)

        # Get vm state after virsh resume executed.
        domstate = vm.state()

        # Check status_error
        if status_error == "yes":
            if result.exit_status == 0:
                raise exceptions.TestFail(
                    "Expect to fail to resume but succeeded")
        elif status_error == "no":
            if domstate == "paused":
                raise exceptions.TestFail(
                    "Resume VM failed. State is still paused")
            if result.exit_status != 0:
                raise exceptions.TestFail(
                    "Expect to resume successfully but failed")
    finally:
        vm.destroy()
Example 22
def run(test, params, env):
    """
    Test command: virsh domstate.

    1. Prepare the test environment.
    2. When libvirtd == "off", stop the libvirtd service.
    3. Perform the virsh domstate operation.
    4. Recover the test environment.
    5. Confirm the test result.
    """
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)

    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("domstate_vm_ref")
    status_error = (params.get("status_error", "no") == "yes")
    extra = params.get("domstate_extra", "")
    vm_action = params.get("domstate_vm_action", "")

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    try:
        if vm_action == "suspend":
            virsh.suspend(vm_name, ignore_status=False)
        elif vm_action == "resume":
            virsh.suspend(vm_name, ignore_status=False)
            virsh.resume(vm_name, ignore_status=False)
        elif vm_action == "destroy":
            virsh.destroy(vm_name, ignore_status=False)
        elif vm_action == "start":
            virsh.destroy(vm_name, ignore_status=False)
            virsh.start(vm_name, ignore_status=False)
    except error.CmdError:
        raise error.TestError("Guest prepare action error!")

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    if vm_ref == "remote":
        remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
        local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
        remote_pwd = params.get("remote_pwd", None)
        if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
            raise error.TestNAError("Test 'remote' parameters not setup")
        status = 0
        try:
            remote_uri = libvirt_vm.complete_uri(local_ip)
            session = remote.remote_login("ssh", remote_ip, "22", "root",
                                          remote_pwd, "#")
            session.cmd_output('LANG=C')
            command = "virsh -c %s domstate %s" % (remote_uri, vm_name)
            status, output = session.cmd_status_output(command,
                                                       internal_timeout=5)
            session.close()
        except error.CmdError:
            status = 1
    else:
        result = virsh.domstate(vm_ref, extra, ignore_status=True)
        status = result.exit_status
        output = result.stdout.strip()

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # check status_error
    if status_error:
        if not status:
            raise error.TestFail("Run successfully with wrong command!")
    else:
        if status or not output:
            raise error.TestFail("Run failed with right command")
        if extra.count("reason"):
            if vm_action == "suspend":
                # If not, will cost long time to destroy vm
                virsh.destroy(vm_name)
                if not output.count("user"):
                    raise ActionError(vm_action)
            elif vm_action == "resume":
                if not output.count("unpaused"):
                    raise ActionError(vm_action)
            elif vm_action == "destroy":
                if not output.count("destroyed"):
                    raise ActionError(vm_action)
            elif vm_action == "start":
                if not output.count("booted"):
                    raise ActionError(vm_action)
        if vm_ref == "remote":
            if not (re.search("running", output) or re.search(
                    "blocked", output) or re.search("idle", output)):
                raise error.TestFail("Run failed with right command")
Example 23
def manipulate_vm(vm, operation, params=None):
    """
    Manipulate the VM.

    :param vm: VM instance
    :param operation: stress_in_vms, inject_nmi, dump, suspend_resume
                      or save_restore
    :param params: Test parameters
    """
    err_msg = ''
    # Special operations for test
    if operation == "stress":
        logging.debug("Load stress in VM")
        err_msg = utils_test.load_stress(operation, params=params, vms=[vm])[0]
    elif operation == "inject_nmi":
        inject_times = int(params.get("inject_times", 10))
        logging.info("Trying to inject nmi %s times", inject_times)
        while inject_times > 0:
            try:
                inject_times -= 1
                virsh.inject_nmi(vm.name, debug=True, ignore_status=False)
            except process.CmdError as detail:
                err_msg = "Inject nmi failed: %s" % detail
    elif operation == "dump":
        dump_times = int(params.get("dump_times", 10))
        logging.info("Trying to dump vm %s times", dump_times)
        while dump_times > 0:
            dump_times -= 1
            dump_path = os.path.join(data_dir.get_tmp_dir(), "dump.file")
            try:
                virsh.dump(vm.name, dump_path, debug=True, ignore_status=False)
            except (process.CmdError, OSError) as detail:
                err_msg = "Dump %s failed: %s" % (vm.name, detail)
            try:
                os.remove(dump_path)
            except OSError:
                pass
    elif operation == "suspend_resume":
        paused_times = int(params.get("paused_times", 10))
        logging.info("Trying to suspend/resume vm %s times", paused_times)
        while paused_times > 0:
            paused_times -= 1
            try:
                virsh.suspend(vm.name, debug=True, ignore_status=False)
                virsh.resume(vm.name, debug=True, ignore_status=False)
            except process.CmdError as detail:
                err_msg = "Suspend-Resume %s failed: %s" % (vm.name, detail)
    elif operation == "save_restore":
        save_times = int(params.get("save_times", 10))
        logging.info("Trying to save/restore vm %s times", save_times)
        while save_times > 0:
            save_times -= 1
            save_path = os.path.join(data_dir.get_tmp_dir(), "save.file")
            try:
                virsh.save(vm.name, save_path, debug=True,
                           ignore_status=False)
                virsh.restore(save_path, debug=True, ignore_status=False)
            except process.CmdError as detail:
                err_msg = "Save-Restore %s failed: %s" % (vm.name, detail)
            try:
                os.remove(save_path)
            except OSError:
                pass
    else:
        err_msg = "Unsupported operation in this function: %s" % operation
    return err_msg
Example 24
def run(test, params, env):
    """
    Test command: virsh restore.

    Restore a domain from a saved state in a file
    1. Prepare the test environment.
    2. When libvirtd == "off", stop the libvirtd service.
    3. Run the virsh restore command with the assigned option.
    4. Recover the test environment.
    5. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    session = vm.wait_for_login()

    os_type = params.get("os_type")
    status_error = ("yes" == params.get("status_error"))
    libvirtd = params.get("libvirtd", "on")
    extra_param = params.get("restore_extra_param")
    pre_status = params.get("restore_pre_status")
    vm_ref = params.get("restore_vm_ref")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # run test
    if vm_ref == "" or vm_ref == "xyz":
        status = virsh.restore(vm_ref, extra_param, debug=True,
                               unprivileged_user=unprivileged_user,
                               uri=uri,
                               ignore_status=True).exit_status
    else:
        if os_type == "linux":
            cmd = "cat /proc/cpuinfo"
            try:
                status, output = session.cmd_status_output(cmd, timeout=10)
            finally:
                session.close()
            if not re.search("processor", output):
                raise error.TestFail("Unable to read /proc/cpuinfo")
        tmp_file = os.path.join(test.tmpdir, "save.file")
        virsh.save(vm_name, tmp_file)
        if vm_ref == "saved_file":
            vm_ref = tmp_file
        elif vm_ref == "empty_new_file":
            tmp_file = os.path.join(test.tmpdir, "new.file")
            open(tmp_file, 'w').close()
            vm_ref = tmp_file
        if vm.is_alive():
            vm.destroy()
        if pre_status == "start":
            virsh.start(vm_name)
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()
        status = virsh.restore(vm_ref, extra_param, debug=True,
                               unprivileged_user=unprivileged_user,
                               uri=uri,
                               ignore_status=True).exit_status
    if not status_error:
        list_output = virsh.dom_list().stdout.strip()

    session.close()

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    try:
        if status_error:
            if not status:
                raise error.TestFail("Run successfully with wrong command!")
        else:
            if status:
                raise error.TestFail("Run failed with right command")
            if not re.search(vm_name, list_output):
                raise error.TestFail("Run failed with right command")
            if extra_param.count("paused"):
                if not vm.is_paused():
                    raise error.TestFail("Guest state should be"
                                         " paused after restore"
                                         " due to the option --paused")
            if extra_param.count("running"):
                if vm.is_dead() or vm.is_paused():
                    raise error.TestFail("Guest state should be"
                                         " running after restore"
                                         " due to the option --running")
    finally:
        if vm.is_paused():
            virsh.resume(vm_name)
Example 25
                    logging.debug("VM already started")
                else:
                    result = virsh.start(vm_name,
                                         ignore_status=True,
                                         debug=True)
                    libvirt.check_exit_status(result, start_vm_expect_fail)

    finally:
        new_count, new_current, mtype = get_xmldata(vm_name, tmpxml, options)
        logging.debug(
            "After run setvcpus: cpu_count=%d, cpu_current=%d,"
            " mtype=%s", new_count, new_current, mtype)

        # Cleanup
        if pre_vm_state == "paused":
            virsh.resume(vm_name, ignore_status=True)
        orig_config_xml.sync()
        if os.path.exists(tmpxml):
            os.remove(tmpxml)

    # check status_error
    if status_error == "yes":
        if setvcpu_exit_status == 0:
            # RHEL7/Fedora has a bug (BZ#1000354) against qemu-kvm, so throw
            # the bug info here
            if remove_vm_feature:
                logging.error(
                    "You may encounter bug: "
                    "https://bugzilla.redhat.com/show_bug.cgi?id=1000354")
            raise error.TestFail("Run successfully with wrong command!")
    else:
Example 26
def run(test, params, env):
    """
    Test command: virsh restore.

    Restore a domain from a saved state in a file
    1. Prepare the test environment.
    2. When libvirtd == "off", stop the libvirtd service.
    3. Run the virsh restore command with the assigned option.
    4. Recover the test environment.
    5. Confirm the test result.
    """
    def check_file_own(file_path, exp_uid, exp_gid):
        """
        Check the uid and gid of file_path

        :param file_path: The file path
        :param exp_uid: The expected uid
        :param exp_gid: The expected gid
        :raise: test.fail if the uid and gid of file are not expected
        """
        fstat_res = os.stat(file_path)
        if fstat_res.st_uid != exp_uid or fstat_res.st_gid != exp_gid:
            test.fail("The uid.gid {}.{} is not expected, it should be {}.{}.".
                      format(fstat_res.st_uid, fstat_res.st_gid, exp_uid,
                             exp_gid))

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    os_type = params.get("os_type")
    status_error = ("yes" == params.get("status_error"))
    libvirtd = params.get("libvirtd", "on")
    extra_param = params.get("restore_extra_param")
    pre_status = params.get("restore_pre_status")
    vm_ref = params.get("restore_vm_ref")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    time_before_save = int(params.get('time_before_save', 0))
    setup_nfs = "yes" == params.get("setup_nfs", "no")
    setup_iscsi = "yes" == params.get("setup_iscsi", "no")
    check_log = params.get("check_log")
    check_str_not_in_log = params.get("check_str_not_in_log")
    qemu_conf_dict = eval(params.get("qemu_conf_dict", "{}"))

    vm_ref_uid = None
    vm_ref_gid = None
    qemu_conf = None

    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")
    try:
        if "--xml" in extra_param:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name,
                                                  options="--migratable")
            backup_xml = vmxml.copy()
            # Grant more privileges on the file so that an unprivileged user
            # can access it.
            os.chmod(vmxml.xml, stat.S_IRWXU | stat.S_IRGRP | stat.S_IROTH)
            if not setup_nfs:
                extra_param = "--xml %s" % vmxml.xml
                dict_os_attrs = {}
                if "hd" in vmxml.os.boots:
                    dict_os_attrs.update({"boots": ["cdrom"]})
                    vmxml.set_os_attrs(**dict_os_attrs)
                else:
                    test.cancel("Please add 'hd' in boots for --xml testing")
                logging.info("vmxml os is %s after update" %
                             vmxml.os.xmltreefile)
            else:
                params["mnt_path_name"] = params.get("nfs_mount_dir")
                vm_ref_uid = params["change_file_uid"] = pwd.getpwnam(
                    "qemu").pw_uid
                vm_ref_gid = params["change_file_gid"] = grp.getgrnam(
                    "qemu").gr_gid
                libvirt.set_vm_disk(vm, params)

        session = vm.wait_for_login()
        # Clear log file
        if check_log:
            cmd = "> %s" % check_log
            process.run(cmd, shell=True, verbose=True)
        if qemu_conf_dict:
            logging.debug("Update qemu configuration file.")
            qemu_conf = libvirt.customize_libvirt_config(
                qemu_conf_dict, "qemu")
            process.run("cat /etc/libvirt/qemu.conf", shell=True, verbose=True)

        # run test
        if vm_ref == "" or vm_ref == "xyz":
            status = virsh.restore(vm_ref,
                                   extra_param,
                                   debug=True,
                                   unprivileged_user=unprivileged_user,
                                   uri=uri,
                                   ignore_status=True).exit_status
        else:
            if os_type == "linux":
                cmd = "cat /proc/cpuinfo"
                try:
                    status, output = session.cmd_status_output(cmd, timeout=10)
                finally:
                    session.close()
                if not re.search("processor", output):
                    test.fail("Unable to read /proc/cpuinfo")
            tmp_file = os.path.join(data_dir.get_tmp_dir(), "save.file")
            if setup_iscsi:
                tmp_file = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                          is_login=True,
                                                          image_size='1G')
            time.sleep(time_before_save)
            ret = virsh.save(vm_name, tmp_file, debug=True)
            libvirt.check_exit_status(ret)
            if vm_ref == "saved_file" or setup_iscsi:
                vm_ref = tmp_file
            elif vm_ref == "empty_new_file":
                tmp_file = os.path.join(data_dir.get_tmp_dir(), "new.file")
                with open(tmp_file, 'w') as tmp:
                    pass
                vm_ref = tmp_file

            # Change the ownership of the saved file
            if vm_ref_uid and vm_ref_gid:
                os.chown(vm_ref, vm_ref_uid, vm_ref_gid)
                tmpdir = data_dir.get_tmp_dir()
                dump_xml = os.path.join(tmpdir, "test.xml")
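                # Dump the domain XML embedded in the save image so it can
                # be passed back to virsh restore via --xml.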
                virsh.save_image_dumpxml(vm_ref, "> %s" % dump_xml)
                extra_param = "--xml %s" % dump_xml
                check_file_own(vm_ref, vm_ref_uid, vm_ref_gid)

            if vm.is_alive():
                vm.destroy()
            if pre_status == "start":
                virsh.start(vm_name)
            if libvirtd == "off":
                utils_libvirtd.libvirtd_stop()
            status = virsh.restore(vm_ref,
                                   extra_param,
                                   debug=True,
                                   unprivileged_user=unprivileged_user,
                                   uri=uri,
                                   ignore_status=True).exit_status
        if not status_error:
            list_output = virsh.dom_list().stdout.strip()

        session.close()

        # recover libvirtd service start
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        if status_error:
            if not status:
                if libvirtd == "off" and libvirt_version.version_compare(
                        5, 6, 0):
                    logging.info(
                        "From libvirt version 5.6.0 libvirtd is restarted "
                        "and command should succeed")
                else:
                    test.fail("Run successfully with wrong command!")
        else:
            if status:
                test.fail("Run failed with right command")
            if not re.search(vm_name, list_output):
                test.fail("Run failed with right command")
            if extra_param.count("paused"):
                if not vm.is_paused():
                    test.fail("Guest state should be"
                              " paused after restore"
                              " due to the option --paused")
            if (extra_param.count("running") or extra_param.count("xml")
                    or not extra_param):
                if vm.is_dead() or vm.is_paused():
                    test.fail("Guest state should be" " running after restore")
            if extra_param.count("xml"):
                if not setup_nfs:
                    aft_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                    boots_list = aft_vmxml.os.boots
                    if "hd" in boots_list or "cdrom" not in boots_list:
                        test.fail("Update xml with restore failed")
                else:
                    if vm_ref_uid and vm_ref_gid:
                        check_file_own(vm_ref, vm_ref_uid, vm_ref_gid)
                        vm.destroy()
                        check_file_own(vm_ref, vm_ref_uid, vm_ref_gid)
            if check_str_not_in_log and check_log:
                libvirt.check_logfile(check_str_not_in_log, check_log, False)
    finally:
        if vm.is_paused():
            virsh.resume(vm_name)
        if "--xml" in extra_param:
            backup_xml.sync()
        if setup_nfs:
            libvirt.setup_or_cleanup_nfs(is_setup=False,
                                         mount_dir=params.get("mnt_path_name"),
                                         export_dir=params.get("export_dir"),
                                         rm_export_dir=False)
        if setup_iscsi:
            libvirt.setup_or_cleanup_iscsi(False)
Example 27
    def set_condition(vm_name, condn, reset=False, guestbt=None):
        """
        Set domain to given state or reset it.
        """
        bt = None
        if not reset:
            if condn == "avocadotest":
                bt = utils_test.run_avocado_bg(vm, params, test)
                if not bt:
                    test.cancel("guest stress failed to start")
                # Allow stress to start
                time.sleep(condn_sleep_sec)
                return bt
            elif condn == "stress":
                utils_test.load_stress("stress_in_vms", params=params, vms=[vm])
            elif condn in ["save", "managedsave"]:
                # No action
                pass
            elif condn == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "hotplug":
                result = virsh.setvcpus(vm_name, max_vcpu, "--live",
                                        ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                exp_vcpu = {'max_config': max_vcpu, 'max_live': max_vcpu,
                            'cur_config': current_vcpu, 'cur_live': max_vcpu,
                            'guest_live': max_vcpu}
                result = utils_hotplug.check_vcpu_value(vm, exp_vcpu,
                                                        option="--live")
            elif condn == "host_smt":
                if cpu.get_cpu_arch() == 'power9':
                    result = process.run("ppc64_cpu --smt=4", shell=True)
                else:
                    test.cancel("Host SMT changes not allowed during guest live")
            else:
                logging.debug("No operation for the domain")

        else:
            if condn == "save":
                save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
                result = virsh.save(vm_name, save_file,
                                    ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                time.sleep(condn_sleep_sec)
                if os.path.exists(save_file):
                    result = virsh.restore(save_file, ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif condn == "managedsave":
                result = virsh.managedsave(vm_name,
                                           ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                time.sleep(condn_sleep_sec)
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "avocadotest":
                guestbt.join(ignore_status=True)
            elif condn == "stress":
                utils_test.unload_stress("stress_in_vms", params=params, vms=[vm])
            elif condn == "hotplug":
                result = virsh.setvcpus(vm_name, current_vcpu, "--live",
                                        ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                exp_vcpu = {'max_config': max_vcpu, 'max_live': current_vcpu,
                            'cur_config': current_vcpu, 'cur_live': current_vcpu,
                            'guest_live': current_vcpu}
                result = utils_hotplug.check_vcpu_value(vm, exp_vcpu,
                                                        option="--live")
            elif condn == "host_smt":
                result = process.run("ppc64_cpu --smt=2", shell=True)
                # Change back the host smt
                result = process.run("ppc64_cpu --smt=4", shell=True)
                # Workaround for a known cgroup issue after a cpu hot(un)plug
                # sequence
                root_cpuset_path = utils_cgroup.get_cgroup_mountpoint("cpuset")
                machine_cpuset_paths = []
                if os.path.isdir(os.path.join(root_cpuset_path,
                                              "machine.slice")):
                    machine_cpuset_paths.append(os.path.join(root_cpuset_path,
                                                             "machine.slice"))
                if os.path.isdir(os.path.join(root_cpuset_path, "machine")):
                    machine_cpuset_paths.append(os.path.join(root_cpuset_path,
                                                             "machine"))
                if not machine_cpuset_paths:
                    logging.warning("cgroup cpuset might not recover properly "
                                    "for guests after host smt changes, "
                                    "restore it manually")
                root_cpuset_cpus = os.path.join(root_cpuset_path, "cpuset.cpus")
                for path in machine_cpuset_paths:
                    machine_cpuset_cpus = os.path.join(path, "cpuset.cpus")
                    # check if file content differs
                    cmd = "diff %s %s" % (root_cpuset_cpus,
                                          machine_cpuset_cpus)
                    if process.system(cmd, verbose=True, ignore_status=True):
                        cmd = "cp %s %s" % (root_cpuset_cpus,
                                            machine_cpuset_cpus)
                        process.system(cmd, verbose=True)

            else:
                logging.debug("No need recover the domain")
        return bt
Example No. 28
def run(test, params, env):
    """
    Test command: virsh domstate.

    1. Prepare test environment.
    2. When libvirtd == "off", stop the libvirtd service.
    3. Perform the virsh domstate operation.
    4. Recover test environment.
    5. Confirm the test result.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("domstate_vm_ref")
    status_error = (params.get("status_error", "no") == "yes")
    extra = params.get("domstate_extra", "")
    vm_action = params.get("domstate_vm_action", "")
    vm_oncrash_action = params.get("domstate_vm_oncrash")

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    libvirtd_service = utils_libvirtd.Libvirtd()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # Back up xml file.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Back up qemu.conf
    process.run("cp %s %s" % (QEMU_CONF, QEMU_CONF_BK), shell=True)

    dump_path = os.path.join(test.tmpdir, "dump/")
    dump_file = ""
    try:
        if vm_action == "crash":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml.on_crash = vm_oncrash_action
            if not vmxml.xmltreefile.find('devices').findall('panic'):
                # Add <panic> device to domain
                panic_dev = Panic()
                panic_dev.addr_type = "isa"
                panic_dev.addr_iobase = "0x505"
                vmxml.add_device(panic_dev)
            vmxml.sync()
            # Config auto_dump_path in qemu.conf
            cmd = "echo auto_dump_path = \\\"%s\\\" >> %s" % (dump_path,
                                                              QEMU_CONF)
            process.run(cmd, shell=True)
            libvirtd_service.restart()
            if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
                dump_file = dump_path + vm_name + "-*"
            # Start VM and check the panic device
            virsh.start(vm_name, ignore_status=False)
            vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
            # Skip this test if no panic device is found
            if not vmxml_new.xmltreefile.find('devices').findall('panic'):
                raise error.TestNAError("No 'panic' device in the guest,"
                                        " maybe your libvirt version doesn't"
                                        " support it.")
        try:
            if vm_action == "suspend":
                virsh.suspend(vm_name, ignore_status=False)
            elif vm_action == "resume":
                virsh.suspend(vm_name, ignore_status=False)
                virsh.resume(vm_name, ignore_status=False)
            elif vm_action == "destroy":
                virsh.destroy(vm_name, ignore_status=False)
            elif vm_action == "start":
                virsh.destroy(vm_name, ignore_status=False)
                virsh.start(vm_name, ignore_status=False)
            elif vm_action == "kill":
                libvirtd_service.stop()
                kill_process_by_pattern(vm_name)
                libvirtd_service.restart()
            elif vm_action == "crash":
                session = vm.wait_for_login()
                session.cmd("service kdump stop", ignore_all_errors=True)
                # Enable sysRq
                session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                # Send key ALT-SysRq-c to crash the VM; the command will not
                # return since the vm crashes, so use a short timeout for the
                # 'destroy' and 'preserve' actions. The 'restart',
                # 'coredump-restart' and 'coredump-destroy' actions all need
                # more time to dump the core file or restart the OS, so use
                # the default session command timeout (60s)
                try:
                    if vm_oncrash_action in ['destroy', 'preserve']:
                        timeout = 3
                    else:
                        timeout = 60
                    session.cmd("echo c > /proc/sysrq-trigger", timeout=timeout)
                except (ShellTimeoutError, ShellProcessTerminatedError):
                    pass
                session.close()
        except process.CmdError as e:
            raise error.TestError("Guest prepare action error: %s" % e)

        if libvirtd == "off":
            libvirtd_service.stop()

        if vm_ref == "remote":
            remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
            local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
            remote_pwd = params.get("remote_pwd", None)
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise error.TestNAError("Test 'remote' parameters not setup")
            status = 0
            try:
                remote_uri = libvirt_vm.complete_uri(local_ip)
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = "virsh -c %s domstate %s" % (remote_uri, vm_name)
                status, output = session.cmd_status_output(command,
                                                           internal_timeout=5)
                session.close()
            except process.CmdError:
                status = 1
        else:
            result = virsh.domstate(vm_ref, extra, ignore_status=True,
                                    debug=True)
            status = result.exit_status
            output = result.stdout.strip()

        # check status_error
        if status_error:
            if not status:
                raise error.TestFail("Run successfully with wrong command!")
        else:
            if status or not output:
                raise error.TestFail("Run failed with right command")
            if extra.count("reason"):
                if vm_action == "suspend":
                    # Destroy now, otherwise destroying the vm later takes a
                    # long time
                    virsh.destroy(vm_name)
                    if not output.count("user"):
                        raise ActionError(vm_action)
                elif vm_action == "resume":
                    if not output.count("unpaused"):
                        raise ActionError(vm_action)
                elif vm_action == "destroy":
                    if not output.count("destroyed"):
                        raise ActionError(vm_action)
                elif vm_action == "start":
                    if not output.count("booted"):
                        raise ActionError(vm_action)
                elif vm_action == "kill":
                    if not output.count("crashed"):
                        raise ActionError(vm_action)
                elif vm_action == "crash":
                    if not check_crash_state(output, vm_oncrash_action,
                                             dump_file):
                        raise ActionError(vm_action)
            if vm_ref == "remote":
                if not (re.search("running", output) or
                        re.search("blocked", output) or
                        re.search("idle", output)):
                    raise error.TestFail("Run failed with right command")
Example No. 29
def run(test, params, env):
    """
    Test command: virsh setvcpus.

    The command can change the number of virtual CPUs in the guest domain.
    1. Prepare test environment: destroy or suspend a VM.
    2. Perform the virsh setvcpus operation.
    3. Recover test environment.
    4. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("setvcpus_pre_vm_state")
    command = params.get("setvcpus_command", "setvcpus")
    options = params.get("setvcpus_options")
    vm_ref = params.get("setvcpus_vm_ref", "name")
    count = params.get("setvcpus_count")
    set_current = int(params.get("setvcpus_current", "0"))
    extra_param = params.get("setvcpus_extra_param")
    count_option = "%s %s" % (count, extra_param)
    status_error = params.get("status_error")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "")
    remote_prompt = params.get("remote_prompt", "#")
    tmpxml = os.path.join(test.tmpdir, 'tmp.xml')
    test_set_max = 2

    # Early death
    if vm_ref == "remote" and (remote_ip.count("EXAMPLE.COM") or
                               local_ip.count("EXAMPLE.COM")):
        raise error.TestNAError("remote/local ip parameters not set.")

    # Save original configuration
    orig_config_xml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Get the number of cpus, current value if set, and machine type
    orig_set, orig_current, mtype = get_xmldata(vm_name, tmpxml, options)
    logging.debug("orig_set=%d orig_current=%d mtype=%s",
                  orig_set, orig_current, mtype)

    # Normal processing of the test is to set the vcpu count to 2 and then
    # adjust the 'current_vcpu' value to 1 effectively removing a vcpu.
    #
    # This is generally fine when the guest is not running; however, the
    # hotswap functionality hasn't always worked very well and is
    # undergoing lots of change, from using the hmp "cpu_set" command in 1.5
    # to a new qmp "cpu-add" added in 1.6, where the "cpu_set" command
    # seems to have been deprecated, making things very messy.
    #
    # To further muddy the waters, the "cpu-add" functionality is supported
    # for specific machine type versions. For the purposes of this test that
    # would be "pc-i440fx-1.5" or "pc-q35-1.5" or later type machines (from
    # guest XML "<os> <type ... machine=''/type> </os>"). Depending on which
    # version of qemu/kvm was used to initially create/generate the XML for
    # the machine this could result in a newer qemu still using 1.4 or earlier
    # for the machine type.
    #
    # If set_current is set, then we are adding CPUs, thus we must set
    # the 'current_vcpu' value to something lower than our count in
    # order to test that if we start with a current=1 and a count=2 that we
    # can set our current up to our count. If our orig_set count is 1, then
    # don't add a vCPU to a VM that perhaps doesn't want one.  We still need
    # to check if 'virsh setvcpus <domain> 1' would work, so continue on.
    #
    if set_current != 0 and orig_set >= 2:
        if vm.is_alive():
            vm.destroy()
        vm_xml = libvirt_xml.VMXML()
        if set_current >= test_set_max:
            raise error.TestFail("Current(%d) >= test set max(%d)" %
                                 (set_current, test_set_max))
        vm_xml.set_vm_vcpus(vm_name, test_set_max, set_current)
        # Restart, unless that's not our test
        if pre_vm_state != "shut off":
            vm.start()
            vm.wait_for_login()

    if orig_set == 1:
        logging.debug("Original vCPU count is 1, just checking if setvcpus "
                      "can still set current.")

    domid = vm.get_id()  # only valid for running
    domuuid = vm.get_uuid()

    if pre_vm_state == "paused":
        vm.pause()
    elif pre_vm_state == "shut off" and vm.is_alive():
        vm.destroy()

    try:
        if vm_ref == "remote":
            (setvcpu_exit_status, status_error,
             setvcpu_exit_stderr) = remote_test(remote_ip,
                                                local_ip,
                                                remote_pwd,
                                                remote_prompt,
                                                vm_name,
                                                status_error)
        else:
            if vm_ref == "name":
                dom_option = vm_name
            elif vm_ref == "id":
                dom_option = domid
                if params.get("setvcpus_hex_id") is not None:
                    dom_option = hex(int(domid))
                elif params.get("setvcpus_invalid_id") is not None:
                    dom_option = params.get("setvcpus_invalid_id")
            elif vm_ref == "uuid":
                dom_option = domuuid
                if params.get("setvcpus_invalid_uuid") is not None:
                    dom_option = params.get("setvcpus_invalid_uuid")
            else:
                dom_option = vm_ref

            option_list = options.split(" ")
            for item in option_list:
                if virsh.has_command_help_match(command, item) is None:
                    raise error.TestNAError("The current libvirt version"
                                            " doesn't support '%s' option"
                                            % item)
            status = virsh.setvcpus(dom_option, count_option, options,
                                    ignore_status=True, debug=True)
            setvcpu_exit_status = status.exit_status
            setvcpu_exit_stderr = status.stderr.strip()

    finally:
        vcpus_set, vcpus_current, mtype = get_xmldata(vm_name, tmpxml, options)

        # Cleanup
        if pre_vm_state == "paused":
            virsh.resume(vm_name, ignore_status=True)
        orig_config_xml.sync()

    # check status_error
    if status_error == "yes":
        if setvcpu_exit_status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    else:
        if setvcpu_exit_status != 0:
            # setvcpu/hotplug is only available as of qemu 1.5 and it's still
            # evolving. In general the addition of vcpu's may use the QMP
            # "cpu_set" (qemu 1.5) or "cpu-add" (qemu 1.6 and later) commands.
            # The removal of vcpu's may work in qemu 1.5 due to how cpu_set
            # can set vcpus online or offline; however, there doesn't appear
            # to be a complementary cpu-del feature yet, so we can add, but
            # not delete in 1.6.

            # A 1.6 qemu will not allow the cpu-add command to be run on
            # a configuration using <os> machine property 1.4 or earlier.
            # That is the XML <os> element with the <type> property having
            # an attribute 'machine' which is a tuple of 3 elements separated
            # by a dash, such as "pc-i440fx-1.5" or "pc-q35-1.5".
            if re.search("unable to execute QEMU command 'cpu-add'",
                         setvcpu_exit_stderr):
                raise error.TestNAError("guest <os> machine property '%s' "
                                        "may be too old to allow hotplug.",
                                        mtype)

            # A qemu older than 1.5 or an unplug for 1.6 will result in
            # the following failure.  In general, any time libvirt determines
            # it cannot support adding or removing a vCPU...
            if re.search("cannot change vcpu count of this domain",
                         setvcpu_exit_stderr):
                raise error.TestNAError("virsh setvcpu hotplug unsupported, "
                                        " mtype=%s" % mtype)

            # Cannot set current vcpu count large than max vcpu count
            if orig_set == 1 and count > orig_set:
                raise error.TestNAError(setvcpu_exit_stderr)

            # Otherwise, it seems we have a real error
            raise error.TestFail("Run failed with right command mtype=%s stderr=%s" %
                                 (mtype, setvcpu_exit_stderr))
        else:
            if "--maximum" in options:
                if vcpus_set != int(count):
                    raise error.TestFail("failed to set --maximum vcpus "
                                         "to %s mtype=%s" %
                                         (count, mtype))
            else:
                if orig_set >= 2 and set_current != 0:
                    # If we're adding a cpu we go from:
                    #    <vcpu ... current='1'...>2</vcpu>
                    # to
                    #    <vcpu ... >2</vcpu>
                    # where vcpus_current will be 0 and vcpus_set will be 2
                    if vcpus_current != 0 and vcpus_set != test_set_max:
                        raise error.TestFail("Failed to add current=%d, "
                                             "set=%d, count=%d mtype=%s" %
                                             (vcpus_current, vcpus_set,
                                              test_set_max, mtype))
                elif orig_set >= 2 and set_current == 0:
                    # If we're removing a cpu we go from:
                    #    <vcpu ... >2</vcpu>
                    # to
                    #    <vcpu ... current='1'...>2</vcpu>
                    # where vcpus_current will be 1 and vcpus_set will be 2
                    if vcpus_current != 1 and vcpus_set != test_set_max:
                        raise error.TestFail("Failed to remove current=%d, "
                                             "set=%d, count=%d mtype=%s" %
                                             (vcpus_current, vcpus_set,
                                              test_set_max, mtype))
                # If we have a starting place of 1 vCPU, then this is a rather
                # boring and innocuous case, but libvirt will succeed, so just
                # handle it
                elif orig_set == 1 and vcpus_current != 0 and vcpus_set != 1:
                    raise error.TestFail("Failed when orig_set is 1 current=%d, "
                                         "set=%d, count=%d mtype=%s" %
                                         (vcpus_current, vcpus_set,
                                          test_set_max, mtype))
Example No. 30
def run(test, params, env):
    """
    Test command: virsh managedsave.

    This command can save and destroy a
    running domain, so it can be restarted
    from the same state at a later time.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # define function
    def vm_recover_check(guest_name, option):
        """
        Check if the vm can be recovered correctly.

        :param guest_name : Checked vm's name.
        :param option : managedsave command option.
        """
        # By now the vm should have been shut down by managedsave
        if vm.is_alive():
            raise error.TestFail("Guest should be inactive")
        virsh.start(guest_name)
        # After start the vm should be active again
        if vm.is_dead():
            raise error.TestFail("Guest should be active")
        if option:
            if option.count("running"):
                if vm.is_dead() or vm.is_paused():
                    raise error.TestFail("Guest state should be"
                                         " running after started"
                                         " because of '--running' option")
            elif option.count("paused"):
                if not vm.is_paused():
                    raise error.TestFail("Guest state should be"
                                         " paused after started"
                                         " because of '--paused' option")
        else:
            if params.get("paused_after_start_vm") == "yes":
                if not vm.is_paused():
                    raise error.TestFail("Guest state should be"
                                         " paused after started"
                                         " because of initia guest state")

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    status_error = ("yes" == params.get("status_error", "no"))
    vm_ref = params.get("managedsave_vm_ref")
    libvirtd = params.get("libvirtd", "on")
    extra_param = params.get("managedsave_extra_param", "")
    progress = ("yes" == params.get("managedsave_progress", "no"))
    option = params.get("managedsave_option", "")
    if option:
        if not virsh.has_command_help_match('managedsave', option):
            # Older libvirt does not have this option
            raise error.TestNAError("Older libvirt does not"
                                    " handle arguments consistently")

    # run test case
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.count("invalid"):
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name

    # stop the libvirtd service
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Ignore exception with "ignore_status=True"
    if progress:
        option += " --verbose"
    option += extra_param
    ret = virsh.managedsave(vm_ref, options=option, ignore_status=True)
    status = ret.exit_status
    # The progress information is output in the error message
    error_msg = ret.stderr.strip()

    # Restart the libvirtd service
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # check status_error
    try:
        if status_error:
            if not status:
                raise error.TestFail("Run successfully with wrong command!")
        else:
            if status:
                raise error.TestFail("Run failed with right command")
            if progress:
                if not error_msg.count("Managedsave:"):
                    raise error.TestFail("Got invalid progress output")
            vm_recover_check(vm_name, option)
    finally:
        if vm.is_paused():
            virsh.resume(vm_name)
Example No. 31
def run(test, params, env):
    """
    Test command: virsh setvcpus.

    The command can change the number of virtual CPUs in the guest domain.
    1. Prepare test environment: destroy or suspend a VM.
    2. Perform the virsh setvcpus operation.
    3. Recover test environment.
    4. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("setvcpus_pre_vm_state")
    command = params.get("setvcpus_command", "setvcpus")
    options = params.get("setvcpus_options")
    vm_ref = params.get("setvcpus_vm_ref", "name")
    status_error = (params.get("status_error", "no") == "yes")
    convert_err = "Can't convert {0} to integer type"
    try:
        current_vcpu = int(params.get("setvcpus_current", "1"))
    except ValueError:
        test.error(convert_err.format(params.get("setvcpus_current")))
    try:
        max_vcpu = int(params.get("setvcpus_max", "4"))
    except ValueError:
        test.error(convert_err.format(params.get("setvcpus_max")))
    try:
        count = params.get("setvcpus_count", "")
        if count:
            count = eval(count)
        count = int(count)
    except ValueError:
        # 'count' may be an invalid number in negative tests
        logging.debug(convert_err.format(count))

    extra_param = params.get("setvcpus_extra_param")
    count_option = "%s %s" % (count, extra_param)
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "")
    remote_user = params.get("remote_user", "root")
    remote_uri = params.get("remote_uri")
    tmpxml = os.path.join(data_dir.get_tmp_dir(), 'tmp.xml')
    topology_correction = "yes" == params.get("topology_correction", "yes")
    result = True

    # Early death 1.1
    if remote_uri:
        if remote_ip.count("EXAMPLE.COM"):
            test.cancel("remote ip parameters not set.")
        ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)

    # Early death 1.2
    option_list = options.split(" ")
    for item in option_list:
        if virsh.has_command_help_match(command, item) is None:
            test.cancel("The current libvirt version"
                        " doesn't support '%s' option" % item)

    # Init expect vcpu count values
    exp_vcpu = {'max_config': max_vcpu, 'max_live': max_vcpu,
                'cur_config': current_vcpu, 'cur_live': current_vcpu,
                'guest_live': current_vcpu}

    def set_expected(vm, options):
        """
        Set the expected vcpu numbers

        :param vm: vm object
        :param options: setvcpus options
        """
        if ("config" in options) or ("current" in options and vm.is_dead()):
            if "maximum" in options:
                exp_vcpu["max_config"] = count
            else:
                exp_vcpu['cur_config'] = count
        if ("live" in options) or ("current" in options and vm.is_alive()):
            exp_vcpu['cur_live'] = count
            exp_vcpu['guest_live'] = count
        if options == '':
            # when none given it defaults to live
            exp_vcpu['cur_live'] = count
            exp_vcpu['guest_live'] = count
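    # For example, set_expected(vm, "--live") with count=2 on a running guest
    # sets exp_vcpu['cur_live'] and exp_vcpu['guest_live'] to 2 while the
    # config expectations keep their original values.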

    # Save original configuration
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    # Normal processing of the test is to set the maximum vcpu count to 4,
    # and set the current vcpu count to 1, then adjust the 'count' value to
    # plug or unplug vcpus.
    #
    # This is generally fine when the guest is not running; however, the
    # hotswap functionality hasn't always worked very well and is
    # undergoing lots of change, from using the hmp "cpu_set" command in 1.5
    # to a new qmp "cpu-add" added in 1.6, where the "cpu_set" command
    # seems to have been deprecated, making things very messy.
    #
    # To further muddy the waters, the "cpu-add" functionality is supported
    # for specific machine type versions. For the purposes of this test that
    # would be "pc-i440fx-1.5" or "pc-q35-1.5" or later type machines (from
    # guest XML "<os> <type ... machine=''/type> </os>"). Depending on which
    # version of qemu/kvm was used to initially create/generate the XML for
    # the machine this could result in a newer qemu still using 1.4 or earlier
    # for the machine type.
    #
    try:
        # Set maximum vcpus, so we can run all kinds of normal tests without
        # encountering a 'requested vcpus greater than max allowable' error
        topology = vmxml.get_cpu_topology()
        if topology and ("config" and "maximum" in options) and not status_error:
            # https://bugzilla.redhat.com/show_bug.cgi?id=1426220
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            del vmxml.cpu
            vmxml.sync()
        vmxml.set_vm_vcpus(vm_name, max_vcpu, current_vcpu,
                           topology_correction=topology_correction)

        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Pre-test xml is %s", vmxml.xmltreefile)

        # Get the number of cpus, current value if set, and machine type
        cpu_xml_data = utils_hotplug.get_cpu_xmldata(vm, options)
        logging.debug("Before run setvcpus: cpu_count=%d, cpu_current=%d,"
                      " mtype=%s", cpu_xml_data['vcpu'],
                      cpu_xml_data['current_vcpu'], cpu_xml_data['mtype'])

        # Restart, unless that's not our test
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()

        if cpu_xml_data['vcpu'] == 1 and count == 1:
            logging.debug("Original vCPU count is 1, just checking if setvcpus "
                          "can still set current.")

        domid = vm.get_id()  # only valid for running
        domuuid = vm.get_uuid()

        if pre_vm_state == "paused":
            vm.pause()
        elif pre_vm_state == "shut off" and vm.is_alive():
            vm.destroy()

        # Run test
        if vm_ref == "name":
            dom_option = vm_name
        elif vm_ref == "id":
            dom_option = domid
            if params.get("setvcpus_hex_id") is not None:
                dom_option = hex(int(domid))
            elif params.get("setvcpus_invalid_id") is not None:
                dom_option = params.get("setvcpus_invalid_id")
        elif vm_ref == "uuid":
            dom_option = domuuid
            if params.get("setvcpus_invalid_uuid") is not None:
                dom_option = params.get("setvcpus_invalid_uuid")
        else:
            dom_option = vm_ref

        if remote_uri:
            status = virsh.setvcpus(dom_option, "1", "--config",
                                    ignore_status=True, debug=True, uri=remote_uri)
        else:
            status = virsh.setvcpus(dom_option, count_option, options,
                                    ignore_status=True, debug=True)
            if not status_error:
                set_expected(vm, options)
                result = utils_hotplug.check_vcpu_value(vm, exp_vcpu,
                                                        option=options)
        setvcpu_exit_status = status.exit_status
        setvcpu_exit_stderr = status.stderr.strip()

    finally:
        cpu_xml_data = utils_hotplug.get_cpu_xmldata(vm, options)
        logging.debug("After run setvcpus: cpu_count=%d, cpu_current=%d,"
                      " mtype=%s", cpu_xml_data['vcpu'],
                      cpu_xml_data['current_vcpu'], cpu_xml_data['mtype'])

        # Cleanup
        if pre_vm_state == "paused":
            virsh.resume(vm_name, ignore_status=True)
        orig_config_xml.sync()
        if os.path.exists(tmpxml):
            os.remove(tmpxml)

    # check status_error
    if status_error:
        if setvcpu_exit_status == 0:
            test.fail("Run successfully with wrong command!")
    else:
        if setvcpu_exit_status != 0:
            # setvcpu/hotplug is only available as of qemu 1.5 and it's still
            # evolving. In general the addition of vcpu's may use the QMP
            # "cpu_set" (qemu 1.5) or "cpu-add" (qemu 1.6 and later) commands.
            # The removal of vcpu's may work in qemu 1.5 due to how cpu_set
            # can set vcpus online or offline; however, there doesn't appear
            # to be a complementary cpu-del feature yet, so we can add, but
            # not delete in 1.6.

            # A 1.6 qemu will not allow the cpu-add command to be run on
            # a configuration using <os> machine property 1.4 or earlier.
            # That is the XML <os> element with the <type> property having
            # an attribute 'machine' which is a tuple of 3 elements separated
            # by a dash, such as "pc-i440fx-1.5" or "pc-q35-1.5".
            if re.search("unable to execute QEMU command 'cpu-add'",
                         setvcpu_exit_stderr):
                test.cancel("guest <os> machine property '%s' "
                            "may be too old to allow hotplug." % cpu_xml_data['mtype'])

            # A qemu older than 1.5 or an unplug for 1.6 will result in
            # the following failure.  In general, any time libvirt determines
            # it cannot support adding or removing a vCPU...
            if re.search("cannot change vcpu count of this domain",
                         setvcpu_exit_stderr):
                test.cancel("virsh setvcpu hotplug unsupported, "
                            " mtype=%s" % cpu_xml_data['mtype'])

            # Otherwise, it seems we have a real error
            test.fail("Run failed with right command mtype=%s"
                      " stderr=%s" % (cpu_xml_data['mtype'], setvcpu_exit_stderr))
        else:
            if not result:
                test.fail("Test Failed")
Example No. 32
def run(test, params, env):
    """
    Test command: virsh setvcpus.

    The command can change the number of virtual CPUs in the guest domain.
    1. Prepare test environment: destroy or suspend a VM.
    2. Perform the virsh setvcpus operation.
    3. Recover test environment.
    4. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("setvcpus_pre_vm_state")
    command = params.get("setvcpus_command", "setvcpus")
    options = params.get("setvcpus_options")
    vm_ref = params.get("setvcpus_vm_ref", "name")
    count = params.get("setvcpus_count", "")
    convert_err = "Can't convert {0} to integer type"
    try:
        count = int(count)
    except ValueError:
        # 'count' may be an invalid number in negative tests
        logging.debug(convert_err.format(count))
    try:
        current_vcpu = int(params.get("setvcpus_current", "1"))
    except ValueError:
        raise error.TestError(convert_err.format(params.get("setvcpus_current")))
    try:
        max_vcpu = int(params.get("setvcpus_max", "4"))
    except ValueError:
        raise error.TestError(convert_err.format(params.get("setvcpus_max")))
    extra_param = params.get("setvcpus_extra_param")
    count_option = "%s %s" % (count, extra_param)
    status_error = params.get("status_error")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "")
    remote_prompt = params.get("remote_prompt", "#")
    tmpxml = os.path.join(test.tmpdir, "tmp.xml")
    set_topology = "yes" == params.get("set_topology", "no")
    sockets = params.get("topology_sockets")
    cores = params.get("topology_cores")
    threads = params.get("topology_threads")
    start_vm_after_set = "yes" == params.get("start_vm_after_set", "no")
    start_vm_expect_fail = "yes" == params.get("start_vm_expect_fail", "no")
    remove_vm_feature = params.get("remove_vm_feature", "")

    # Early death
    if vm_ref == "remote" and (remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM")):
        raise error.TestNAError("remote/local ip parameters not set.")

    # Save original configuration
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    # Normal processing of the test is to set the maximum vcpu count to 4,
    # and set the current vcpu count to 1, then adjust the 'count' value to
    # plug or unplug vcpus.
    #
    # This is generally fine when the guest is not running; however, the
    # hotswap functionality hasn't always worked very well and is
    # undergoing lots of change, from using the hmp "cpu_set" command in 1.5
    # to a new qmp "cpu-add" added in 1.6, where the "cpu_set" command
    # seems to have been deprecated, making things very messy.
    #
    # To further muddy the waters, the "cpu-add" functionality is supported
    # for specific machine type versions. For the purposes of this test that
    # would be "pc-i440fx-1.5" or "pc-q35-1.5" or later type machines (from
    # guest XML "<os> <type ... machine=''/type> </os>"). Depending on which
    # version of qemu/kvm was used to initially create/generate the XML for
    # the machine this could result in a newer qemu still using 1.4 or earlier
    # for the machine type.
    #

    try:
        if vm.is_alive():
            vm.destroy()

        # Set maximum vcpus, so we can run all kinds of normal tests without
        # encountering a 'requested vcpus greater than max allowable' error
        vmxml.set_vm_vcpus(vm_name, max_vcpu, current_vcpu)

        # Get the number of cpus, current value if set, and machine type
        orig_count, orig_current, mtype = get_xmldata(vm_name, tmpxml, options)
        logging.debug("Before run setvcpus: cpu_count=%d, cpu_current=%d," " mtype=%s", orig_count, orig_current, mtype)

        # Set cpu topology
        if set_topology:
            vmcpu_xml = vm_xml.VMCPUXML()
            vmcpu_xml["topology"] = {"sockets": sockets, "cores": cores, "threads": threads}
            vmxml["cpu"] = vmcpu_xml
            vmxml.sync()

        # Remove vm features
        if remove_vm_feature:
            vmfeature_xml = vmxml["features"]
            vmfeature_xml.remove_feature(remove_vm_feature)
            vmxml["features"] = vmfeature_xml
            vmxml.sync()

        # Restart, unless that's not our test
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()

        if orig_count == 1 and count == 1:
            logging.debug("Original vCPU count is 1, just checking if setvcpus " "can still set current.")

        domid = vm.get_id()  # only valid for running
        domuuid = vm.get_uuid()

        if pre_vm_state == "paused":
            vm.pause()
        elif pre_vm_state == "shut off" and vm.is_alive():
            vm.destroy()

        # Run test
        if vm_ref == "remote":
            (setvcpu_exit_status, status_error, setvcpu_exit_stderr) = remote_test(
                remote_ip, local_ip, remote_pwd, remote_prompt, vm_name, status_error
            )
        else:
            if vm_ref == "name":
                dom_option = vm_name
            elif vm_ref == "id":
                dom_option = domid
                if params.get("setvcpus_hex_id") is not None:
                    dom_option = hex(int(domid))
                elif params.get("setvcpus_invalid_id") is not None:
                    dom_option = params.get("setvcpus_invalid_id")
            elif vm_ref == "uuid":
                dom_option = domuuid
                if params.get("setvcpus_invalid_uuid") is not None:
                    dom_option = params.get("setvcpus_invalid_uuid")
            else:
                dom_option = vm_ref

            option_list = options.split(" ")
            for item in option_list:
                if virsh.has_command_help_match(command, item) is None:
                    raise error.TestNAError("The current libvirt version" " doesn't support '%s' option" % item)
            status = virsh.setvcpus(dom_option, count_option, options, ignore_status=True, debug=True)
            setvcpu_exit_status = status.exit_status
            setvcpu_exit_stderr = status.stderr.strip()

            # Start VM after set vcpu
            if start_vm_after_set:
                if vm.is_alive():
                    logging.debug("VM already started")
                else:
                    result = virsh.start(vm_name, ignore_status=True, debug=True)
                    libvirt.check_exit_status(result, start_vm_expect_fail)

    finally:
        new_count, new_current, mtype = get_xmldata(vm_name, tmpxml, options)
        logging.debug("After run setvcpus: cpu_count=%d, cpu_current=%d," " mtype=%s", new_count, new_current, mtype)

        # Cleanup
        if pre_vm_state == "paused":
            virsh.resume(vm_name, ignore_status=True)
        orig_config_xml.sync()
        if os.path.exists(tmpxml):
            os.remove(tmpxml)

    # check status_error
    if status_error == "yes":
        if setvcpu_exit_status == 0:
            # RHEL7/Fedora has a bug(BZ#1000354) against qemu-kvm, so throw the
            # bug info here
            if remove_vm_feature:
                logging.error("You may encounter bug: " "https://bugzilla.redhat.com/show_bug.cgi?id=1000354")
            raise error.TestFail("Run successfully with wrong command!")
    else:
        if setvcpu_exit_status != 0:
            # setvcpu/hotplug is only available as of qemu 1.5 and it's still
            # evolving. In general the addition of vcpu's may use the QMP
            # "cpu_set" (qemu 1.5) or "cpu-add" (qemu 1.6 and later) commands.
            # The removal of vcpu's may work in qemu 1.5 due to how cpu_set
            # can set vcpus online or offline; however, there doesn't appear
            # to be a complementary cpu-del feature yet, so we can add, but
            # not delete in 1.6.

            # A 1.6 qemu will not allow the cpu-add command to be run on
            # a configuration using <os> machine property 1.4 or earlier.
            # That is the XML <os> element with the <type> property having
            # an attribute 'machine' which is a tuple of 3 elements separated
            # by a dash, such as "pc-i440fx-1.5" or "pc-q35-1.5".
            if re.search("unable to execute QEMU command 'cpu-add'", setvcpu_exit_stderr):
                raise error.TestNAError("guest <os> machine property '%s' " "may be too old to allow hotplug.", mtype)

            # A qemu older than 1.5 or an unplug for 1.6 will result in
            # the following failure.  In general, any time libvirt determines
            # it cannot support adding or removing a vCPU...
            if re.search("cannot change vcpu count of this domain", setvcpu_exit_stderr):
                raise error.TestNAError("virsh setvcpu hotplug unsupported, " " mtype=%s" % mtype)

            # Otherwise, it seems we have a real error
            raise error.TestFail("Run failed with right command mtype=%s" " stderr=%s" % (mtype, setvcpu_exit_stderr))
        else:
            if "--maximum" in options:
                if new_count != count:
                    raise error.TestFail("Changing guest maximum vcpus failed" " while virsh command return 0")
            else:
                if new_current != count:
                    raise error.TestFail("Changing guest current vcpus failed" " while virsh command return 0")
    try:
        # Get a tmp_dir.
        tmp_dir = data_dir.get_tmp_dir()
        if os.path.dirname(vm_save) is "":
            vm_save = os.path.join(tmp_dir, vm_save)

        # Save the RAM state of a running domain
        cmd_result = virsh.save(vm_name, vm_save, debug=True)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to save running domain %s" % vm_name)

        # Edit the xml in the saved state file
        edit_image_xml()

        # Restore domain
        cmd_result = virsh.restore(vm_save, debug=True)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to restore domain %s" % vm_name)
        os.remove(vm_save)

        vm_state_check()

    finally:
        # cleanup
        if restore_state == "paused":
            virsh.resume(vm_name)

        if os.path.exists(vm_save):
            virsh.restore(vm_save)
            os.remove(vm_save)
Example No. 34
def run_virsh_setvcpus(test, params, env):
    """
    Test command: virsh setvcpus.

    The command can change the number of virtual CPUs in the guest domain.
    1. Prepare test environment: destroy or suspend a VM.
    2. Perform the virsh setvcpus operation.
    3. Recover test environment.
    4. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    xml_file = params.get("setvcpus_xml_file", "vm.xml")
    virsh.dumpxml(vm_name, extra="--inactive", to_file=xml_file)
    tmp_file = params.get("setvcpus_tmp_file", "tmp.xml")
    pre_vm_state = params.get("setvcpus_pre_vm_state")
    command = params.get("setvcpus_command", "setvcpus")
    options = params.get("setvcpus_options")
    domain = params.get("setvcpus_domain")
    count = params.get("setvcpus_count")
    extra_param = params.get("setvcpus_extra_param")
    count_option = "%s %s" % (count, extra_param)
    status_error = params.get("status_error")

    def get_current_vcpus():
        """
        Get current vcpu number.
        """
        vcpus_set = ""
        virsh.dumpxml(vm_name, extra="", to_file=tmp_file)
        dom = parse(tmp_file)
        root = dom.documentElement
        vcpus_2 = root.getElementsByTagName("vcpu")
        for n in vcpus_2:
            vcpus_set += n.getAttribute("current")
            vcpus_set = int(vcpus_set)
        dom.unlink()
        return vcpus_set
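    # For instance, a domain XML containing <vcpu current='1'>2</vcpu>
    # makes get_current_vcpus() return 1.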

    if vm.is_alive():
        vm.destroy()
    vm_xml = libvirt_xml.VMXML()
    vm_xml.set_vm_vcpus(vm_name, 2)
    vm.start()
    vm.wait_for_login()

    if status_error == "no":
        vcpus_new = len(vm.vcpuinfo())
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    if pre_vm_state == "paused":
        vm.pause()
    elif pre_vm_state == "shut off":
        vm.destroy()

    if domain == "remote_name":
        remote_ssh_addr = params.get("remote_ip", None)
        remote_addr = params.get("local_ip", None)
        remote_password = params.get("remote_password", None)
        host_type = virsh.driver()
        if host_type == "qemu":
            remote_string = "qemu+ssh://%s/system" % remote_addr
        elif host_type == "xen":
            remote_string = "xen+ssh://%s" % remote_addr
        command = "virsh -c %s setvcpus %s 1 --live" % (remote_string, vm_name)
        if virsh.has_command_help_match(command, "--live") is None:
            status_error = "yes"
        session = remote.remote_login(
            "ssh", remote_ssh_addr, "22", "root", remote_password, "#")
        session.cmd_output('LANG=C')
        status, output = session.cmd_status_output(command, internal_timeout=5)
        session.close()
        vcpus_current = len(vm.vcpuinfo())
    else:
        if domain == "name":
            dom_option = vm_name
        elif domain == "id":
            dom_option = domid
            if params.get("setvcpus_hex_id") is not None:
                dom_option = hex(int(domid))
            elif params.get("setvcpus_invalid_id") is not None:
                dom_option = params.get("setvcpus_invalid_id")
        elif domain == "uuid":
            dom_option = domuuid
            if params.get("setvcpus_invalid_uuid") is not None:
                dom_option = params.get("setvcpus_invalid_uuid")
        else:
            dom_option = domain
        option_list = options.split(" ")
        for item in option_list:
            if virsh.has_command_help_match(command, item) is None:
                status_error = "yes"
                break
        status = virsh.setvcpus(
            dom_option, count_option, options, ignore_status=True).exit_status
        if pre_vm_state == "paused":
            virsh.resume(vm_name, ignore_status=True)
        if status_error == "no":
            if status == 0:
                if pre_vm_state == "shut off":
                    if options == "--config":
                        vcpus_set = len(vm.vcpuinfo())
                    elif options == "--current":
                        vcpus_set = get_current_vcpus()
                    elif options == "--maximum --config":
                        vcpus_set = ""
                        dom = parse("/etc/libvirt/qemu/%s.xml" % vm_name)
                        vcpus_set = dom.getElementsByTagName(
                            "vcpu")[0].firstChild.data
                        vcpus_set = int(vcpus_set)
                        dom.unlink()
                else:
                    vcpus_set = len(vm.vcpuinfo())
                if domain == "id":
                    cmd_chk = "cat /etc/libvirt/qemu/%s.xml" % vm_name
                    output1 = commands.getoutput(cmd_chk)
                    logging.info("guest-info:\n%s" % output1)

    virsh.destroy(vm_name)
    virsh.undefine(vm_name)
    virsh.define(xml_file)
    if os.path.exists(xml_file):
        os.remove(xml_file)
    if os.path.exists(tmp_file):
        os.remove(tmp_file)

    # check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    else:
        if status != 0:
            raise error.TestFail("Run failed with right command")
        else:
            if options == "--maximum --config":
                if vcpus_set != 4:
                    raise error.TestFail("Run failed with right command1")
            elif domain == "id":
                if options == "--config":
                    if vcpus_set != vcpus_new or not re.search('<vcpu current=\'1\'>%s</vcpu>' % vcpus_new, output1):
                        raise error.TestFail("Run failed with right command2")
                elif options == "--config --live":
                    if vcpus_set != 1 or not re.search('<vcpu current=\'1\'>%s</vcpu>' % vcpus_new, output1):
                        raise error.TestFail("Run failed with right command3")
                else:
                    if vcpus_set != 1 or re.search('<vcpu current=\'1\'>%s</vcpu>' % vcpus_new, output1):
                        raise error.TestFail("Run failed with right command4")
            else:
                if vcpus_set != 1:
                    raise error.TestFail("Run failed with right command5")
Example No. 35
def run(test, params, env):
    """
    Test command: virsh save-image-dumpxml <file>
                  virsh save-image-define <file> [--xml <string>]

    1) Prepare test environment.
    2) Execute save-image-dumpxml to get VM xml description
    3) Edit the xml string and call virsh save-image-define to redefine it
    4) Restore VM
    5) Check the new xml of the VM
    """
    def get_image_xml():
        # Invoke save-image-dumpxml
        cmd_result = virsh.save_image_dumpxml(vm_save, debug=True)
        libvirt.check_exit_status(cmd_result)

        xml = cmd_result.stdout.strip()

        match_string = "<name>%s</name>" % vm_name
        if not re.search(match_string, xml):
            raise exceptions.TestFail("The xml from saved state file "
                                      "is invalid")
        return xml

    def redefine_new_xml():
        if restore_state == "running":
            option = "--running"
        elif restore_state == "paused":
            option = "--paused"
        else:
            raise exceptions.TestFail("Unknown save-image-define option")

        cmd_result = virsh.save_image_define(vm_save,
                                             xmlfile,
                                             option,
                                             debug=True)
        libvirt.check_exit_status(cmd_result)

    def vm_state_check():
        cmd_result = virsh.dumpxml(vm_name, debug=True)
        libvirt.check_exit_status(cmd_result)

        # The xml should contain the match_string
        xml = cmd_result.stdout.strip()
        match_string = "<boot dev='cdrom'/>"
        if not re.search(match_string, xml):
            raise exceptions.TestFail("After domain restore, "
                                      "the xml is not expected")

        domstate = virsh.domstate(vm_name, debug=True).stdout.strip()
        if restore_state != domstate:
            raise exceptions.TestFail("The domain state is not expected")

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")

    restore_state = params.get("restore_state", "running")
    vm_save = params.get("vm_save", "vm.save")
    xmlfile = ""

    try:
        # Get a tmp_dir.
        tmp_dir = data_dir.get_tmp_dir()

        if os.path.dirname(vm_save) is "":
            vm_save = os.path.join(tmp_dir, vm_save)

        # Save the RAM state of a running domain
        cmd_result = virsh.save(vm_name, vm_save, debug=True)
        libvirt.check_exit_status(cmd_result)

        xml = get_image_xml()

        # Replace <boot dev='hd'/> to <boot dev='cdrom'/>
        newxml = xml.replace("<boot dev='hd'/>", "<boot dev='cdrom'/>")
        logging.debug("After string replacement, the new xml is %s", newxml)

        # Write new xml into a tempfile
        tmp_file = tempfile.NamedTemporaryFile(prefix=("new_vm_xml_"),
                                               dir=tmp_dir)
        xmlfile = tmp_file.name
        tmp_file.close()

        with open(xmlfile, 'w') as fd:
            fd.write(newxml)

        # Redefine new xml for domain's saved state file
        redefine_new_xml()

        # Restore domain
        cmd_result = virsh.restore(vm_save, debug=True)
        libvirt.check_exit_status(cmd_result)
        os.remove(vm_save)

        vm_state_check()

    finally:
        # cleanup
        if restore_state == "paused":
            virsh.resume(vm_name)

        if os.path.exists(vm_save):
            virsh.restore(vm_save)
            os.remove(vm_save)

        if xmlfile and os.path.exists(xmlfile):
            os.remove(xmlfile)
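A minimal sketch of the same save-image round trip outside the harness, assuming avocado-vt's virttest.virsh wrapper and a running domain named "demo-vm" (the domain name and the file paths below are hypothetical):

import re

from virttest import virsh

DOMAIN = "demo-vm"                  # hypothetical domain name
STATE_FILE = "/tmp/demo-vm.save"    # hypothetical saved-state path
NEW_XML = "/tmp/new_vm.xml"         # hypothetical edited-xml path

# Save the RAM state, then read back the XML embedded in the state file.
virsh.save(DOMAIN, STATE_FILE, ignore_status=False)
xml = virsh.save_image_dumpxml(STATE_FILE).stdout.strip()

# Edit the XML and push it back into the saved-state file.
with open(NEW_XML, 'w') as fd:
    fd.write(xml.replace("<boot dev='hd'/>", "<boot dev='cdrom'/>"))
virsh.save_image_define(STATE_FILE, NEW_XML, "--running")

# Restoring now boots from cdrom and leaves the domain running.
virsh.restore(STATE_FILE, ignore_status=False)
assert re.search("<boot dev='cdrom'/>", virsh.dumpxml(DOMAIN).stdout)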
Ejemplo n.º 36
0
def run(test, params, env):
    """
    Test command: virsh domstate.

    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh domstate operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)

    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("domstate_vm_ref")
    status_error = params.get("status_error", "no") == "yes"
    extra = params.get("domstate_extra", "")
    vm_action = params.get("domstate_vm_action", "")

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    try:
        if vm_action == "suspend":
            virsh.suspend(vm_name, ignore_status=False)
        elif vm_action == "resume":
            virsh.suspend(vm_name, ignore_status=False)
            virsh.resume(vm_name, ignore_status=False)
        elif vm_action == "destroy":
            virsh.destroy(vm_name, ignore_status=False)
        elif vm_action == "start":
            virsh.destroy(vm_name, ignore_status=False)
            virsh.start(vm_name, ignore_status=False)
    except error.CmdError:
        raise error.TestError("Guest prepare action error!")

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    if vm_ref == "remote":
        remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
        local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
        remote_pwd = params.get("remote_pwd", None)
        if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
            raise error.TestNAError("Test 'remote' parameters not setup")
        status = 0
        output = ""
        try:
            remote_uri = libvirt_vm.complete_uri(local_ip)
            session = remote.remote_login("ssh", remote_ip, "22", "root", remote_pwd, "#")
            session.cmd_output("LANG=C")
            command = "virsh -c %s domstate %s" % (remote_uri, vm_name)
            status, output = session.cmd_status_output(command, internal_timeout=5)
            session.close()
        except error.CmdError:
            status = 1
    else:
        result = virsh.domstate(vm_ref, extra, ignore_status=True)
        status = result.exit_status
        output = result.stdout.strip()

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # check status_error
    if status_error:
        if not status:
            raise error.TestFail("Run successfully with wrong command!")
    else:
        if status or not output:
            raise error.TestFail("Run failed with right command")
        if extra.count("reason"):
            if vm_action == "suspend":
                # Destroy the vm now; otherwise tearing it down later
                # takes a long time
                virsh.destroy(vm_name)
                if not output.count("user"):
                    raise ActionError(vm_action)
            elif vm_action == "resume":
                if not output.count("unpaused"):
                    raise ActionError(vm_action)
            elif vm_action == "destroy":
                if not output.count("destroyed"):
                    raise ActionError(vm_action)
            elif vm_action == "start":
                if not output.count("booted"):
                    raise ActionError(vm_action)
        if vm_ref == "remote":
            if not (re.search("running", output) or re.search("blocked", output) or re.search("idle", output)):
                raise error.TestFail("Run failed with right command")
Ejemplo n.º 37
0
                    check_flags_parallel(virsh_cmd, bash_cmd %
                                         (managed_save_file, managed_save_file,
                                          "0"), flags)
                elif test_loop_cmd:
                    loop_range = params.get("loop_range", "20")
                    vm_managedsave_loop(vm_name, loop_range, libvirtd)
                else:
                    vm_recover_check(option, libvirtd, check_shutdown)
    finally:
        # Restore test environment.

        # Ensure libvirtd is started
        if not libvirtd.is_running():
            libvirtd.start()
        if vm.is_paused():
            virsh.resume(vm_name)
        elif vm.is_dead():
            vm.start()
        # Wait for VM in running state
        wait_for_state("running")
        if autostart_bypass_cache:
            virsh.autostart(vm_name, "--disable",
                            ignore_status=True)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Wait for VM to be in shut off state
        utils_misc.wait_for(lambda: vm.state() == "shut off", 10)
        virsh.managedsave_remove(vm_name)
        vmxml_backup.sync()
        if multi_guests:
            for i in range(int(multi_guests)):
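The cleanup above revolves around virsh managedsave; a minimal sketch of the round trip it recovers from, assuming the virttest.virsh wrapper and a running domain "demo-vm" (hypothetical name):

from virttest import virsh

DOMAIN = "demo-vm"   # hypothetical domain name

# managedsave stops the domain and stashes its RAM state in a
# libvirt-managed file; the next start resumes from that state.
virsh.managedsave(DOMAIN, ignore_status=False)
assert virsh.domstate(DOMAIN).stdout.strip() == "shut off"
virsh.start(DOMAIN, ignore_status=False)

# Drop any leftover managed-save image, as the cleanup path above does.
virsh.managedsave_remove(DOMAIN)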
Ejemplo n.º 38
0
 def vm_stress_events(self, event, vm):
     """
     Stress events
     :param event: event name
     :param vm: vm object
     """
     dargs = {'ignore_status': True, 'debug': True}
     for itr in range(self.iterations):
         if "vcpupin" in event:
             for vcpu in range(int(self.current_vcpu)):
                 result = virsh.vcpupin(vm.name, vcpu,
                                        random.choice(self.host_cpu_list),
                                        **dargs)
                 if not self.ignore_status:
                     libvirt.check_exit_status(result)
         elif "emulatorpin" in event:
             for vcpu in range(int(self.current_vcpu)):
                 result = virsh.emulatorpin(vm.name,
                                            random.choice(
                                                self.host_cpu_list),
                                            **dargs)
                 if not self.ignore_status:
                     libvirt.check_exit_status(result)
         elif "suspend" in event:
             result = virsh.suspend(vm.name, **dargs)
             if not self.ignore_status:
                 libvirt.check_exit_status(result)
             time.sleep(self.event_sleep_time)
             result = virsh.resume(vm.name, **dargs)
             if not self.ignore_status:
                 libvirt.check_exit_status(result)
         elif "cpuhotplug" in event:
             result = virsh.setvcpus(vm.name, self.max_vcpu, "--live",
                                     **dargs)
             if not self.ignore_status:
                 libvirt.check_exit_status(result)
                 exp_vcpu = {'max_config': self.max_vcpu,
                             'max_live': self.max_vcpu,
                             'cur_config': self.current_vcpu,
                             'cur_live': self.max_vcpu,
                             'guest_live': self.max_vcpu}
                 utils_hotplug.check_vcpu_value(
                     vm, exp_vcpu, option="--live")
             time.sleep(self.event_sleep_time)
             result = virsh.setvcpus(vm.name, self.current_vcpu, "--live",
                                     **dargs)
             if not self.ignore_status:
                 libvirt.check_exit_status(result)
                 exp_vcpu = {'max_config': self.max_vcpu,
                             'max_live': self.max_vcpu,
                             'cur_config': self.current_vcpu,
                             'cur_live': self.current_vcpu,
                             'guest_live': self.current_vcpu}
                 utils_hotplug.check_vcpu_value(
                     vm, exp_vcpu, option="--live")
         elif "reboot" in event:
             vm.reboot()
         elif "nethotplug" in event:
             for iface_num in range(int(self.iface_num)):
                 logging.debug("Try to attach interface %d" % iface_num)
                 mac = utils_net.generate_mac_address_simple()
                 options = ("%s %s --model %s --mac %s %s" %
                            (self.iface_type, self.iface_source['network'],
                             self.iface_model, mac, self.attach_option))
                 logging.debug("VM name: %s , Options for Network attach: %s", vm.name, options)
                 ret = virsh.attach_interface(vm.name, options,
                                              ignore_status=True)
                 time.sleep(self.event_sleep_time)
                 if not self.ignore_status:
                     libvirt.check_exit_status(ret)
                 if self.detach_option:
                     options = ("--type %s --mac %s %s" %
                                (self.iface_type, mac, self.detach_option))
                     logging.debug("VM name: %s , Options for Network detach: %s", vm.name, options)
                     ret = virsh.detach_interface(vm.name, options,
                                                  ignore_status=True)
                     if not self.ignore_status:
                         libvirt.check_exit_status(ret)
         elif "diskhotplug" in event:
             for disk_num in range(len(self.device_source_names)):
                 disk = {}
                 disk_attach_error = False
                 disk_name = os.path.join(self.path, vm.name, self.device_source_names[disk_num])
                 device_source = libvirt.create_local_disk(
                     self.disk_type, disk_name, self.disk_size, disk_format=self.disk_format)
                 disk.update({"format": self.disk_format,
                              "source": device_source})
                 disk_xml = Disk(self.disk_type)
                 disk_xml.device = self.disk_device
                 disk_xml.driver = {"name": self.disk_driver, "type": self.disk_format}
                 ret = virsh.attach_disk(vm.name, disk["source"], self.device_target[disk_num], self.attach_option, debug=True)
                 if not self.ignore_status:
                     libvirt.check_exit_status(ret, disk_attach_error)
                 if self.detach_option:
                     ret = virsh.detach_disk(vm.name, self.device_target[disk_num], extra=self.detach_option)
                     if not self.ignore_status:
                         libvirt.check_exit_status(ret)
                     libvirt.delete_local_disk(self.disk_type, disk_name)
         else:
             raise NotImplementedError
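A minimal sketch of one of these stress events in isolation, the suspend/resume pair, assuming the virttest.virsh wrapper, virttest.utils_test.libvirt for exit-status checking, and a running domain "demo-vm" (hypothetical name):

import time

from virttest import virsh
from virttest.utils_test import libvirt

DOMAIN = "demo-vm"        # hypothetical domain name
EVENT_SLEEP_TIME = 2      # hypothetical pause between events
ITERATIONS = 5            # hypothetical iteration count

dargs = {'ignore_status': True, 'debug': True}
for _ in range(ITERATIONS):
    # Pause the guest, wait a moment, then unpause it again.
    result = virsh.suspend(DOMAIN, **dargs)
    libvirt.check_exit_status(result)
    time.sleep(EVENT_SLEEP_TIME)
    result = virsh.resume(DOMAIN, **dargs)
    libvirt.check_exit_status(result)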
Ejemplo n.º 39
0
def run(test, params, env):
    """
    Test command: virsh setvcpus.

    The command can change the number of virtual CPUs in the guest domain.
    1.Prepare test environment, destroy or suspend a VM.
    2.Perform virsh setvcpus operation.
    3.Recover test environment.
    4.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("setvcpus_pre_vm_state")
    command = params.get("setvcpus_command", "setvcpus")
    options = params.get("setvcpus_options")
    vm_ref = params.get("setvcpus_vm_ref", "name")
    status_error = (params.get("status_error", "no") == "yes")
    convert_err = "Can't convert {0} to integer type"
    try:
        current_vcpu = int(params.get("setvcpus_current", "1"))
    except ValueError:
        test.error(convert_err.format(params.get("setvcpus_current")))
    try:
        max_vcpu = int(params.get("setvcpus_max", "4"))
    except ValueError:
        test.error(convert_err.format(params.get("setvcpus_max")))
    try:
        count = params.get("setvcpus_count", "")
        if count:
            count = eval(count)
        count = int(count)
    except ValueError:
        # 'count' may be an invalid number in negative tests
        logging.debug(convert_err.format(count))

    extra_param = params.get("setvcpus_extra_param")
    count_option = "%s %s" % (count, extra_param)
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "")
    remote_prompt = params.get("remote_prompt", "#")
    tmpxml = os.path.join(test.tmpdir, 'tmp.xml')
    set_topology = (params.get("set_topology", "no") == "yes")
    sockets = params.get("sockets")
    cores = params.get("cores")
    threads = params.get("threads")

    # Early death 1.1
    if vm_ref == "remote" and (remote_ip.count("EXAMPLE.COM")
                               or local_ip.count("EXAMPLE.COM")):
        test.cancel("remote/local ip parameters not set.")

    # Early death 1.2
    option_list = options.split(" ")
    for item in option_list:
        if virsh.has_command_help_match(command, item) is None:
            test.cancel("The current libvirt version"
                        " doesn't support '%s' option" % item)

    # Init expect vcpu count values
    exp_vcpu = {
        'max_config': max_vcpu,
        'max_live': max_vcpu,
        'cur_config': current_vcpu,
        'cur_live': current_vcpu,
        'guest_live': current_vcpu
    }

    def set_expected(vm, options):
        """
        Set the expected vcpu numbers

        :param vm: vm object
        :param options: setvcpus options
        """
        if ("config" in options) or ("current" in options and vm.is_dead()):
            if "maximum" in options:
                exp_vcpu["max_config"] = count
            else:
                exp_vcpu['cur_config'] = count
        if ("live" in options) or ("current" in options and vm.is_alive()):
            exp_vcpu['cur_live'] = count
            exp_vcpu['guest_live'] = count
        if options == '':
            # when none given it defaults to live
            exp_vcpu['cur_live'] = count
            exp_vcpu['guest_live'] = count

    # Save original configuration
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    # Normal processing of the test is to set the maximum vcpu count to 4,
    # and set the current vcpu count to 1, then adjust the 'count' value to
    # plug or unplug vcpus.
    #
    # This is generally fine when the guest is not running; however, the
    # hotswap functionality hasn't always worked very well and has been
    # undergoing lots of change, from using the hmp "cpu_set" command in
    # qemu 1.5 to a new qmp "cpu-add" command added in 1.6, where "cpu_set"
    # seems to have been deprecated, making things very messy.
    #
    # To further muddy the waters, the "cpu-add" functionality is only
    # supported for specific machine type versions. For the purposes of this
    # test that would be "pc-i440fx-1.5" or "pc-q35-1.5" or later machine
    # types (from the guest XML "<os><type ... machine=''>...</type></os>").
    # Depending on which version of qemu/kvm was used to initially
    # create/generate the XML for the machine, this could result in a newer
    # qemu still using a 1.4 or earlier machine type.
    #
    try:
        # Set maximum vcpus, so we can run all kinds of normal tests without
        # encounter requested vcpus greater than max allowable vcpus error
        topology = vmxml.get_cpu_topology()
        if all([topology, sockets, cores, threads]) or set_topology:
            vmxml.set_vm_vcpus(vm_name, max_vcpu, current_vcpu, sockets, cores,
                               threads, True)
        else:
            vmxml.set_vm_vcpus(vm_name, max_vcpu, current_vcpu)

        if topology and ("config"
                         and "maximum" in options) and not status_error:
            # https://bugzilla.redhat.com/show_bug.cgi?id=1426220
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            del vmxml.cpu
            vmxml.sync()

        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Pre-test xml is %s", vmxml.xmltreefile)

        # Get the number of cpus, current value if set, and machine type
        cpu_xml_data = utils_hotplug.get_cpu_xmldata(vm, options)
        logging.debug(
            "Before run setvcpus: cpu_count=%d, cpu_current=%d,"
            " mtype=%s", cpu_xml_data['vcpu'], cpu_xml_data['current_vcpu'],
            cpu_xml_data['mtype'])

        # Make sure the VM is running and reachable before the test
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()

        if cpu_xml_data['vcpu'] == 1 and count == 1:
            logging.debug(
                "Original vCPU count is 1, just checking if setvcpus "
                "can still set current.")

        domid = vm.get_id()  # only valid for running
        domuuid = vm.get_uuid()

        if pre_vm_state == "paused":
            vm.pause()
        elif pre_vm_state == "shut off" and vm.is_alive():
            vm.destroy()

        # Run test
        if vm_ref == "remote":
            (setvcpu_exit_status, status_error,
             setvcpu_exit_stderr) = remote_test(remote_ip, local_ip,
                                                remote_pwd, remote_prompt,
                                                vm_name, status_error)
        else:
            if vm_ref == "name":
                dom_option = vm_name
            elif vm_ref == "id":
                dom_option = domid
                if params.get("setvcpus_hex_id") is not None:
                    dom_option = hex(int(domid))
                elif params.get("setvcpus_invalid_id") is not None:
                    dom_option = params.get("setvcpus_invalid_id")
            elif vm_ref == "uuid":
                dom_option = domuuid
                if params.get("setvcpus_invalid_uuid") is not None:
                    dom_option = params.get("setvcpus_invalid_uuid")
            else:
                dom_option = vm_ref

            status = virsh.setvcpus(dom_option,
                                    count_option,
                                    options,
                                    ignore_status=True,
                                    debug=True)
            if not status_error:
                set_expected(vm, options)
                result = utils_hotplug.check_vcpu_value(vm,
                                                        exp_vcpu,
                                                        option=options)
            setvcpu_exit_status = status.exit_status
            setvcpu_exit_stderr = status.stderr.strip()

    finally:
        cpu_xml_data = utils_hotplug.get_cpu_xmldata(vm, options)
        logging.debug(
            "After run setvcpus: cpu_count=%d, cpu_current=%d,"
            " mtype=%s", cpu_xml_data['vcpu'], cpu_xml_data['current_vcpu'],
            cpu_xml_data['mtype'])

        # Cleanup
        if pre_vm_state == "paused":
            virsh.resume(vm_name, ignore_status=True)
        orig_config_xml.sync()
        if os.path.exists(tmpxml):
            os.remove(tmpxml)

    # check status_error
    if status_error:
        if setvcpu_exit_status == 0:
            test.fail("Run successfully with wrong command!")
    else:
        if setvcpu_exit_status != 0:
            # setvcpu/hotplug is only available as of qemu 1.5 and it's still
            # evolving. In general the addition of vcpu's may use the QMP
            # "cpu_set" (qemu 1.5) or "cpu-add" (qemu 1.6 and later) commands.
            # The removal of vcpu's may work in qemu 1.5 due to how cpu_set
            # can set vcpus online or offline; however, there doesn't appear
            # to be a complementary cpu-del feature yet, so we can add, but
            # not delete in 1.6.

            # A 1.6 qemu will not allow the cpu-add command to be run on
            # a configuration using <os> machine property 1.4 or earlier.
            # That is the XML <os> element with the <type> property having
            # an attribute 'machine' which is a tuple of 3 elements separated
            # by a dash, such as "pc-i440fx-1.5" or "pc-q35-1.5".
            if re.search("unable to execute QEMU command 'cpu-add'",
                         setvcpu_exit_stderr):
                test.cancel("guest <os> machine property '%s' "
                            "may be too old to allow hotplug." %
                            cpu_xml_data['mtype'])

            # A qemu older than 1.5 or an unplug for 1.6 will result in
            # the following failure.  In general, any time libvirt determines
            # it cannot support adding or removing a vCPU...
            if re.search("cannot change vcpu count of this domain",
                         setvcpu_exit_stderr):
                test.cancel("virsh setvcpu hotplug unsupported, "
                            " mtype=%s" % cpu_xml_data['mtype'])

            # Otherwise, it seems we have a real error
            test.fail("Run failed with right command mtype=%s"
                      " stderr=%s" %
                      (cpu_xml_data['mtype'], setvcpu_exit_stderr))
        else:
            if not result:
                test.fail("Test Failed")
Ejemplo n.º 40
0
         try:
             virsh.dump(vm.name, dump_path, debug=True, ignore_status=False)
         except (error.CmdError, OSError) as detail:
             err_msg = "Dump %s failed: %s" % (vm.name, detail)
         try:
             os.remove(dump_path)
         except OSError:
             pass
 elif operation == "suspend_resume":
     paused_times = int(params.get("paused_times", 10))
     logging.info("Trying to suspend/resume vm %s times", paused_times)
     while paused_times > 0:
         paused_times -= 1
         try:
             virsh.suspend(vm.name, debug=True, ignore_status=False)
             virsh.resume(vm.name, debug=True, ignore_status=False)
          except error.CmdError as detail:
             err_msg = "Suspend-Resume %s failed: %s" % (vm.name, detail)
 elif operation == "save_restore":
     save_times = int(params.get("save_times", 10))
     logging.info("Trying to save/restore vm %s times", save_times)
     while save_times > 0:
         save_times -= 1
         save_path = os.path.join(data_dir.get_tmp_dir(), "save.file")
         try:
             virsh.save(vm.name, save_path, debug=True,
                        ignore_status=False)
             virsh.restore(save_path, debug=True, ignore_status=False)
          except error.CmdError as detail:
             err_msg = "Save-Restore %s failed: %s" % (vm.name, detail)
         try:
Ejemplo n.º 41
0
def run_virsh_resume(test, params, env):
    """
    Test command: virsh resume.

    1) Start vm, Prepare options such as id, uuid
    2) Prepare vm state for test, default is paused.
    3) Prepare other environment
    4) Run command, get result.
    5) Check result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # Get parameters
    vm_ref = params.get("resume_vm_ref", "domname")
    vm_state = params.get("resume_vm_state", "paused")
    option_suffix = params.get("resume_option_suffix")
    status_error = params.get("status_error", "no")

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    # Prepare vm state
    if vm_state == "paused":
        vm.pause()
    elif vm_state == "shutoff":
        vm.destroy()

    # Prepare options
    if vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domid":
        vm_ref = domid
    elif vm_ref == "domuuid":
        vm_ref = domuuid
    elif domid and vm_ref == "hex_id":
        if domid == "-":
            vm_ref = domid
        else:
            vm_ref = hex(int(domid))

    if option_suffix:
        vm_ref = "%s %s" % (vm_ref, option_suffix)

    # Run resume command
    result = virsh.resume(vm_ref, ignore_status=True)
    logging.debug(result)
    status = result.exit_status

    # Get vm state after virsh resume executed.
    domstate = vm.state()

    # Check status_error
    if status_error == "yes":
        # A wrong resume command was executed; recover with the right resume
        if domstate == "paused":
            vm.resume()
        vm.destroy()
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        # The right resume command failed, force-destroy the vm
        if domstate == "paused":
            vm.destroy(gracefully=False)
            raise error.TestFail("Resume vm failed. "
                                 "State is still paused")
        vm.destroy()
        if status != 0:
            raise error.TestFail("Run failed with right command")
Ejemplo n.º 42
0
    def manipulate_domain(vm_name, vm_operation, recover=False):
        """
        Operate domain to given state or recover it.

        :params vm_name: Name of the VM domain
        :params vm_operation: Operation to be performed on VM domain
                              like save, managedsave, suspend
        :params recover: flag indicating whether to apply or undo
                         vm_operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
        if not recover:
            if vm_operation == "save":
                save_option = ""
                result = virsh.save(vm_name, save_file, save_option,
                                    ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "managedsave":
                managedsave_option = ""
                result = virsh.managedsave(vm_name, managedsave_option,
                                           ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmsuspend(vm_name, suspend_target,
                                            ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s4":
                suspend_target = "disk"
                result = virsh.dompmsuspend(vm_name, suspend_target,
                                            ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                # Wait domain state change: 'in shutdown' -> 'shut off'
                utils_misc.wait_for(lambda: virsh.is_dead(vm_name), 5)
            elif vm_operation == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                vm.reboot()
                vm_uptime_init = vm.uptime()
            else:
                logging.debug("No operation for the domain")

        else:
            if vm_operation == "save":
                if os.path.exists(save_file):
                    result = virsh.restore(save_file, ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif vm_operation in ["managedsave", "s4"]:
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmwakeup(vm_name, ignore_status=True,
                                           debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                pass
            else:
                logging.debug("No need recover the domain")
Ejemplo n.º 43
0
def manipulate_vm(vm, operation, params=None):
    """
    Manipulate the VM.

    :param vm: VM instance
    :param operation: stress_in_vms, inject_nmi, dump, suspend_resume
                      or save_restore
    :param params: Test parameters
    """
    err_msg = ''
    # Special operations for test
    if operation == "stress":
        logging.debug("Load stress in VM")
        err_msg = utils_test.load_stress(operation, params=params, vms=[vm])[0]
    elif operation == "inject_nmi":
        inject_times = int(params.get("inject_times", 10))
        logging.info("Trying to inject nmi %s times", inject_times)
        while inject_times > 0:
            try:
                inject_times -= 1
                virsh.inject_nmi(vm.name, debug=True, ignore_status=False)
            except process.CmdError as detail:
                err_msg = "Inject nmi failed: %s" % detail
    elif operation == "dump":
        dump_times = int(params.get("dump_times", 10))
        logging.info("Trying to dump vm %s times", dump_times)
        while dump_times > 0:
            dump_times -= 1
            dump_path = os.path.join(data_dir.get_tmp_dir(), "dump.file")
            try:
                virsh.dump(vm.name, dump_path, debug=True, ignore_status=False)
            except (process.CmdError, OSError) as detail:
                err_msg = "Dump %s failed: %s" % (vm.name, detail)
            try:
                os.remove(dump_path)
            except OSError:
                pass
    elif operation == "suspend_resume":
        paused_times = int(params.get("paused_times", 10))
        logging.info("Trying to suspend/resume vm %s times", paused_times)
        while paused_times > 0:
            paused_times -= 1
            try:
                virsh.suspend(vm.name, debug=True, ignore_status=False)
                virsh.resume(vm.name, debug=True, ignore_status=False)
            except process.CmdError as detail:
                err_msg = "Suspend-Resume %s failed: %s" % (vm.name, detail)
    elif operation == "save_restore":
        save_times = int(params.get("save_times", 10))
        logging.info("Trying to save/restore vm %s times", save_times)
        while save_times > 0:
            save_times -= 1
            save_path = os.path.join(data_dir.get_tmp_dir(), "save.file")
            try:
                virsh.save(vm.name, save_path, debug=True,
                           ignore_status=False)
                virsh.restore(save_path, debug=True, ignore_status=False)
            except process.CmdError as detail:
                err_msg = "Save-Restore %s failed: %s" % (vm.name, detail)
            try:
                os.remove(save_path)
            except OSError:
                pass
    else:
        err_msg = "Unsupport operation in this function: %s" % operation
    return err_msg
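manipulate_vm returns an error-message string instead of raising, so callers are expected to collect the message and assert on it. A minimal usage sketch, assuming a vm object, the test object, and params are in scope:

# Run a burst of suspend/resume cycles and fail on the first recorded error.
err_msg = manipulate_vm(vm, "suspend_resume", params)
if err_msg:
    test.fail(err_msg)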
Ejemplo n.º 44
0
def run(test, params, env):
    """
    Test command: virsh restore.

    Restore a domain from a saved state in a file
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Run virsh restore command with assigned option.
    4.Recover test environment.
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    session = vm.wait_for_login()

    os_type = params.get("os_type")
    status_error = ("yes" == params.get("status_error"))
    libvirtd = params.get("libvirtd", "on")
    extra_param = params.get("restore_extra_param")
    pre_status = params.get("restore_pre_status")
    vm_ref = params.get("restore_vm_ref")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    time_before_save = int(params.get('time_before_save', 0))

    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    if "--xml" in extra_param:
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name, options="--migratable")
        backup_xml = vmxml.copy()
        # Grant more privileges on the file so that an unprivileged user
        # can access it.
        os.chmod(vmxml.xml, stat.S_IRWXU | stat.S_IRGRP | stat.S_IROTH)
        extra_param = "--xml %s" % vmxml.xml
        dict_os_attrs = {}
        if "hd" in vmxml.os.boots:
            dict_os_attrs.update({"boots": ["cdrom"]})
            vmxml.set_os_attrs(**dict_os_attrs)
        else:
            test.cancel("Please add 'hd' in boots for --xml testing")
        logging.info("vmxml os is %s after update" % vmxml.os.xmltreefile)

    # run test
    if vm_ref == "" or vm_ref == "xyz":
        status = virsh.restore(vm_ref, extra_param, debug=True,
                               unprivileged_user=unprivileged_user,
                               uri=uri,
                               ignore_status=True).exit_status
    else:
        if os_type == "linux":
            cmd = "cat /proc/cpuinfo"
            try:
                status, output = session.cmd_status_output(cmd, timeout=10)
            finally:
                session.close()
            if not re.search("processor", output):
                test.fail("Unable to read /proc/cpuinfo")
        tmp_file = os.path.join(data_dir.get_tmp_dir(), "save.file")
        time.sleep(time_before_save)
        ret = virsh.save(vm_name, tmp_file, debug=True)
        libvirt.check_exit_status(ret)
        if vm_ref == "saved_file":
            vm_ref = tmp_file
        elif vm_ref == "empty_new_file":
            tmp_file = os.path.join(data_dir.get_tmp_dir(), "new.file")
            with open(tmp_file, 'w') as tmp:
                pass
            vm_ref = tmp_file
        if vm.is_alive():
            vm.destroy()
        if pre_status == "start":
            virsh.start(vm_name)
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()
        status = virsh.restore(vm_ref, extra_param, debug=True,
                               unprivileged_user=unprivileged_user,
                               uri=uri,
                               ignore_status=True).exit_status
    if not status_error:
        list_output = virsh.dom_list().stdout.strip()

    session.close()

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    try:
        if status_error:
            if not status:
                if libvirtd == "off" and libvirt_version.version_compare(5, 6, 0):
                    logging.info("From libvirt version 5.6.0 libvirtd is restarted "
                                 "and command should succeed")
                else:
                    test.fail("Run successfully with wrong command!")
        else:
            if status:
                test.fail("Run failed with right command")
            if not re.search(vm_name, list_output):
                test.fail("Run failed with right command")
            if extra_param.count("paused"):
                if not vm.is_paused():
                    test.fail("Guest state should be"
                              " paused after restore"
                              " due to the option --paused")
            if (extra_param.count("running") or
                    extra_param.count("xml") or
                    not extra_param):
                if vm.is_dead() or vm.is_paused():
                    test.fail("Guest state should be"
                              " running after restore")
            if extra_param.count("xml"):
                aft_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                boots_list = aft_vmxml.os.boots
                if "hd" in boots_list or "cdrom" not in boots_list:
                    test.fail("Update xml with restore failed")
    finally:
        if vm.is_paused():
            virsh.resume(vm_name)
        if "--xml" in extra_param:
            backup_xml.sync()
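A minimal sketch of the --paused restore path checked above, assuming the virttest.virsh wrapper and a running domain "demo-vm" (hypothetical name and path):

from virttest import virsh

DOMAIN = "demo-vm"                  # hypothetical domain name
STATE_FILE = "/tmp/demo-vm.save"    # hypothetical saved-state path

virsh.save(DOMAIN, STATE_FILE, ignore_status=False)

# --paused restores the memory image but leaves the domain paused.
result = virsh.restore(STATE_FILE, "--paused", debug=True)
assert result.exit_status == 0
assert "paused" in virsh.domstate(DOMAIN).stdout

virsh.resume(DOMAIN, ignore_status=False)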
Ejemplo n.º 45
0
def run(test, params, env):
    """
    Test command: virsh domstate.

    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh domstate operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)

    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("domstate_vm_ref")
    status_error = (params.get("status_error", "no") == "yes")
    extra = params.get("domstate_extra", "")
    vm_action = params.get("domstate_vm_action", "")
    vm_oncrash_action = params.get("domstate_vm_oncrash")

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    libvirtd_service = utils_libvirtd.Libvirtd()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # Back up xml file.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Back up qemu.conf
    utils.run("cp %s %s" % (QEMU_CONF, QEMU_CONF_BK))

    dump_path = os.path.join(test.tmpdir, "dump/")
    dump_file = ""
    if vm_action == "crash":
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Set on_crash action
        vmxml.on_crash = vm_oncrash_action
        # Add <panic> device to domain
        panic_dev = Panic()
        panic_dev.addr_type = "isa"
        panic_dev.addr_iobase = "0x505"
        vmxml.add_device(panic_dev)
        vmxml.sync()
        # Config auto_dump_path in qemu.conf
        cmd = "echo auto_dump_path = \\\"%s\\\" >> %s" % (dump_path, QEMU_CONF)
        utils.run(cmd)
        libvirtd_service.restart()
        if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
            dump_file = dump_path + vm_name + "-*"
        # Start VM and check the panic device
        virsh.start(vm_name, ignore_status=False)
        vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # Skip this test if no panic device is found
        if not vmxml_new.xmltreefile.find('devices').findall('panic'):
            raise error.TestNAError("No 'panic' device in the guest, maybe "
                                    "your libvirt version doesn't support it")
    try:
        if vm_action == "suspend":
            virsh.suspend(vm_name, ignore_status=False)
        elif vm_action == "resume":
            virsh.suspend(vm_name, ignore_status=False)
            virsh.resume(vm_name, ignore_status=False)
        elif vm_action == "destroy":
            virsh.destroy(vm_name, ignore_status=False)
        elif vm_action == "start":
            virsh.destroy(vm_name, ignore_status=False)
            virsh.start(vm_name, ignore_status=False)
        elif vm_action == "kill":
            libvirtd_service.stop()
            kill_process_by_pattern(vm_name)
            libvirtd_service.restart()
        elif vm_action == "crash":
            session = vm.wait_for_login()
            # Stop kdump in the guest
            session.cmd("service kdump stop", ignore_all_errors=True)
            # Enable sysRq
            session.cmd("echo 1 > /proc/sys/kernel/sysrq")
            # Send key ALT-SysRq-c to crash VM, and command will not return
            # as vm crashed, so fail early for 'destroy' and 'preserve' action.
            # For 'restart', 'coredump-restart' and 'coredump-destroy' actions,
            # they all need more time to dump core file or restart OS, so using
            # the default session command timeout(60s)
            try:
                if vm_oncrash_action in ['destroy', 'preserve']:
                    timeout = 3
                else:
                    timeout = 60
                session.cmd("echo c > /proc/sysrq-trigger", timeout=timeout)
            except ShellTimeoutError:
                pass
            session.close()
    except error.CmdError as e:
        raise error.TestError("Guest prepare action error: %s" % e)
Ejemplo n.º 46
0
def run(test, params, env):
    """
    Test command: virsh suspend.

    The command can suspend a domain.
    1.Prepare test environment.
    2.Perform virsh suspend operation.
    3.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    vm_ref = params.get("suspend_vm_ref", "")
    extra = params.get("suspend_extra", "")
    status_error = params.get("status_error", "no")
    suspend_readonly = "yes" == params.get("suspend_readonly", "no")

    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    no_err_msg = False
    if not libvirt_version.version_compare(1, 1, 1):
        # Suspend may fail without an error message if the domain name is ''
        # on older libvirt versions
        if vm_ref == '':
            no_err_msg = True
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current libvirt "
                        "version.")

    # Run test case
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid.strip()))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, extra)
    elif vm_ref == "uuid":
        vm_ref = domuuid

    result = virsh.suspend(vm_ref, ignore_status=True,
                           unprivileged_user=unprivileged_user,
                           uri=uri, debug=True)
    status = result.exit_status
    output = result.stdout.strip()
    err = result.stderr.strip()
    if status == 0 and not vm.is_paused():
        status = 1

    # Test the readonly mode
    if suspend_readonly:
        result = virsh.suspend(vm_ref, ignore_status=True, debug=True, readonly=True)
        libvirt.check_exit_status(result, expect_error=True)
        # This is for status_error check
        status = result.exit_status
        if status:
            err = 1

    # resume the VM
    if vm.is_paused():
        result = virsh.resume(vm_ref, ignore_status=True,
                              unprivileged_user=unprivileged_user,
                              uri=uri, debug=True)
        if result.exit_status or vm.is_paused():
            status = 1

    # Check result
    if status_error == "yes":
        if not err and not no_err_msg:
            test.fail("No error hint to user about bad command!")
        if status == 0:
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0 or output == "":
            test.fail("Run failed with right command")
    else:
        test.fail("The status_error must be 'yes' or 'no'!")
Ejemplo n.º 47
0
def run(test, params, env):
    """
    Test command: virsh save.

    The command can save the RAM state of a running domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Run virsh save command with assigned options.
    4.Recover test environment.(If the libvirtd service is stopped, start
      the libvirtd service.)
    5.Confirm the test result.

    """
    savefile = params.get("save_file", "save.file")
    if savefile:
        savefile = os.path.join(test.tmpdir, savefile)
    libvirtd = params.get("libvirtd", "on")
    extra_param = params.get("save_extra_param")
    vm_ref = params.get("save_vm_ref")
    progress = ("yes" == params.get("save_progress", "no"))
    options = params.get("save_option", "")
    status_error = ("yes" == params.get("save_status_error", "yes"))
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    # set the option
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref.count("invalid"):
        vm_ref = params.get(vm_ref)
    elif vm_ref.count("name"):
        vm_ref = vm_name
    vm_ref += (" %s" % extra_param)

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    if progress:
        options += " --verbose"
    result = virsh.save(vm_ref, savefile, options, ignore_status=True,
                        unprivileged_user=unprivileged_user,
                        uri=uri, debug=True)
    status = result.exit_status
    err_msg = result.stderr.strip()

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    if savefile:
        virsh.restore(savefile)

    # check status_error
    try:
        if status_error:
            if not status:
                raise error.TestFail("virsh run succeeded with an "
                                     "incorrect command")
        else:
            if status:
                raise error.TestFail("virsh run failed with a "
                                     "correct command")
            if progress and not err_msg.count("Save:"):
                raise error.TestFail("No progress information outputed!")
            if options.count("running"):
                if vm.is_dead() or vm.is_paused():
                    raise error.TestFail("Guest state should be"
                                         " running after restore"
                                         " due to the option --running")
            elif options.count("paused"):
                if not vm.is_paused():
                    raise error.TestFail("Guest state should be"
                                         " paused after restore"
                                         " due to the option --paused")
            else:
                if vm.is_dead():
                    raise error.TestFail("Guest state should be"
                                         " alive after restore"
                                         " since no option was specified")
    finally:
        if vm.is_paused():
            virsh.resume(vm_name)
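A minimal sketch of the --verbose progress case above, assuming the virttest.virsh wrapper and a running domain "demo-vm" (hypothetical name and path); virsh reports save progress on stderr:

from virttest import virsh

DOMAIN = "demo-vm"                  # hypothetical domain name
STATE_FILE = "/tmp/demo-vm.save"    # hypothetical saved-state path

# --verbose makes virsh emit "Save: [ NN %]" progress lines on stderr.
result = virsh.save(DOMAIN, STATE_FILE, "--verbose",
                    ignore_status=True, debug=True)
assert result.exit_status == 0
assert "Save:" in result.stderr

# Bring the domain back so later tests see it running.
virsh.restore(STATE_FILE)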
Ejemplo n.º 48
0
def run(test, params, env):
    """
    Test the tpm virtual devices
    1. prepare a guest with different tpm devices
    2. check whether the guest can be started
    3. check the xml and qemu cmd line, and swtpm for vtpm
    4. check tpm usage in guest os
    """
    # Tpm passthrough supported since libvirt 1.0.5.
    if not libvirt_version.version_compare(1, 0, 5):
        test.cancel("Tpm device is not supported "
                    "on current libvirt version.")
    # Tpm passthrough supported since qemu 2.12.0-49.
    if not utils_misc.compare_qemu_version(2, 9, 0, is_rhev=False):
        test.cancel("Tpm device is not supported "
                    "on current qemu version.")

    tpm_model = params.get("tpm_model")
    backend_type = params.get("backend_type")
    backend_version = params.get("backend_version")
    device_path = params.get("device_path")
    tpm_num = int(params.get("tpm_num", 1))
    # After first start of vm with vtpm, do operations, check it still works
    vm_operate = params.get("vm_operate")
    # Sub-operation (e.g. domrename) under vm_operate (e.g. restart)
    vm_oprt = params.get("vm_oprt")
    secret_uuid = params.get("secret_uuid")
    secret_value = params.get("secret_value")
    # Change encryption state: from plain to encrypted, or reverse.
    encrypt_change = params.get("encrypt_change")
    secret_uuid = params.get("secret_uuid")
    prepare_secret = ("yes" == params.get("prepare_secret", "no"))
    remove_dev = ("yes" == params.get("remove_dev", "no"))
    multi_vms = ("yes" == params.get("multi_vms", "no"))
    # Remove swtpm state file
    rm_statefile = ("yes" == params.get("rm_statefile", "no"))
    test_suite = ("yes" == params.get("test_suite", "no"))
    restart_libvirtd = ("yes" == params.get("restart_libvirtd", "no"))
    no_backend = ("yes" == params.get("no_backend", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    err_msg = params.get("xml_errmsg", "")
    loader = params.get("loader", "")
    nvram = params.get("nvram", "")
    uefi_disk_url = params.get("uefi_disk_url", "")
    download_file_path = os.path.join(data_dir.get_tmp_dir(), "uefi_disk.qcow2")

    # Check tpm chip on host for passthrough testing
    if backend_type == "passthrough":
        dmesg_info = process.getoutput("dmesg|grep tpm -wi", shell=True)
        logging.debug("dmesg info about tpm:\n %s", dmesg_info)
        dmesg_error = re.search("No TPM chip found|TPM is disabled", dmesg_info)
        if dmesg_error:
            test.cancel(dmesg_error.group())
        else:
            # Try to check host tpm chip version
            tpm_v = None
            if re.search("2.0 TPM", dmesg_info):
                tpm_v = "2.0"
                if not utils_package.package_install("tpm2-tools"):
                    # package_install() return 'True' if succeed
                    test.error("Failed to install tpm2-tools on host")
            else:
                if re.search("1.2 TPM", dmesg_info):
                    tpm_v = "1.2"
                # If "1.2 TPM" or no version info in dmesg, try to test a tpm1.2 at first
                if not utils_package.package_install("tpm-tools"):
                    test.error("Failed to install tpm-tools on host")
    # Check host env for vtpm testing
    elif backend_type == "emulator":
        if not utils_misc.compare_qemu_version(4, 0, 0, is_rhev=False):
            test.cancel("vtpm(emulator backend) is not supported "
                        "on current qemu version.")
        # Install swtpm pkgs on host for vtpm emulation
        if not utils_package.package_install("swtpm*"):
            test.error("Failed to install swtpm swtpm-tools on host")

    def replace_os_disk(vm_xml, vm_name, nvram):
        """
        Replace os(nvram) and disk(uefi) for x86 vtpm test

        :param vm_xml: current vm's xml
        :param vm_name: current vm name
        :param nvram: nvram file path of vm
        """
        # Add loader, nvram in <os>
        nvram = nvram.replace("<VM_NAME>", vm_name)
        dict_os_attrs = {"loader_readonly": "yes",
                         "secure": "yes",
                         "loader_type": "pflash",
                         "loader": loader,
                         "nvram": nvram}
        vm_xml.set_os_attrs(**dict_os_attrs)
        logging.debug("Set smm=on in VMFeaturesXML")
        # Add smm in <features>
        features_xml = vm_xml.features
        features_xml.smm = "on"
        vm_xml.features = features_xml
        vm_xml.sync()
        # Replace disk with an uefi image
        if not utils_package.package_install("wget"):
            test.error("Failed to install wget on host")
        if uefi_disk_url.count("EXAMPLE"):
            test.error("Please provide the URL %s" % uefi_disk_url)
        else:
            download_cmd = ("wget %s -O %s" % (uefi_disk_url, download_file_path))
            process.system(download_cmd, verbose=False, shell=True)
        vm = env.get_vm(vm_name)
        uefi_disk = {'disk_source_name': download_file_path}
        libvirt.set_vm_disk(vm, uefi_disk)

    vm_names = params.get("vms").split()
    vm_name = vm_names[0]
    vm = env.get_vm(vm_name)
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    os_xml = getattr(vm_xml, "os")
    host_arch = platform.machine()
    if backend_type == "emulator" and host_arch == 'x86_64':
        if not utils_package.package_install("OVMF"):
            test.error("Failed to install OVMF or edk2-ovmf pkgs on host")
        if os_xml.xmltreefile.find('nvram') is None:
            replace_os_disk(vm_xml, vm_name, nvram)
            vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    if vm.is_alive():
        vm.destroy()

    vm2 = None
    if multi_vms:
        if len(vm_names) > 1:
            vm2_name = vm_names[1]
            vm2 = env.get_vm(vm2_name)
            vm2_xml = VMXML.new_from_inactive_dumpxml(vm2_name)
            vm2_xml_backup = vm2_xml.copy()
        else:
            # Clone additional vms if needed
            try:
                utils_path.find_command("virt-clone")
            except utils_path.CmdNotFoundError:
                if not utils_package.package_install(["virt-install"]):
                    test.cancel("Failed to install virt-install on host")
            vm2_name = "vm2_" + utils_misc.generate_random_string(5)
            ret_clone = utils_libguestfs.virt_clone_cmd(vm_name, vm2_name,
                                                        True, timeout=360, debug=True)
            if ret_clone.exit_status:
                test.error("Need more than one domains, but error occured when virt-clone.")
            vm2 = vm.clone(vm2_name)
            vm2_xml = VMXML.new_from_inactive_dumpxml(vm2_name)
        if vm2.is_alive():
            vm2.destroy()

    service_mgr = service.ServiceManager()

    def check_dumpxml(vm_name):
        """
        Check whether the added devices are shown in the guest xml

        :param vm_name: current vm name
        """
        logging.info("------Checking guest dumpxml------")
        if tpm_model:
            pattern = '<tpm model="%s">' % tpm_model
        else:
            # The default tpm model is "tpm-tis"
            pattern = '<tpm model="tpm-tis">'
        # Check tpm model
        xml_after_adding_device = VMXML.new_from_dumpxml(vm_name)
        logging.debug("xml after add tpm dev is %s", xml_after_adding_device)
        if pattern not in astring.to_text(xml_after_adding_device):
            test.fail("Can not find the %s tpm device xml "
                      "in the guest xml file." % tpm_model)
        # Check backend type
        pattern = '<backend type="%s"' % backend_type
        if pattern not in astring.to_text(xml_after_adding_device):
            test.fail("Can not find the %s backend type xml for tpm dev "
                      "in the guest xml file." % backend_type)
        # Check backend version
        if backend_version:
            check_ver = backend_version if backend_version != 'none' else '2.0'
            pattern = '"emulator" version="%s"' % check_ver
            if pattern not in astring.to_text(xml_after_adding_device):
                test.fail("Can not find the %s backend version xml for tpm dev "
                          "in the guest xml file." % check_ver)
        # Check device path
        if backend_type == "passthrough":
            pattern = '<device path="/dev/tpm0"'
            if pattern not in astring.to_text(xml_after_adding_device):
                test.fail("Can not find the %s device path xml for tpm dev "
                          "in the guest xml file." % device_path)
        # Check encryption secret
        if prepare_secret:
            pattern = '<encryption secret="%s" />' % encryption_uuid
            if pattern not in astring.to_text(xml_after_adding_device):
                test.fail("Can not find the %s secret uuid xml for tpm dev "
                          "in the guest xml file." % encryption_uuid)
        logging.info('------PASS on guest dumpxml check------')

    def check_qemu_cmd_line(vm, vm_name, domid):
        """
        Check whether the added devices are shown in the qemu cmd line

        :param vm: current vm
        :param vm_name: current vm name
        :param domid: domain id for checking vtpm socket file
        """
        logging.info("------Checking qemu cmd line------")
        if not vm.get_pid():
            test.fail('VM pid file missing.')
        with open('/proc/%s/cmdline' % vm.get_pid()) as cmdline_file:
            cmdline = cmdline_file.read()
            logging.debug("Qemu cmd line info:\n %s", cmdline)
        # Check tpm model
        pattern_list = ["-device.%s" % tpm_model]
        # Check backend type
        if backend_type == "passthrough":
            dev_num = re.search(r"\d+", device_path).group()
            backend_segment = "id=tpm-tpm%s" % dev_num
        else:
            # emulator backend
            backend_segment = "id=tpm-tpm0,chardev=chrtpm"
        pattern_list.append("-tpmdev.%s,%s" % (backend_type, backend_segment))
        # Check chardev socket for vtpm
        if backend_type == "emulator":
            pattern_list.append("-chardev.socket,id=chrtpm,"
                                "path=.*/run/libvirt/qemu/swtpm/%s-%s-swtpm.sock" % (domid, vm_name))
        for pattern in pattern_list:
            if not re.search(pattern, cmdline):
                if not remove_dev:
                    test.fail("Can not find the %s for tpm device "
                              "in qemu cmd line." % pattern)
            elif remove_dev:
                test.fail("%s still exists after remove vtpm and restart" % pattern)
        logging.info("------PASS on qemu cmd line check------")

    def check_swtpm(domid, domuuid, vm_name):
        """
        Check swtpm cmdline and files for vtpm.

        :param domid: domain id for checking vtpm files
        :param domuuid: domain uuid for checking vtpm state file
        :param vm_name: current vm name
        """
        logging.info("------Checking swtpm cmdline and files------")
        # Check swtpm cmdline
        swtpm_pid = utils_misc.get_pid("%s-swtpm.pid" % vm_name)
        if not swtpm_pid:
            if not remove_dev:
                test.fail('swtpm pid file missing.')
            else:
                return
        elif remove_dev:
            test.fail('swtpm pid file still exists after remove vtpm and restart')
        with open('/proc/%s/cmdline' % swtpm_pid) as cmdline_file:
            cmdline = cmdline_file.read()
            logging.debug("Swtpm cmd line info:\n %s", cmdline)
        pattern_list = ["--daemon", "--ctrl", "--tpmstate", "--log", "--tpm2", "--pid"]
        if prepare_secret:
            pattern_list.extend(["--key", "--migration-key"])
        for pattern in pattern_list:
            if not re.search(pattern, cmdline):
                test.fail("Can not find the %s for tpm device "
                          "in swtpm cmd line." % pattern)
        # Check swtpm files
        file_list = ["/var/run/libvirt/qemu/swtpm/%s-%s-swtpm.sock" % (domid, vm_name)]
        file_list.append("/var/lib/libvirt/swtpm/%s/tpm2" % domuuid)
        file_list.append("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm_name)
        file_list.append("/var/run/libvirt/qemu/swtpm/%s-%s-swtpm.pid" % (domid, vm_name))
        for swtpm_file in file_list:
            if not os.path.exists(swtpm_file):
                test.fail("Swtpm file: %s does not exist" % swtpm_file)
        logging.info("------PASS on Swtpm cmdline and files check------")

    def get_tpm2_tools_cmd(session=None):
        """
        Get tpm2-tools pkg version and return corresponding getrandom cmd

        :param session: guest console session
        :return: tpm2_getrandom cmd usage
        """
        cmd = 'rpm -q tpm2-tools'
        get_v_tools = session.cmd(cmd) if session else process.run(cmd).stdout_text
        v_tools_list = get_v_tools.strip().split('-')
        if session:
            logging.debug("The tpm2-tools version is %s", v_tools_list[2])
        v_tools = int(v_tools_list[2].split('.')[0])
        return "tpm2_getrandom 8" if v_tools < 4 else "tpm2_getrandom -T device:/dev/tpm0 8 --hex"

    def get_host_tpm_bef(tpm_v):
        """
        Test host tpm function and identify its real version before passthrough.
        Since dmesg sometimes lacks tpm info, use tpm-tools or tpm2-tools to
        exercise the device.

        :param tpm_v: host tpm version get from dmesg info
        :return: host tpm version
        """
        logging.info("------Checking host tpm device before passthrough------")
        # Try tcsd tool for suspected tpm1.2 chip on host
        tpm_real_v = tpm_v
        if tpm_v != "2.0":
            if not service_mgr.start('tcsd'):
                # service_mgr.start() returns True on success
                if tpm_v == "1.2":
                    test.fail("Host tcsd.service start failed")
                else:
                    # tpm_v got nothing from dmesg; log the failure here and
                    # fall through to the next 'if' to try the tpm2.0 tools.
                    logging.info("Host tcsd.service start failed")
            else:
                tpm_real_v = "1.2"
                logging.info("Host tpm version info:")
                result = process.run("tpm_version", ignore_status=False)
                logging.debug("[host]# tpm_version\n %s", result.stdout)
                time.sleep(2)
                service_mgr.stop('tcsd')
        if tpm_v != "1.2":
            # Try tpm2.0 tools
            if not utils_package.package_install("tpm2-tools"):
                test.error("Failed to install tpm2-tools on host")
            tpm2_getrandom_cmd = get_tpm2_tools_cmd()
            if process.run(tpm2_getrandom_cmd, ignore_status=True).exit_status:
                test.cancel("Both tcsd and tpm2-tools can not work, "
                            "pls check your host tpm version and test env.")
            else:
                tpm_real_v = "2.0"
        logging.info("------PASS on host tpm device check------")
        return tpm_real_v
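
The probe above reduces to a small decision table; a simplified pure-Python sketch (inputs hypothetical, no host access) of how the real version is resolved:

# Simplified sketch of get_host_tpm_bef(): the dmesg-reported version
# versus which tool family actually responds on the host.
def resolve_tpm_version(tpm_v, tcsd_starts, tpm2_getrandom_ok):
    if tpm_v != "2.0" and tcsd_starts:
        return "1.2"
    if tpm_v != "1.2" and tpm2_getrandom_ok:
        return "2.0"
    return None  # neither tool works; the test above cancels here

print(resolve_tpm_version("", False, True))  # 2.0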

    def test_host_tpm_aft(tpm_real_v):
        """
        Test host tpm function after passthrough

        :param tpm_real_v: real host tpm version identified by testing
        """
        logging.info("------Checking host tpm device after passthrough------")
        if tpm_real_v == "1.2":
            if service_mgr.start('tcsd'):
                time.sleep(2)
                service_mgr.stop('tcsd')
                test.fail("Host tpm should not work after passthrough to guest.")
            else:
                logging.info("Expected failure: Tpm is being used by guest.")
        elif tpm_real_v == "2.0":
            tpm2_getrandom_cmd = get_tpm2_tools_cmd()
            if not process.run(tpm2_getrandom_cmd, ignore_status=True).exit_status:
                test.fail("Host tpm should not work after passthrough to guest.")
            else:
                logging.info("Expected failure: Tpm is being used by guest.")
        logging.info("------PASS on host tpm device check------")

    def test_guest_tpm(expect_version, session, expect_fail):
        """
        Test tpm function in guest

        :param expect_version: expected guest tpm version: the host version for
                               passthrough, or the version set on the emulator
        :param session: guest session to be tested
        :param expect_fail: True if the guest tpm is expected to fail
        """
        logging.info("------Checking guest tpm device work------")
        if expect_version == "1.2":
            # Install tpm-tools and test by tcsd method
            if not utils_package.package_install(["tpm-tools"], session, 360):
                test.error("Failed to install tpm-tools package in guest")
            else:
                status, output = session.cmd_status_output("systemctl start tcsd")
                logging.debug("Command output: %s", output)
                if status:
                    if expect_fail:
                        test.cancel("tpm-crb passthrough only works with host tpm2.0, "
                                    "but your host tpm version is 1.2")
                    else:
                        test.fail("Failed to start tcsd.service in guest")
                else:
                    dev_output = session.cmd_output("ls /dev/|grep tpm")
                    logging.debug("Command output: %s", dev_output)
                    status, output = session.cmd_status_output("tpm_version")
                    logging.debug("Command output: %s", output)
                    if status:
                        test.fail("Guest tpm can not work")
        else:
            # If expect_version is tpm2.0, install and test by tpm2-tools
            if not utils_package.package_install(["tpm2-tools"], session, 360):
                test.error("Failed to install tpm2-tools package in guest")
            else:
                tpm2_getrandom_cmd = get_tpm2_tools_cmd(session)
                status1, output1 = session.cmd_status_output("ls /dev/|grep tpm")
                logging.debug("Command output: %s", output1)
                status2, output2 = session.cmd_status_output(tpm2_getrandom_cmd)
                logging.debug("Command output: %s", output2)
                if status1 or status2:
                    if not expect_fail:
                        test.fail("Guest tpm can not work")
                    else:
                        d_status, d_output = session.cmd_status_output("date")
                        if d_status:
                            test.fail("Guest OS doesn't work well")
                        logging.debug("Command output: %s", d_output)
                elif expect_fail:
                    test.fail("Expect fail but guest tpm still works")
        logging.info("------PASS on guest tpm device work check------")

    def run_test_suite_in_guest(session):
        """
        Run kernel test suite for guest tpm.

        :param session: Guest session to be tested
        """
        logging.info("------Checking kernel test suite for guest tpm------")
        boot_info = session.cmd('uname -r').strip().split('.')
        kernel_version = '.'.join(boot_info[:2])
        # Download test suite per current guest kernel version
        parent_path = "https://cdn.kernel.org/pub/linux/kernel"
        if float(kernel_version) < 5.3:
            major_version = "5"
            file_version = "5.3"
        else:
            major_version = boot_info[0]
            file_version = kernel_version
        src_url = "%s/v%s.x/linux-%s.tar.xz" % (parent_path, major_version, file_version)
        download_cmd = "wget %s -O %s" % (src_url, "/root/linux.tar.xz")
        output = session.cmd_output(download_cmd, timeout=480)
        logging.debug("Command output: %s", output)
        # Install necessary pkgs to build the test suite
        if not utils_package.package_install(["tar", "make", "gcc", "rsync", "python2"], session, 360):
            test.fail("Failed to install specified pkgs in guest OS.")
        # Unzip the downloaded test suite
        status, output = session.cmd_status_output("tar xvJf /root/linux.tar.xz -C /root")
        if status:
            test.fail("Uzip failed: %s" % output)
        # The test suite scripts only support python2, so switch them explicitly
        test_path = "/root/linux-%s/tools/testing/selftests" % file_version
        sed_cmd = "sed -i 's/python -m unittest/python2 -m unittest/g' %s/tpm2/test_*.sh" % test_path
        output = session.cmd_output(sed_cmd)
        logging.debug("Command output: %s", output)
        # Build and run the .sh files of the test suite
        status, output = session.cmd_status_output("make -C %s TARGETS=tpm2 run_tests" % test_path, timeout=360)
        logging.debug("Command output: %s", output)
        if status:
            test.fail("Failed to run test suite in guest OS.")
        for test_sh in ["test_smoke.sh", "test_space.sh"]:
            pattern = "ok .* selftests: tpm2: %s" % test_sh
            if not re.search(pattern, output) or ("not ok" in output):
                test.fail("test suite check failed.")
        logging.info("------PASS on kernel test suite check------")

    def reuse_by_vm2(tpm_dev):
        """
        Try to add the same tpm to a second guest while it is in use by the first guest.

        :param tpm_dev: tpm device to be added into guest xml
        """
        logging.info("------Trying to add same tpm to a second domain------")
        vm2_xml.remove_all_device_by_type('tpm')
        vm2_xml.add_device(tpm_dev)
        vm2_xml.sync()
        ret = virsh.start(vm2_name, ignore_status=True, debug=True)
        if backend_type == "passthrough":
            if ret.exit_status:
                logging.info("Expected failure when try to passthrough a tpm"
                             " that being used by another guest")
                return
            test.fail("Reuse a passthroughed tpm should not succeed.")
        elif ret.exit_status:
            # emulator backend
            test.fail("Vtpm for each guest should not interfere with each other")

    try:
        tpm_real_v = None
        sec_uuids = []
        new_name = ""
        virsh_dargs = {"debug": True, "ignore_status": False}
        vm_xml.remove_all_device_by_type('tpm')
        tpm_dev = Tpm()
        if tpm_model:
            tpm_dev.tpm_model = tpm_model
        if not no_backend:
            backend = tpm_dev.Backend()
            if backend_type != 'none':
                backend.backend_type = backend_type
                if backend_type == "passthrough":
                    tpm_real_v = get_host_tpm_bef(tpm_v)
                    logging.debug("The host tpm real version is %s", tpm_real_v)
                    if device_path:
                        backend.device_path = device_path
                if backend_type == "emulator":
                    if backend_version != 'none':
                        backend.backend_version = backend_version
                    if prepare_secret:
                        auth_sec_dict = {"sec_ephemeral": "no",
                                         "sec_private": "yes",
                                         "sec_desc": "sample vTPM secret",
                                         "sec_usage": "vtpm",
                                         "sec_name": "VTPM_example"}
                        encryption_uuid = libvirt.create_secret(auth_sec_dict)
                        if secret_value != 'none':
                            virsh.secret_set_value(encryption_uuid, "open sesame", encode=True, debug=True)
                        sec_uuids.append(encryption_uuid)
                        if encrypt_change != 'encrpt':
                            # plain_to_encrypt will not add encryption on first start
                            if secret_uuid == 'invalid':
                                encryption_uuid = encryption_uuid[:-1]
                            backend.encryption_secret = encryption_uuid
                        if secret_uuid == "change":
                            auth_sec_dict["sec_desc"] = "sample2 vTPM secret"
                            auth_sec_dict["sec_name"] = "VTPM_example2"
                            new_encryption_uuid = libvirt.create_secret(auth_sec_dict)
                            virsh.secret_set_value(new_encryption_uuid, "open sesame", encode=True, debug=True)
                            sec_uuids.append(new_encryption_uuid)
                    if secret_uuid == 'nonexist':
                        backend.encryption_secret = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"
            tpm_dev.backend = backend
        logging.debug("tpm dev xml to add is:\n %s", tpm_dev)
        for num in range(tpm_num):
            vm_xml.add_device(tpm_dev, True)
        ret = virsh.define(vm_xml.xml, ignore_status=True, debug=True)
        expected_match = ""
        if not err_msg:
            expected_match = "Domain .*%s.* defined from %s" % (vm_name, vm_xml.xml)
        libvirt.check_result(ret, err_msg, "", False, expected_match)
        if err_msg:
            # Stop test when get expected failure
            return
        if vm_operate != "restart":
            check_dumpxml(vm_name)
        # For default model, no need start guest to test
        if tpm_model:
            expect_fail = False
            try:
                vm.start()
            except VMStartError as detail:
                if secret_value == 'none' or secret_uuid == 'nonexist':
                    logging.debug("Expected failure: %s", detail)
                    return
                else:
                    test.fail(detail)
            domuuid = vm.get_uuid()
            if vm_operate or restart_libvirtd:
                # Make sure OS works before vm operate or restart libvirtd
                session = vm.wait_for_login()
                test_guest_tpm("2.0", session, False)
                session.close()
                if restart_libvirtd:
                    utils_libvirtd.libvirtd_restart()
                swtpm_statedir = "/var/lib/libvirt/swtpm/%s" % domuuid
                if vm_operate == "resume":
                    virsh.suspend(vm_name, **virsh_dargs)
                    time.sleep(3)
                    virsh.resume(vm_name, **virsh_dargs)
                elif vm_operate == "snapshot":
                    virsh.snapshot_create_as(vm_name, "sp1 --memspec file=/tmp/testvm_sp1", **virsh_dargs)
                elif vm_operate in ["restart", "create"]:
                    vm.destroy()
                    if vm_operate == "create":
                        virsh.undefine(vm_name, options="--nvram", **virsh_dargs)
                        if os.path.exists(swtpm_statedir):
                            test.fail("Swtpm state dir: %s still exist after vm undefine" % swtpm_statedir)
                        virsh.create(vm_xml.xml, **virsh_dargs)
                    else:
                        if vm_oprt == "domrename":
                            new_name = "vm_" + utils_misc.generate_random_string(5)
                            virsh.domrename(vm_name, new_name, **virsh_dargs)
                            new_vm = libvirt_vm.VM(new_name, vm.params, vm.root_dir, vm.address_cache)
                            vm = new_vm
                            vm_name = new_name
                        elif secret_value == 'change':
                            logging.info("Changing secret value...")
                            virsh.secret_set_value(encryption_uuid, "new sesame", encode=True, debug=True)
                        elif not restart_libvirtd:
                            # remove_dev or do other vm operations during restart
                            vm_xml.remove_all_device_by_type('tpm')
                            if secret_uuid == "change" or encrypt_change:
                                # Change the secret uuid, or flip the encryption state between plain and encrypted
                                if encrypt_change == 'plain':
                                    # Change from encrypted to plain: redefine a tpm dev without encryption
                                    tpm_dev = Tpm()
                                    tpm_dev.tpm_model = tpm_model
                                    backend = tpm_dev.Backend()
                                    backend.backend_type = backend_type
                                    backend.backend_version = backend_version
                                else:
                                    # Use a new secret's uuid
                                    if secret_uuid == "change":
                                        encryption_uuid = new_encryption_uuid
                                    backend.encryption_secret = encryption_uuid
                                tpm_dev.backend = backend
                                logging.debug("The new tpm dev xml to add for restart vm is:\n %s", tpm_dev)
                                vm_xml.add_device(tpm_dev, True)
                            if encrypt_change in ['encrpt', 'plain']:
                                # Use define() instead of sync(), whose undefine step would remove the state file
                                vm_xml.define()
                            else:
                                vm_xml.sync()
                        if rm_statefile:
                            swtpm_statefile = "%s/tpm2/tpm2-00.permall" % swtpm_statedir
                            logging.debug("Removing state file: %s", swtpm_statefile)
                            os.remove(swtpm_statefile)
                        ret = virsh.start(vm_name, ignore_status=True, debug=True)
                        libvirt.check_exit_status(ret, status_error)
                        if status_error and ret.exit_status != 0:
                            return
                    if not remove_dev:
                        check_dumpxml(vm_name)
                elif vm_operate == 'managedsave':
                    virsh.managedsave(vm_name, **virsh_dargs)
                    time.sleep(5)
                    if secret_value == 'change':
                        logging.info("Changing secret value...")
                        virsh.secret_set_value(encryption_uuid, "new sesame", encode=True, debug=True)
                        if rm_statefile:
                            swtpm_statefile = "%s/tpm2/tpm2-00.permall" % swtpm_statedir
                            logging.debug("Removing state file: %s", swtpm_statefile)
                            os.remove(swtpm_statefile)
                    ret = virsh.start(vm_name, ignore_status=True, debug=True)
                    libvirt.check_exit_status(ret, status_error)
                    if status_error and ret.exit_status != 0:
                        return
            domid = vm.get_id()
            check_qemu_cmd_line(vm, vm_name, domid)
            if backend_type == "passthrough":
                if tpm_real_v == "1.2" and tpm_model == "tpm-crb":
                    expect_fail = True
                expect_version = tpm_real_v
                test_host_tpm_aft(tpm_real_v)
            else:
                # emulator backend
                if remove_dev:
                    expect_fail = True
                expect_version = backend_version
                check_swtpm(domid, domuuid, vm_name)
            session = vm.wait_for_login()
            if test_suite:
                run_test_suite_in_guest(session)
            else:
                test_guest_tpm(expect_version, session, expect_fail)
            session.close()
            if multi_vms:
                reuse_by_vm2(tpm_dev)
                if backend_type != "passthrough":
                    # emulator backend
                    check_dumpxml(vm2_name)
                    domid = vm2.get_id()
                    domuuid = vm2.get_uuid()
                    check_qemu_cmd_line(vm2, vm2_name, domid)
                    check_swtpm(domid, domuuid, vm2_name)
                    session = vm2.wait_for_login()
                    test_guest_tpm(backend_version, session, expect_fail)
                    session.close()

    finally:
        # Remove renamed domain if it exists
        if new_name:
            virsh.remove_domain(new_name, "--nvram", debug=True)
            if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % new_name):
                os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % new_name)
        # Remove snapshot if exists
        if vm_operate == "snapshot":
            snapshot_lists = virsh.snapshot_list(vm_name)
            if len(snapshot_lists) > 0:
                libvirt.clean_up_snapshots(vm_name, snapshot_lists)
                for snap in snapshot_lists:
                    virsh.snapshot_delete(vm_name, snap, "--metadata")
                if os.path.exists("/tmp/testvm_sp1"):
                    os.remove("/tmp/testvm_sp1")
        # Clear guest os
        if test_suite:
            session = vm.wait_for_login()
            logging.info("Removing dir /root/linux-*")
            output = session.cmd_output("rm -rf /root/linux-*")
            logging.debug("Command output:\n %s", output)
            session.close()
        if vm_operate == "create":
            vm.define(vm_xml.xml)
        vm_xml_backup.sync(options="--nvram --managed-save")
        # Remove swtpm log file in case of impact on later runs
        if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm.name):
            os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm.name)
        for sec_uuid in set(sec_uuids):
            virsh.secret_undefine(sec_uuid, ignore_status=True, debug=True)
        if vm2:
            if len(vm_names) > 1:
                vm2_xml_backup.sync(options="--nvram")
            else:
                virsh.remove_domain(vm2_name, "--nvram --remove-all-storage", debug=True)
            if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm2.name):
                os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm2.name)
Example No. 49
def run(test, params, env):
    """
    Different cpu compat mode scenario tests

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    def check_feature(vm, feature="", vcpu=0):
        """
        Check that the given feature is present.
        :param vm: VM object
        :param feature: feature to be verified
        :param vcpu: vcpu number on which to pin the guest check
        :return: True on success; fails the test on failure
        """
        session = vm.wait_for_login()
        if 'power8' in feature:
            cmd = 'lscpu|grep -i "Model name:.*power8"'
        elif 'xive' in feature:
            # Remove the -v once guest xive support is available;
            # right now a power9 guest supports only xics
            cmd = "grep -v xive /sys/firmware/devicetree/base/interrupt-*/compatible"
        elif 'xics' in feature:
            cmd = "grep -v xive /sys/firmware/devicetree/base/interrupt-*/compatible"
        elif 'power9' in feature:
            cmd = 'lscpu|grep -i "Model name:.*power9"'
        elif 'hpt' in feature:
            cmd = 'grep "MMU.*: Hash" /proc/cpuinfo'
        elif 'rpt' in feature:
            cmd = 'grep "MMU.*: Radix" /proc/cpuinfo'
        elif 'isa' in feature:
            utils_package.package_install('gcc', session)
            cmd = "echo 'int main(){asm volatile (\".long 0x7c0005e6\");"
            cmd += "return 0;}' > ~/a.c;cc ~/a.c;taskset -c %s ./a.out" % vcpu
        status, output = session.cmd_status_output(cmd)
        logging.debug(output)
        session.close()
        if feature != "isa2.7":
            if status != 0:
                test.fail("Feature: %s check failed inside "
                          "%s guest on %s host" % (feature,
                                                   guest_version,
                                                   host_version))
        else:
            if status == 0:
                test.fail("isa3.0 instruction succeeds in "
                          "%s guest on %s host" % (guest_version,
                                                   host_version))
        return True

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pin_vcpu = 0
    host_version = params.get("host_version")
    guest_version = params.get("guest_version")
    max_vcpu = params.get("cpucompat_vcpu_max", "")
    cur_vcpu = int(params.get("cpucompat_vcpu_cur", "1"))
    cores = int(params.get("topology_cores", '1'))
    sockets = int(params.get("topology_sockets", '1'))
    threads = int(params.get("topology_threads", '1'))
    status_error = "yes" == params.get("status_error", "no")
    condn = params.get("condn", "")
    guest_features = params.get("guest_features", "")
    if guest_features:
        guest_features = guest_features.split(',')
        if guest_version:
            guest_features.append(guest_version)
    if host_version not in cpu.get_cpu_arch():
        test.cancel("Unsupported Host cpu version")

    vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    org_xml = vmxml.copy()
    # Destroy the vm
    vm.destroy()
    try:
        # Set cpu model
        if max_vcpu:
            pin_vcpu = int(max_vcpu) - 1
            libvirt_xml.VMXML.set_vm_vcpus(vm_name, int(max_vcpu), cur_vcpu,
                                           sockets=sockets, cores=cores,
                                           threads=threads, add_topology=True)
        libvirt_xml.VMXML.set_cpu_mode(vm_name, model=guest_version)
        logging.debug(virsh.dumpxml(vm_name))
        try:
            vm.start()
        except virt_vm.VMStartError as detail:
            if not status_error:
                test.fail("%s" % detail)
        if max_vcpu:
            virsh.setvcpus(vm_name, int(max_vcpu), "--live",
                           ignore_status=False, debug=True)
            if not cpu.check_if_vm_vcpu_match(int(max_vcpu), vm):
                test.fail("Vcpu hotplug failed")
        if not status_error:
            for feature in guest_features:
                check_feature(vm, feature, vcpu=pin_vcpu)
        if condn == "filetrans":
            utils_test.run_file_transfer(test, params, env)
        elif condn == "stress":
            bt = utils_test.run_avocado_bg(vm, params, test)
            if not bt:
                test.cancel("guest stress failed to start")
        elif condn == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
            result = virsh.save(vm_name, save_file, ignore_status=True,
                                debug=True)
            utils_test.libvirt.check_exit_status(result)
            # Sleep a few seconds before guest recovery
            time.sleep(2)
            if os.path.exists(save_file):
                result = virsh.restore(save_file, ignore_status=True,
                                       debug=True)
                utils_test.libvirt.check_exit_status(result)
                os.remove(save_file)
        elif condn == "suspend":
            result = virsh.suspend(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result)
            # Sleep a few seconds before guest recovery
            time.sleep(2)
            result = virsh.resume(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result)
        else:
            pass
    finally:
        org_xml.sync()
Example No. 50
def run(test, params, env):
    """
    Test command: virsh domstate.

    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh domstate operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    libvirtd_state = params.get("libvirtd", "on")
    vm_ref = params.get("domstate_vm_ref")
    status_error = (params.get("status_error", "no") == "yes")
    extra = params.get("domstate_extra", "")
    vm_action = params.get("domstate_vm_action", "")
    vm_oncrash_action = params.get("domstate_vm_oncrash")

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    libvirtd_service = utils_libvirtd.Libvirtd()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # Back up xml file.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Back up qemu.conf
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    dump_path = os.path.join(test.tmpdir, "dump/")
    os.mkdir(dump_path)
    dump_file = ""
    try:
        if vm_action == "crash":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml.on_crash = vm_oncrash_action
            if not vmxml.xmltreefile.find('devices').findall('panic'):
                # Add <panic> device to domain
                panic_dev = Panic()
                panic_dev.addr_type = "isa"
                panic_dev.addr_iobase = "0x505"
                vmxml.add_device(panic_dev)
            vmxml.sync()
            # Config auto_dump_path in qemu.conf
            qemu_conf.auto_dump_path = dump_path
            libvirtd_service.restart()
            if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
                dump_file = dump_path + "*" + vm_name + "-*"
            # Start VM and check the panic device
            virsh.start(vm_name, ignore_status=False)
            vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
            # Skip this test if no panic device is found
            if not vmxml_new.xmltreefile.find('devices').findall('panic'):
                raise exceptions.TestSkipError(
                    "No 'panic' device in the guest. Maybe your libvirt "
                    "version doesn't support it.")
        try:
            if vm_action == "suspend":
                virsh.suspend(vm_name, ignore_status=False)
            elif vm_action == "resume":
                virsh.suspend(vm_name, ignore_status=False)
                virsh.resume(vm_name, ignore_status=False)
            elif vm_action == "destroy":
                virsh.destroy(vm_name, ignore_status=False)
            elif vm_action == "start":
                virsh.destroy(vm_name, ignore_status=False)
                virsh.start(vm_name, ignore_status=False)
            elif vm_action == "kill":
                libvirtd_service.stop()
                utils_misc.kill_process_by_pattern(vm_name)
                libvirtd_service.restart()
            elif vm_action == "crash":
                session = vm.wait_for_login()
                session.cmd("service kdump stop", ignore_all_errors=True)
                # Enable sysRq
                session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                # Send SysRq-c to crash the VM. The command does not return
                # because the vm crashes, so fail fast (short timeout) for
                # the 'destroy' and 'preserve' actions. The 'restart',
                # 'coredump-restart' and 'coredump-destroy' actions need more
                # time to dump the core file or restart the OS, so keep the
                # default session command timeout (60s).
                try:
                    if vm_oncrash_action in ['destroy', 'preserve']:
                        timeout = 3
                    else:
                        timeout = 60
                    session.cmd("echo c > /proc/sysrq-trigger",
                                timeout=timeout)
                except (ShellTimeoutError, ShellProcessTerminatedError):
                    pass
                session.close()
        except process.CmdError as detail:
            raise exceptions.TestError("Guest prepare action error: %s" %
                                       detail)

        if libvirtd_state == "off":
            libvirtd_service.stop()

        if vm_ref == "remote":
            remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
            local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
            remote_pwd = params.get("remote_pwd", None)
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise exceptions.TestSkipError(
                    "Test 'remote' parameters not setup")
            status = 0
            try:
                remote_uri = libvirt_vm.complete_uri(local_ip)
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = "virsh -c %s domstate %s" % (remote_uri, vm_name)
                status, output = session.cmd_status_output(command,
                                                           internal_timeout=5)
                session.close()
            except process.CmdError:
                status = 1
        else:
            result = virsh.domstate(vm_ref,
                                    extra,
                                    ignore_status=True,
                                    debug=True)
            status = result.exit_status
            output = result.stdout.strip()

        # check status_error
        if status_error:
            if not status:
                raise exceptions.TestFail(
                    "Ran successfully with a wrong command!")
        else:
            if status or not output:
                raise exceptions.TestFail("Run failed with a correct command")
            if extra.count("reason"):
                if vm_action == "suspend":
                    # Destroy now, otherwise destroying the vm later takes a long time
                    virsh.destroy(vm_name)
                    if not output.count("user"):
                        raise ActionError(vm_action)
                elif vm_action == "resume":
                    if not output.count("unpaused"):
                        raise ActionError(vm_action)
                elif vm_action == "destroy":
                    if not output.count("destroyed"):
                        raise ActionError(vm_action)
                elif vm_action == "start":
                    if not output.count("booted"):
                        raise ActionError(vm_action)
                elif vm_action == "kill":
                    if not output.count("crashed"):
                        raise ActionError(vm_action)
                elif vm_action == "crash":
                    if not check_crash_state(output, vm_oncrash_action,
                                             vm_name, dump_file):
                        raise ActionError(vm_action)
            if vm_ref == "remote":
                if not (re.search("running", output) or re.search(
                        "blocked", output) or re.search("idle", output)):
                    raise exceptions.TestFail("Run failed with right command")
Example No. 51
def run(test, params, env):
    """
    LXC container life cycle testing by virsh command
    """
    uri = params.get("connect_uri", "lxc:///")
    vm_name = params.get("main_vm")
    dom_type = params.get("lxc_domtype", "lxc")
    vcpu = int(params.get("lxc_vcpu", 1))
    max_mem = int(params.get("lxc_max_mem", 500000))
    current_mem = int(params.get("lxc_current_mem", 500000))
    os_type = params.get("lxc_ostype", "exe")
    os_arch = params.get("lxc_osarch", "x86_64")
    os_init = params.get("lxc_osinit", "/bin/sh")
    emulator_path = params.get("lxc_emulator",
                               "/usr/libexec/libvirt_lxc")
    interface_type = params.get("lxc_interface_type", "network")
    net_name = params.get("lxc_net_name", "default")
    full_os = ("yes" == params.get("lxc_full_os", "no"))
    install_root = params.get("lxc_install_root", "/")
    fs_target = params.get("lxc_fs_target", "/")
    fs_accessmode = params.get("lxc_fs_accessmode", "passthrough")
    passwd = params.get("lxc_fs_passwd", "redhat")

    def generate_container_xml():
        """
        Generate container xml
        """
        vmxml = vm_xml.VMXML(dom_type)
        vmxml.vm_name = vm_name
        vmxml.max_mem = max_mem
        vmxml.current_mem = current_mem
        vmxml.vcpu = vcpu
        # Generate os
        vm_os = vm_xml.VMOSXML()
        vm_os.type = os_type
        vm_os.arch = os_arch
        vm_os.init = os_init
        vmxml.os = vm_os
        # Generate emulator
        emulator = Emulator()
        emulator.path = emulator_path
        # Generate console
        console = Console()
        filesystem = Filesystem()
        filesystem.accessmode = fs_accessmode
        filesystem.source = {'dir': install_root}
        filesystem.target = {'dir': fs_target}
        # Add emulator and console in devices
        devices = vm_xml.VMXMLDevices()
        devices.append(emulator)
        devices.append(console)
        devices.append(filesystem)
        # Add network device
        network = Interface(type_name=interface_type)
        network.mac_address = utils_net.generate_mac_address_simple()
        network.source = {interface_type: net_name}
        devices.append(network)
        vmxml.set_devices(devices)
        return vmxml

    def check_state(expected_state):
        result = virsh.domstate(vm_name, uri=uri)
        utlv.check_exit_status(result)
        vm_state = result.stdout.strip()
        if vm_state == expected_state:
            logging.info("Got expected state: %s", vm_state)
        else:
            raise TestFail("Got unexpected state: %s" % vm_state)

    virsh_args = {'uri': uri, 'debug': True}
    try:
        vmxml = generate_container_xml()
        with open(vmxml.xml, 'r') as f:
            logging.info("Container XML:\n%s", f.read())

        if full_os:
            if not os.path.exists(install_root):
                os.mkdir(install_root)
            # Install core os under installroot
            cmd = "yum --releasever=/ --installroot=%s" % install_root
            cmd += " --nogpgcheck -y groupinstall core"
            process.run(cmd, shell=True)
            # Fix root login on console
            process.run("echo 'pts/0' >> %s/etc/securetty" % install_root,
                        shell=True)
            for i in ["session    required     pam_selinux.so close",
                      "session    required     pam_selinux.so open",
                      "session    required     pam_loginuid.so"]:
                process.run('sed -i s/"%s\"/"#%s"/g %s/etc/pam.d/login' %
                            (i, i, install_root), shell=True)
                # Fix root login for sshd
                process.run('sed -i s/"%s\"/"#%s"/g %s/etc/pam.d/sshd' %
                            (i, i, install_root), shell=True)

            # Config basic network
            net_file = install_root + '/etc/sysconfig/network'
            with open(net_file, 'w') as f:
                f.write('NETWORKING=yes\nHOSTNAME=%s\n' % vm_name)
            net_script = install_root + '/etc/sysconfig/network-scripts/ifcfg-eth0'
            with open(net_script, 'w') as f:
                f.write('DEVICE=eth0\nBOOTPROTO=dhcp\nONBOOT=yes\n')

            # Set root password and enable sshd
            session = aexpect.ShellSession("chroot %s" % install_root)
            session.sendline('echo %s|passwd root --stdin' % passwd)
            session.sendline('chkconfig sshd on')
            session.close()

        # Create
        result = virsh.create(vmxml.xml, **virsh_args)
        utlv.check_exit_status(result)
        check_state('running')

        # Destroy
        result = virsh.destroy(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        if not virsh.domain_exists(vm_name, **virsh_args):
            logging.info("Destroy transient LXC domain successfully")
        else:
            raise TestFail("Transient LXC domain still exist after destroy")

        # Define
        result = virsh.define(vmxml.xml, **virsh_args)
        utlv.check_exit_status(result)
        check_state('shut off')

        # List
        result = virsh.dom_list('--inactive', **virsh_args)
        utlv.check_exit_status(result)
        if re.findall("(%s)\s+shut off" % vm_name, result.stdout):
            logging.info("Find %s in virsh list output", vm_name)
        else:
            raise TestFail("Not find %s in virsh list output")

        # Dumpxml
        result = virsh.dumpxml(vm_name, uri=uri, debug=False)
        utlv.check_exit_status(result)

        # Edit
        edit_vcpu = '2'
        logging.info("Change vcpu of LXC container to %s", edit_vcpu)
        edit_cmd = [r":%s /[0-9]*<\/vcpu>/" + edit_vcpu + r"<\/vcpu>"]
        if not utlv.exec_virsh_edit(vm_name, edit_cmd, connect_uri=uri):
            raise TestFail("Run edit command fail")
        else:
            result = virsh.dumpxml(vm_name, **virsh_args)
            new_vcpu = re.search(r'(\d*)</vcpu>', result.stdout).group(1)
            if new_vcpu == edit_vcpu:
                logging.info("vcpu number is expected after do edit")
            else:
                raise TestFail("vcpu number is unexpected after do edit")

        # Start
        result = virsh.start(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        check_state('running')

        # Suspend
        result = virsh.suspend(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        check_state('paused')

        # Resume
        result = virsh.resume(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        check_state('running')

        # Reboot (not supported on RHEL6)
        result = virsh.reboot(vm_name, **virsh_args)
        supported_err = 'not supported by the connection driver: virDomainReboot'
        if supported_err in result.stderr.strip():
            logging.info("Reboot is not supported")
        else:
            utlv.check_exit_status(result)

        # Destroy
        result = virsh.destroy(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        check_state('shut off')

        # Undefine
        result = virsh.undefine(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        if not virsh.domain_exists(vm_name, **virsh_args):
            logging.info("Undefine LXC domain successfully")
        else:
            raise TestFail("LXC domain still exist after undefine")

    finally:
        virsh.remove_domain(vm_name, **virsh_args)
        if full_os and os.path.exists(install_root):
            shutil.rmtree(install_root)
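
The edit step above drives virsh edit with an ex-mode substitution; a sketch of the equivalent replacement applied with plain re, for clarity:

import re

# The ex command ':%s /[0-9]*<\/vcpu>/2<\/vcpu>' rewrites the vcpu count;
# the same substitution on a dumpxml string:
xml = "<domain><vcpu>1</vcpu></domain>"
print(re.sub(r"[0-9]*</vcpu>", "2</vcpu>", xml))  # <domain><vcpu>2</vcpu></domain>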
Example No. 52
def run(test, params, env):
    """
    Test command: virsh save.

    The command can save the RAM state of a running domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Run virsh save command with assigned options.
    4.Recover test environment. (If the libvirtd service was stopped, start
      it again.)
    5.Confirm the test result.

    """
    savefile = params.get("save_file", "save.file")
    if savefile:
        savefile = os.path.join(data_dir.get_tmp_dir(), savefile)
    libvirtd = params.get("libvirtd", "on")
    extra_param = params.get("save_extra_param")
    vm_ref = params.get("save_vm_ref")
    progress = ("yes" == params.get("save_progress", "no"))
    options = params.get("save_option", "")
    status_error = ("yes" == params.get("save_status_error", "yes"))
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    uri = params.get("virsh_uri")
    readonly = ("yes" == params.get("save_readonly", "no"))
    expect_msg = params.get("save_err_msg", "")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    # set the option
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref.count("invalid"):
        vm_ref = params.get(vm_ref)
    elif vm_ref.count("name"):
        vm_ref = vm_name
    vm_ref += (" %s" % extra_param)

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    if progress:
        options += " --verbose"
    result = virsh.save(vm_ref,
                        savefile,
                        options,
                        ignore_status=True,
                        unprivileged_user=unprivileged_user,
                        uri=uri,
                        debug=True,
                        readonly=readonly)
    status = result.exit_status
    err_msg = result.stderr.strip()

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    if savefile:
        virsh.restore(savefile, debug=True)

    # check status_error
    try:
        if status_error:
            if not status:
                if libvirtd == "off" and libvirt_version.version_compare(
                        5, 6, 0):
                    logging.info(
                        "From libvirt version 5.6.0 libvirtd is restarted "
                        "and command should succeed")
                else:
                    test.fail("virsh run succeeded with an "
                              "incorrect command")
            if readonly:
                if not re.search(expect_msg, err_msg):
                    test.fail("Fail to get expect err msg: %s" % expect_msg)
        else:
            if status:
                test.fail("virsh run failed with a " "correct command")
            if progress and not err_msg.count("Save:"):
                test.fail("No progress information outputted!")
            if options.count("running"):
                if vm.is_dead() or vm.is_paused():
                    test.fail("Guest state should be"
                              " running after restore"
                              " due to the option --running")
            elif options.count("paused"):
                if not vm.is_paused():
                    test.fail("Guest state should be"
                              " paused after restore"
                              " due to the option --paused")
            else:
                if vm.is_dead():
                    test.fail("Guest state should be"
                              " alive after restore"
                              " since no option was specified")
    finally:
        if vm.is_paused():
            virsh.resume(vm_name)
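
A sketch of the restore-state expectations above (mapping copied from the branches):

# virsh save option -> expected domain state after virsh restore.
def expected_state_after_restore(options):
    if "running" in options:
        return "running"
    if "paused" in options:
        return "paused"
    return "alive (running or paused)"

print(expected_state_after_restore("--paused"))  # paused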
Example No. 53
    def set_condition(vm_name, condn, reset=False, guestbt=None):
        """
        Set domain to given state or reset it.
        """
        bt = None
        if not reset:
            if condn == "avocado_test":
                testlist = utils_test.get_avocadotestlist(params)
                bt = utils_test.run_avocado_bg(vm, params, test, testlist)
                if not bt:
                    test.cancel("guest stress failed to start")
                # Allow stress to start
                time.sleep(condn_sleep_sec)
                return bt
            elif condn == "stress":
                utils_test.load_stress("stress_in_vms",
                                       params=params,
                                       vms=[vm])
            elif condn in ["save", "managedsave"]:
                # No action
                pass
            elif condn == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "hotplug":
                result = virsh.setvcpus(vm_name,
                                        max_vcpu,
                                        "--live",
                                        ignore_status=True,
                                        debug=True)
                libvirt.check_exit_status(result)
                exp_vcpu = {
                    'max_config': max_vcpu,
                    'max_live': max_vcpu,
                    'cur_config': current_vcpu,
                    'cur_live': max_vcpu,
                    'guest_live': max_vcpu
                }
                result = cpu.check_vcpu_value(vm, exp_vcpu, option="--live")
            elif condn == "host_smt":
                if cpuutil.get_cpu_vendor_name() == 'power9':
                    result = process.run("ppc64_cpu --smt=4", shell=True)
                else:
                    test.cancel(
                        "Host SMT changes are not allowed while the guest is live")
            else:
                logging.debug("No operation for the domain")

        else:
            if condn == "save":
                save_file = os.path.join(data_dir.get_tmp_dir(),
                                         vm_name + ".save")
                result = virsh.save(vm_name,
                                    save_file,
                                    ignore_status=True,
                                    debug=True)
                libvirt.check_exit_status(result)
                time.sleep(condn_sleep_sec)
                if os.path.exists(save_file):
                    result = virsh.restore(save_file,
                                           ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif condn == "managedsave":
                result = virsh.managedsave(vm_name,
                                           ignore_status=True,
                                           debug=True)
                libvirt.check_exit_status(result)
                time.sleep(condn_sleep_sec)
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "avocado_test":
                guestbt.join()
            elif condn == "stress":
                utils_test.unload_stress("stress_in_vms",
                                         params=params,
                                         vms=[vm])
            elif condn == "hotplug":
                result = virsh.setvcpus(vm_name,
                                        current_vcpu,
                                        "--live",
                                        ignore_status=True,
                                        debug=True)
                libvirt.check_exit_status(result)
                exp_vcpu = {
                    'max_config': max_vcpu,
                    'max_live': current_vcpu,
                    'cur_config': current_vcpu,
                    'cur_live': current_vcpu,
                    'guest_live': current_vcpu
                }
                result = cpu.check_vcpu_value(vm, exp_vcpu, option="--live")
            elif condn == "host_smt":
                result = process.run("ppc64_cpu --smt=2", shell=True)
                # Change back the host smt
                result = process.run("ppc64_cpu --smt=4", shell=True)
                # Work around a known cgroup issue seen after a cpu
                # hot(un)plug sequence
                root_cpuset_path = utils_cgroup.get_cgroup_mountpoint("cpuset")
                machine_cpuset_paths = []
                if os.path.isdir(
                        os.path.join(root_cpuset_path, "machine.slice")):
                    machine_cpuset_paths.append(
                        os.path.join(root_cpuset_path, "machine.slice"))
                if os.path.isdir(os.path.join(root_cpuset_path, "machine")):
                    machine_cpuset_paths.append(
                        os.path.join(root_cpuset_path, "machine"))
                if not machine_cpuset_paths:
                    logging.warning("cgroup cpuset might not recover properly "
                                    "for guests after host smt changes, "
                                    "restore it manually")
                root_cpuset_cpus = os.path.join(root_cpuset_path,
                                                "cpuset.cpus")
                for path in machine_cpuset_paths:
                    machine_cpuset_cpus = os.path.join(path, "cpuset.cpus")
                    # check if file content differs
                    cmd = "diff %s %s" % (root_cpuset_cpus,
                                          machine_cpuset_cpus)
                    if process.system(cmd, verbose=True, ignore_status=True):
                        cmd = "cp %s %s" % (root_cpuset_cpus,
                                            machine_cpuset_cpus)
                        process.system(cmd, verbose=True)

            else:
                logging.debug("No need recover the domain")
        return bt
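
A usage sketch for set_condition(), with a hypothetical domain name; the 'suspend' condition pauses the domain and its reset branch resumes it:

set_condition("avocado-vt-vm1", "suspend")               # virsh suspend
# ... exercise the feature under test while the domain is paused ...
set_condition("avocado-vt-vm1", "suspend", reset=True)   # virsh resume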
Example No. 54
def run(test, params, env):
    """
    Test command: virsh setvcpus.

    The command can change the number of virtual CPUs in the guest domain.
    1.Prepare test environment, destroy or suspend a VM.
    2.Perform virsh setvcpus operation.
    3.Recover test environment.
    4.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    xml_file = params.get("setvcpus_xml_file", "vm.xml")
    virsh.dumpxml(vm_name, extra="--inactive", to_file=xml_file)
    tmp_file = params.get("setvcpus_tmp_file", "tmp.xml")
    pre_vm_state = params.get("setvcpus_pre_vm_state")
    command = params.get("setvcpus_command", "setvcpus")
    options = params.get("setvcpus_options")
    domain = params.get("setvcpus_domain")
    count = params.get("setvcpus_count")
    extra_param = params.get("setvcpus_extra_param")
    count_option = "%s %s" % (count, extra_param)
    status_error = params.get("status_error")

    def get_current_vcpus():
        """
        Get the current vcpu number from the domain XML.
        """
        virsh.dumpxml(vm_name, extra="", to_file=tmp_file)
        dom = parse(tmp_file)
        root = dom.documentElement
        vcpus_set = 0
        # Read the 'current' attribute of the <vcpu> element
        for node in root.getElementsByTagName("vcpu"):
            vcpus_set = int(node.getAttribute("current"))
        dom.unlink()
        return vcpus_set

    if vm.is_alive():
        vm.destroy()
    vm_xml = libvirt_xml.VMXML()
    vm_xml.set_vm_vcpus(vm_name, 2)
    vm.start()
    vm.wait_for_login()

    if status_error == "no":
        vcpus_new = len(vm.vcpuinfo())
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    if pre_vm_state == "paused":
        vm.pause()
    elif pre_vm_state == "shut off":
        vm.destroy()

    if domain == "remote_name":
        remote_ssh_addr = params.get("remote_ip", None)
        remote_addr = params.get("local_ip", None)
        remote_password = params.get("remote_password", None)
        host_type = virsh.driver()
        if host_type == "qemu":
            remote_string = "qemu+ssh://%s/system" % remote_addr
        elif host_type == "xen":
            remote_string = "xen+ssh://%s" % remote_addr
        command = "virsh -c %s setvcpus %s 1 --live" % (remote_string, vm_name)
        if virsh.has_command_help_match(command, "--live") is None:
            status_error = "yes"
        session = remote.remote_login("ssh", remote_ssh_addr, "22", "root",
                                      remote_password, "#")
        session.cmd_output('LANG=C')
        status, output = session.cmd_status_output(command, internal_timeout=5)
        session.close()
        vcpus_current = len(vm.vcpuinfo())
    else:
        if domain == "name":
            dom_option = vm_name
        elif domain == "id":
            dom_option = domid
            if params.get("setvcpus_hex_id") is not None:
                dom_option = hex(int(domid))
            elif params.get("setvcpus_invalid_id") is not None:
                dom_option = params.get("setvcpus_invalid_id")
        elif domain == "uuid":
            dom_option = domuuid
            if params.get("setvcpus_invalid_uuid") is not None:
                dom_option = params.get("setvcpus_invalid_uuid")
        else:
            dom_option = domain
        option_list = options.split(" ")
        for item in option_list:
            if virsh.has_command_help_match(command, item) is None:
                status_error = "yes"
                break
        status = virsh.setvcpus(dom_option,
                                count_option,
                                options,
                                ignore_status=True).exit_status
        if pre_vm_state == "paused":
            virsh.resume(vm_name, ignore_status=True)
        if status_error == "no":
            if status == 0:
                if pre_vm_state == "shut off":
                    if options == "--config":
                        vcpus_set = len(vm.vcpuinfo())
                    elif options == "--current":
                        vcpus_set = get_current_vcpus()
                    elif options == "--maximum --config":
                        vcpus_set = ""
                        dom = parse("/etc/libvirt/qemu/%s.xml" % vm_name)
                        vcpus_set = dom.getElementsByTagName(
                            "vcpu")[0].firstChild.data
                        vcpus_set = int(vcpus_set)
                        dom.unlink()
                else:
                    vcpus_set = len(vm.vcpuinfo())
                if domain == "id":
                    cmd_chk = "cat /etc/libvirt/qemu/%s.xml" % vm_name
                    # 'commands' is Python 2 only; use avocado's process
                    output1 = process.run(cmd_chk, shell=True).stdout_text
                    logging.info("guest-info:\n%s", output1)

    virsh.destroy(vm_name)
    virsh.undefine(vm_name)
    virsh.define(xml_file)
    if os.path.exists(xml_file):
        os.remove(xml_file)
    if os.path.exists(tmp_file):
        os.remove(tmp_file)

    # check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    else:
        if status != 0:
            raise error.TestFail("Run failed with right command")
        else:
            if options == "--maximum --config":
                if vcpus_set != 4:
                    raise error.TestFail("Run failed with right command1")
            elif domain == "id":
                if options == "--config":
                    if vcpus_set != vcpus_new or not re.search(
                            '<vcpu current=\'1\'>%s</vcpu>' % vcpus_new,
                            output1):
                        raise error.TestFail("Run failed with right command2")
                elif options == "--config --live":
                    if vcpus_set != 1 or not re.search(
                            '<vcpu current=\'1\'>%s</vcpu>' % vcpus_new,
                            output1):
                        raise error.TestFail("Run failed with right command3")
                else:
                    if vcpus_set != 1 or re.search(
                            '<vcpu current=\'1\'>%s</vcpu>' % vcpus_new,
                            output1):
                        raise error.TestFail("Run failed with right command4")
            else:
                if vcpus_set != 1:
                    raise error.TestFail("Run failed with right command5")
Example No. 55
def run(test, params, env):
    """
    Test command: virsh save-image-dumpxml <file>
                  virsh save-image-define <file> [--xml <string>]

    1) Prepare test environment.
    2) Execute save-image-dumpxml to get VM xml description
    3) Edit the xml string and call virsh save-image-define to redefine it
    4) Restore VM
    5) Check the new xml of the VM
    """

    def get_image_xml():
        # Invoke save-image-dumpxml
        cmd_result = virsh.save_image_dumpxml(vm_save, debug=True)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to dump xml from "
                                 "saved state file:%s" % vm_save)

        xml = cmd_result.stdout.strip()

        match_string = "<name>%s</name>" % vm_name
        if not re.search(match_string, xml):
            raise error.TestFail("The xml from saved state file "
                                 "is invalid")
        return xml

    def redefine_new_xml():
        if restore_state == "running":
            option = "--running"
        elif restore_state == "paused":
            option = "--paused"
        else:
            raise error.TestFail("Unknown save-image-define option")

        cmd_result = virsh.save_image_define(vm_save, xmlfile, option,
                                             debug=True)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to redefine new xml %s for %s" %
                                 (xmlfile, vm_save))

    def vm_state_check():
        cmd_result = virsh.dumpxml(vm_name, debug=True)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to dump xml of domain %s" % vm_name)

        # The xml should contain the match_string
        xml = cmd_result.stdout.strip()
        match_string = "<boot dev='cdrom'/>"
        if not re.search(match_string, xml):
            raise error.TestFail("After domain restore, "
                                 "the xml is not expected")

        domstate = virsh.domstate(vm_name, debug=True).stdout.strip()
        if restore_state != domstate:
            raise error.TestFail("The domain state is not expected")

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")

    restore_state = params.get("restore_state", "running")
    vm_save = params.get("vm_save", "vm.save")

    try:
        # Get a tmp_dir.
        tmp_dir = data_dir.get_tmp_dir()

        if not os.path.dirname(vm_save):
            vm_save = os.path.join(tmp_dir, vm_save)

        # Save the RAM state of a running domain
        cmd_result = virsh.save(vm_name, vm_save, debug=True)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to save running domain %s" % vm_name)

        xml = get_image_xml()

        # Replace <boot dev='hd'/> to <boot dev='cdrom'/>
        newxml = xml.replace("<boot dev='hd'/>", "<boot dev='cdrom'/>")
        logging.debug("After string replacement, the new xml is %s", newxml)

        # Write new xml into a tempfile
        # delete=False keeps the file after close() so it can be rewritten
        tmp_file = tempfile.NamedTemporaryFile(prefix="new_vm_xml_",
                                               dir=tmp_dir, delete=False)
        xmlfile = tmp_file.name
        tmp_file.close()

        with open(xmlfile, 'w') as fd:
            fd.write(newxml)

        # Redefine new xml for domain's saved state file
        redefine_new_xml()

        # Restore domain
        cmd_result = virsh.restore(vm_save, debug=True)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to restore domain %s" % vm_name)
        os.remove(vm_save)

        vm_state_check()

    finally:
        # cleanup
        if restore_state == "paused":
            virsh.resume(vm_name)

        if os.path.exists(vm_save):
            virsh.restore(vm_save)
            os.remove(vm_save)

        if os.path.exists(xmlfile):
            os.remove(xmlfile)
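
A minimal sketch of the save-image round trip exercised above, assuming a running domain and the virttest virsh wrappers; the domain name and paths are illustrative:

from virttest import virsh

virsh.save("demo-guest", "/tmp/demo.save", debug=True)
xml = virsh.save_image_dumpxml("/tmp/demo.save", debug=True).stdout.strip()
# ... edit the xml and write the result to /tmp/new.xml ...
virsh.save_image_define("/tmp/demo.save", "/tmp/new.xml", "--running",
                        debug=True)
virsh.restore("/tmp/demo.save", debug=True)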
Example No. 56
def run(test, params, env):
    """
    Different cpu compat mode scenario tests

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    def check_feature(vm, feature="", vcpu=0):
        """
        Checks the given feature is present
        :param vm: VM Name
        :param feature: feature to be verified
        :param vcpu: vcpu number to pin guest test
        :return: true on success, test fail on failure
        """
        session = vm.wait_for_login()
        if 'power8' in feature:
            cmd = 'lscpu|grep -i "Model name:.*power8"'
        elif 'xive' in feature:
            # remove -v once guest xive support is available
            # right now power9 guest supports only xics
            cmd = "grep -v xive /sys/firmware/devicetree/base/interrupt-*/compatible"
        elif 'xics' in feature:
            cmd = "grep -v xive /sys/firmware/devicetree/base/interrupt-*/compatible"
        elif 'power9' in feature:
            cmd = 'lscpu|grep -i "Model name:.*power9"'
        elif 'hpt' in feature:
            cmd = 'grep "MMU.*: Hash" /proc/cpuinfo'
        elif 'rpt' in feature:
            cmd = 'grep "MMU.*: Radix" /proc/cpuinfo'
        elif 'isa' in feature:
            utils_package.package_install('gcc', session)
            cmd = "echo 'int main(){asm volatile (\".long 0x7c0005e6\");"
            cmd += "return 0;}' > ~/a.c;cc ~/a.c;taskset -c %s ./a.out" % vcpu
        status, output = session.cmd_status_output(cmd)
        logging.debug(output)
        session.close()
        if feature != "isa2.7":
            if status != 0:
                test.fail("Feature: %s check failed inside "
                          "%s guest on %s host" % (feature,
                                                   guest_version,
                                                   host_version))
        else:
            if status == 0:
                test.fail("isa3.0 instruction succeeds in "
                          "%s guest on %s host" % (guest_version,
                                                   host_version))
        return True
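
    # Note: the opcode 0x7c0005e6 used in the 'isa' branch above encodes an
    # ISA 3.0 instruction (the 'darn' random-number opcode), so it is
    # expected to trap on an ISA 2.07 (power8) guest; that is why the
    # isa2.7 case treats a zero exit status as a failure.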

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pin_vcpu = 0
    host_version = params.get("host_version")
    guest_version = params.get("guest_version")
    max_vcpu = params.get("cpucompat_vcpu_max", "")
    cur_vcpu = int(params.get("cpucompat_vcpu_cur", "1"))
    cores = int(params.get("topology_cores", '1'))
    sockets = int(params.get("topology_sockets", '1'))
    threads = int(params.get("topology_threads", '1'))
    status_error = "yes" == params.get("status_error", "no")
    condn = params.get("condn", "")
    guest_features = params.get("guest_features", "")
    if guest_features:
        guest_features = guest_features.split(',')
        if guest_version:
            guest_features.append(guest_version)
    if host_version not in cpu.get_cpu_arch():
        test.cancel("Unsupported Host cpu version")

    vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    org_xml = vmxml.copy()
    # Destroy the vm
    vm.destroy()
    try:
        # Set cpu model
        if max_vcpu:
            pin_vcpu = int(max_vcpu) - 1
            libvirt_xml.VMXML.set_vm_vcpus(vm_name, int(max_vcpu), cur_vcpu,
                                           sockets=sockets, cores=cores,
                                           threads=threads, add_topology=True)
        libvirt_xml.VMXML.set_cpu_mode(vm_name, model=guest_version)
        logging.debug(virsh.dumpxml(vm_name))
        try:
            vm.start()
        except virt_vm.VMStartError as detail:
            if not status_error:
                test.fail("%s" % detail)
        if max_vcpu:
            virsh.setvcpus(vm_name, int(max_vcpu), "--live",
                           ignore_status=False, debug=True)
            if not utils_misc.check_if_vm_vcpu_match(int(max_vcpu), vm):
                test.fail("Vcpu hotplug failed")
        if not status_error:
            for feature in guest_features:
                check_feature(vm, feature, vcpu=pin_vcpu)
        if condn == "filetrans":
            utils_test.run_file_transfer(test, params, env)
        elif condn == "stress":
            bt = utils_test.run_avocado_bg(vm, params, test)
            if not bt:
                test.cancel("guest stress failed to start")
        elif condn == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
            result = virsh.save(vm_name, save_file, ignore_status=True,
                                debug=True)
            utils_test.libvirt.check_exit_status(result)
            # Sleep a few seconds before guest recovery
            time.sleep(2)
            if os.path.exists(save_file):
                result = virsh.restore(save_file, ignore_status=True,
                                       debug=True)
                utils_test.libvirt.check_exit_status(result)
                os.remove(save_file)
        elif condn == "suspend":
            result = virsh.suspend(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result)
            # Sleep a few seconds before guest recovery
            time.sleep(2)
            result = virsh.resume(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result)
    finally:
        org_xml.sync()
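
The XML preparation in this test reduces to two virttest helpers. A hedged sketch with illustrative values (the domain name, vcpu counts, topology, and model are assumptions):

from virttest import libvirt_xml

# Raise the maximum vcpu count and add an explicit topology
libvirt_xml.VMXML.set_vm_vcpus("demo-guest", 8, 1,
                               sockets=1, cores=8, threads=1,
                               add_topology=True)
# Pin the guest to a cpu model
libvirt_xml.VMXML.set_cpu_mode("demo-guest", model="power9")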
Example No. 57
def run(test, params, env):
    """
    LXC container life cycle testing by virsh command
    """
    uri = params.get("connect_uri", "lxc:///")
    vm_name = params.get("main_vm")
    dom_type = params.get("lxc_domtype", "lxc")
    vcpu = int(params.get("lxc_vcpu", 1))
    max_mem = int(params.get("lxc_max_mem", 500000))
    current_mem = int(params.get("lxc_current_mem", 500000))
    os_type = params.get("lxc_ostype", "exe")
    os_arch = params.get("lxc_osarch", "x86_64")
    os_init = params.get("lxc_osinit", "/bin/sh")
    emulator_path = params.get("lxc_emulator", "/usr/libexec/libvirt_lxc")
    interface_type = params.get("lxc_interface_type", "network")
    net_name = params.get("lxc_net_name", "default")
    full_os = ("yes" == params.get("lxc_full_os", "no"))
    install_root = params.get("lxc_install_root", "/")
    fs_target = params.get("lxc_fs_target", "/")
    fs_accessmode = params.get("lxc_fs_accessmode", "passthrough")
    passwd = params.get("lxc_fs_passwd", "redhat")

    def generate_container_xml():
        """
        Generate container xml
        """
        vmxml = vm_xml.VMXML(dom_type)
        vmxml.vm_name = vm_name
        vmxml.max_mem = max_mem
        vmxml.current_mem = current_mem
        vmxml.vcpu = vcpu
        # Generate os
        vm_os = vm_xml.VMOSXML()
        vm_os.type = os_type
        vm_os.arch = os_arch
        vm_os.init = os_init
        vmxml.os = vm_os
        # Generate emulator
        emulator = Emulator()
        emulator.path = emulator_path
        # Generate console
        console = Console()
        filesystem = Filesystem()
        filesystem.accessmode = fs_accessmode
        filesystem.source = {'dir': install_root}
        filesystem.target = {'dir': fs_target}
        # Add emulator and console in devices
        devices = vm_xml.VMXMLDevices()
        devices.append(emulator)
        devices.append(console)
        devices.append(filesystem)
        # Add network device
        network = Interface(type_name=interface_type)
        network.mac_address = utils_net.generate_mac_address_simple()
        network.source = {interface_type: net_name}
        devices.append(network)
        vmxml.set_devices(devices)
        return vmxml

    def check_state(expected_state):
        result = virsh.domstate(vm_name, uri=uri)
        utlv.check_exit_status(result)
        vm_state = result.stdout.strip()
        if vm_state == expected_state:
            logging.info("Get expected state: %s", vm_state)
        else:
            raise TestFail("Get unexpected state: %s", vm_state)

    virsh_args = {'uri': uri, 'debug': True}
    try:
        vmxml = generate_container_xml()
        with open(vmxml.xml, 'r') as f:
            logging.info("Container XML:\n%s", f.read())

        if full_os:
            if not os.path.exists(install_root):
                os.mkdir(install_root)
            # Install core os under installroot
            cmd = "yum --releasever=/ --installroot=%s" % install_root
            cmd += " --nogpgcheck -y groupinstall core"
            process.run(cmd, shell=True)
            # Fix root login on console
            process.run("echo 'pts/0' >> %s/etc/securetty" % install_root,
                        shell=True)
            for i in [
                    "session    required     pam_selinux.so close",
                    "session    required     pam_selinux.so open",
                    "session    required     pam_loginuid.so"
            ]:
                process.run('sed -i s/"%s\"/"#%s"/g %s/etc/pam.d/login' %
                            (i, i, install_root),
                            shell=True)
                # Fix root login for sshd
                process.run('sed -i s/"%s\"/"#%s"/g %s/etc/pam.d/sshd' %
                            (i, i, install_root),
                            shell=True)

            # Config basic network
            net_file = install_root + '/etc/sysconfig/network'
            with open(net_file, 'w') as f:
                f.write('NETWORKING=yes\nHOSTNAME=%s\n' % vm_name)
            net_script = install_root + '/etc/sysconfig/network-scripts/ifcfg-eth0'
            with open(net_script, 'w') as f:
                f.write('DEVICE=eth0\nBOOTPROTO=dhcp\nONBOOT=yes\n')

            # Set root password and enable sshd
            session = aexpect.ShellSession("chroot %s" % install_root)
            session.sendline('echo %s|passwd root --stdin' % passwd)
            session.sendline('chkconfig sshd on')
            session.close()

        # Create
        result = virsh.create(vmxml.xml, **virsh_args)
        utlv.check_exit_status(result)
        check_state('running')

        # Destroy
        result = virsh.destroy(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        if not virsh.domain_exists(vm_name, **virsh_args):
            logging.info("Destroy transient LXC domain successfully")
        else:
            raise TestFail("Transient LXC domain still exist after destroy")

        # Define
        result = virsh.define(vmxml.xml, **virsh_args)
        utlv.check_exit_status(result)
        check_state('shut off')

        # List
        result = virsh.dom_list('--inactive', **virsh_args)
        utlv.check_exit_status(result)
        if re.findall(r"(%s)\s+shut off" % vm_name, result.stdout):
            logging.info("Found %s in virsh list output", vm_name)
        else:
            raise TestFail("Failed to find %s in virsh list output" % vm_name)

        # Dumpxml
        result = virsh.dumpxml(vm_name, uri=uri, debug=False)
        utlv.check_exit_status(result)

        # Edit
        edit_vcpu = '2'
        logging.info("Change vcpu of LXC container to %s", edit_vcpu)
        edit_cmd = [r":%s /[0-9]*<\/vcpu>/" + edit_vcpu + r"<\/vcpu>"]
        if not utlv.exec_virsh_edit(vm_name, edit_cmd, connect_uri=uri):
            raise TestFail("Run edit command fail")
        else:
            result = virsh.dumpxml(vm_name, **virsh_args)
            new_vcpu = re.search(r'(\d*)</vcpu>', result.stdout).group(1)
            if new_vcpu == edit_vcpu:
                logging.info("vcpu number is expected after do edit")
            else:
                raise TestFail("vcpu number is unexpected after do edit")

        # Start
        result = virsh.start(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        check_state('running')

        # Suspend
        result = virsh.suspend(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        check_state('paused')

        # Resume
        result = virsh.resume(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        check_state('running')

        # Reboot (not supported on RHEL6)
        result = virsh.reboot(vm_name, **virsh_args)
        supported_err = 'not supported by the connection driver: virDomainReboot'
        if supported_err in result.stderr.strip():
            logging.info("Reboot is not supported")
        else:
            utlv.check_exit_status(result)

        # Destroy
        result = virsh.destroy(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        check_state('shut off')

        # Undefine
        result = virsh.undefine(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        if not virsh.domain_exists(vm_name, **virsh_args):
            logging.info("Undefine LXC domain successfully")
        else:
            raise TestFail("LXC domain still exist after undefine")

    finally:
        virsh.remove_domain(vm_name, **virsh_args)
        if full_os and os.path.exists(install_root):
            shutil.rmtree(install_root)
Example No. 58
    def manipulate_domain(vm_name, vm_operation, recover=False):
        """
        Operate domain to given state or recover it.

        :param vm_name: Name of the VM domain
        :param vm_operation: Operation to be performed on the VM domain,
                             e.g. save, managedsave, suspend
        :param recover: flag indicating whether to apply or undo
                        vm_operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
        if not recover:
            if vm_operation == "save":
                save_option = ""
                result = virsh.save(vm_name, save_file, save_option,
                                    ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "managedsave":
                managedsave_option = ""
                result = virsh.managedsave(vm_name, managedsave_option,
                                           ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmsuspend(vm_name, suspend_target,
                                            ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s4":
                suspend_target = "disk"
                result = virsh.dompmsuspend(vm_name, suspend_target,
                                            ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                # Wait domain state change: 'in shutdown' -> 'shut off'
                utils_misc.wait_for(lambda: virsh.is_dead(vm_name), 5)
            elif vm_operation == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                vm.reboot()
            else:
                logging.debug("No operation for the domain")

        else:
            if vm_operation == "save":
                if os.path.exists(save_file):
                    result = virsh.restore(save_file, ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif vm_operation in ["managedsave", "s4"]:
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmwakeup(vm_name, ignore_status=True,
                                           debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                pass
            else:
                logging.debug("No need recover the domain")
Example No. 59
    def run_test(dev_type, params, test_obj=None):
        """
        Test domain lifecycle

        1) Start the vm and check network
        2) Destroy and start the VM, and check network
        3) Save and restore, and check network
        4) Suspend and resume, and check network
        5) Reboot the VM and check the network
        """
        # Setup Iface device
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_dict = eval(params.get('iface_dict', '{}'))
        iface_dev = interface_base.create_iface(dev_type, iface_dict)
        libvirt.add_vm_device(vmxml, iface_dev)

        logging.info("Start a VM with a '%s' type interface.", dev_type)
        vm.start()
        vm.wait_for_serial_login(timeout=240).close()
        check_points.check_network_accessibility(vm,
                                                 test_obj=test_obj,
                                                 **params)

        logging.info("Destroy and start the VM.")
        virsh.destroy(vm.name, **VIRSH_ARGS)
        virsh.start(vm.name, **VIRSH_ARGS)
        check_points.check_network_accessibility(vm,
                                                 test_obj=test_obj,
                                                 config_vdpa=True,
                                                 **params)

        logging.info("Save the VM.")
        save_error = "yes" == params.get("save_error", "no")
        save_path = os.path.join(data_dir.get_tmp_dir(), vm.name + '.save')
        res = virsh.save(vm.name, save_path, debug=True)
        libvirt.check_exit_status(res, expect_error=save_error)
        if not save_error:
            logging.info("Restore vm.")
            virsh.restore(save_path, **VIRSH_ARGS)
            check_points.check_network_accessibility(vm,
                                                     test_obj=test_obj,
                                                     config_vdpa=False,
                                                     **params)

        logging.info("Suspend and resume the vm.")
        virsh.suspend(vm.name, **VIRSH_ARGS)
        if not libvirt.check_vm_state(vm_name, "paused"):
            test.fail("VM should be paused!")
        virsh.resume(vm.name, **VIRSH_ARGS)
        if not libvirt.check_vm_state(vm_name, "running"):
            test.fail("VM should be running!")
        check_points.check_network_accessibility(vm,
                                                 test_obj=test_obj,
                                                 config_vdpa=False,
                                                 **params)

        logging.debug("Reboot VM and check network.")
        virsh.reboot(vm.name, **VIRSH_ARGS)
        check_points.check_network_accessibility(vm,
                                                 test_obj=test_obj,
                                                 config_vdpa=False,
                                                 **params)
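
VIRSH_ARGS is not defined in this fragment; in tests of this style it is conventionally a dict of shared keyword arguments, along these lines (an assumption, not shown in the source):

# Assumed definition; the fragment above does not include it.
VIRSH_ARGS = {'debug': True, 'ignore_status': False}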