def vm_msave_remove_check(vm_name):
    """
    Check managed save remove command.
    """
    if not os.path.exists(managed_save_file):
        test.fail("Can't find managed save image")
    virsh.managedsave_remove(vm_name, debug=True)
    if os.path.exists(managed_save_file):
        test.fail("Managed save image still exists")
    virsh.start(vm_name, debug=True)
    # The domain state should be running
    if vm.state() != "running":
        test.fail("Guest state should be running after started")
Example #2
def vm_msave_remove_check(vm_name):
    """
    Check managed save remove command.
    """
    if not os.path.exists(managed_save_file):
        raise error.TestFail("Can't find managed save image")
    virsh.managedsave_remove(vm_name)
    if os.path.exists(managed_save_file):
        raise error.TestFail("Managed save image still exists")
    virsh.start(vm_name)
    # The domain state should be running
    if vm.state() != "running":
        raise error.TestFail("Guest state should be"
                             " running after started")
Example #3
def vm_msave_remove_check(vm_name):
    """
    Check managed save remove command.
    """
    if (not os.path.exists(managed_save_file)
            and case not in ['not_saved_without_file', 'saved_without_file']):
        test.fail("Can't find managed save image")
    ret = virsh.managedsave_remove(vm_name, debug=True)
    libvirt.check_exit_status(ret, msave_rm_error)
    if os.path.exists(managed_save_file):
        test.fail("Managed save image still exists")
    virsh.start(vm_name, debug=True)
    # The domain state should be running
    if vm.state() != "running":
        test.fail("Guest state should be running after started")
Example #4
def run(test, params, env):
    """
    Test libvirt hooks.

    1. Prepare test environment, destroy or suspend a VM.
    2. Perform test operation.
    3. Recover test environment.
    4. Confirm the test result.
    """
    vm_name = params.get("main_vm")
    if (vm_name != "lxc_test_vm1"):
        vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': False}
    hook_file = params.get("hook_file", "/etc/libvirt/hooks/qemu")
    hook_log = params.get("hook_log", "/tmp/qemu.log")
    machine_type = params.get("machine_type", "")

    def prepare_hook_file(hook_op):
        """
        Create hook file.
        """
        logging.info("hook script: %s", hook_op)
        hook_lines = hook_op.split(';')
        hook_dir = os.path.dirname(hook_file)
        logging.info("hook script: %s", hook_op)
        if not os.path.exists(hook_dir):
            os.mkdir(hook_dir)
        with open(hook_file, 'w') as hf:
            hf.write('\n'.join(hook_lines))
        os.chmod(hook_file, 0o755)
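        # Note: hook_script is supplied by the test config as ';'-separated
        # lines (typically a shebang plus a command that appends the hook's
        # arguments to hook_log); this helper writes it out as an executable
        # hook script so check_hooks() can later inspect the log.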

        # restart libvirtd
        libvirtd.restart()
        if utils_split_daemons.is_modular_daemon() and test_network:
            utils_libvirtd.Libvirtd("virtnetworkd").restart()

    def check_hooks(opt):
        """
        Check hook operations in log file
        """
        logging.debug("Trying to check the string '%s'" " in logfile", opt)
        if not os.path.exists(hook_log):
            logging.debug("Log file doesn't exist")
            return False

        logs = None
        with open(hook_log, 'r') as lf:
            logs = lf.read()
        if not logs:
            return False

        logging.debug("Read from hook log file: %s", logs)
        if opt in logs:
            return True
        else:
            return False

    def start_stop_hook():
        """
        Do start/stop operation and check the results.
        """
        logging.info("Try to test start/stop hooks...")
        hook_para = "%s %s" % (hook_file, vm_name)
        prepare_hook_file(hook_script % (vm_name, hook_log))
        vm.start()
        vm.wait_for_login().close()
        try:
            hook_str = hook_para + " prepare begin -"
            assert check_hooks(hook_str)
            hook_str = hook_para + " start begin -"
            assert check_hooks(hook_str)
            hook_str = hook_para + " started begin -"
            assert check_hooks(hook_str)
            # stop the vm
            vm.destroy()
            hook_str = hook_para + " stopped end -"
            assert check_hooks(hook_str)
            hook_str = hook_para + " release end -"
            assert check_hooks(hook_str)
        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Failed to check start/stop hooks.")

    def save_restore_hook():
        """
        Do save/restore operation and check the results.
        """
        hook_para = "%s %s" % (hook_file, vm_name)
        save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name)
        disk_src = vm.get_first_disk_devices()['source']
        if domainxml_test:
            disk_dist = "/tmp/%s.move" % vm_name
            shutil.copy(disk_src, disk_dist)
            script = (hook_script % (vm_name, disk_src, disk_dist))
            prepare_hook_file(script)
        elif basic_test:
            prepare_hook_file(hook_script % (vm_name, hook_log))
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if domainxml_test:
            disk_src_save = vm.get_first_disk_devices()['source']
            if disk_src != disk_src_save:
                test.fail("Failed to check hooks for save operation")
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if os.path.exists(save_file):
            os.remove(save_file)
        if domainxml_test:
            disk_src_restore = vm.get_first_disk_devices()['source']
            if disk_dist != disk_src_restore:
                test.fail("Failed to check hooks for restore operation")
            vm.destroy()
            if os.path.exists(disk_dist):
                os.remove(disk_dist)
            vmxml_backup.sync()
        if basic_test:
            hook_str = hook_para + " restore begin -"
            if not check_hooks(hook_str):
                test.fail("Failed to check restore hooks.")

    def managedsave_hook():
        """
        Do managedsave operation and check the results.
        """
        hook_para = "%s %s" % (hook_file, vm_name)
        save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name)
        disk_src = vm.get_first_disk_devices()['source']
        if domainxml_test:
            disk_dist = "/tmp/%s.move" % vm_name
            shutil.copy(disk_src, disk_dist)
            script = (hook_script % (vm_name, disk_src, disk_dist))
            prepare_hook_file(script)
        elif basic_test:
            prepare_hook_file(hook_script % (vm_name, hook_log))
        ret = virsh.managedsave(vm_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if domainxml_test:
            disk_src_save = vm.get_first_disk_devices()['source']
            if disk_src != disk_src_save:
                test.fail("Failed to check hooks for" " managedsave operation")
        vm.start()
        if os.path.exists(save_file):
            os.remove(save_file)
        if domainxml_test:
            disk_src_restore = vm.get_first_disk_devices()['source']
            if disk_dist != disk_src_restore:
                test.fail("Failed to check hooks for" " managedsave operation")
            vm.destroy()
            if os.path.exists(disk_dist):
                os.remove(disk_dist)
            vmxml_backup.sync()

        if basic_test:
            hook_str = hook_para + " restore begin -"
            if not check_hooks(hook_str):
                test.fail("Failed to check managedsave hooks.")

    def libvirtd_hook():
        """
        Check the libvirtd hooks.
        """
        prepare_hook_file(hook_script % (vm_name, hook_log))
        hook_para = "%s %s" % (hook_file, vm_name)
        time.sleep(2)
        libvirtd.restart()
        try:
            hook_str = hook_para + " reconnect begin -"
            assert check_hooks(hook_str)
        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Failed to check libvirtd hooks")

    def lxc_hook():
        """
        Check the lxc hooks.
        """

        if platform.platform().count('el8'):
            test.cancel("lxc is not supported in rhel8")
        test_xml = vm_xml.VMXML("lxc")

        root_dir = data_dir.get_root_dir()
        lxc_xml_related_path_file = params.get("lxc_xml_file")
        lxc_xml_path_file = os.path.join(root_dir, lxc_xml_related_path_file)
        with open(lxc_xml_path_file, 'r') as fd:
            test_xml.xml = fd.read()

        uri = "lxc:///"
        vm_name = "lxc_test_vm1"
        hook_para = "%s %s" % (hook_file, vm_name)
        prepare_hook_file(hook_script % hook_log)
        exit1 = params.get("exit1", "no")
        output = virsh.create(test_xml.xml, options="--console", uri=uri)

        if output.exit_status:
            logging.debug("output.stderr1: %s", output.stderr.lower())
            if (exit1 == "yes" and "hook script execution failed"
                    in output.stderr.lower()):
                return True
            else:
                test.fail("Create %s domain failed:%s" %
                          ("lxc", output.stderr))
        logging.info("Domain %s created, will check with console", vm_name)

        hook_str = hook_para + " prepare begin -"
        if not check_hooks(hook_str):
            test.fail("Failed to check lxc hook string: %s" % hook_str)
        hook_str = hook_para + " start begin -"
        if not check_hooks(hook_str):
            test.fail("Failed to check lxc hook string: %s" % hook_str)

        virsh.destroy(vm_name, options="", uri=uri)

        hook_str = hook_para + " stopped end -"
        if not check_hooks(hook_str):
            test.fail("Failed to check lxc hook string: %s" % hook_str)
        hook_str = hook_para + " release end -"
        if not check_hooks(hook_str):
            test.fail("Failed to check lxc hook string: %s" % hook_str)

    def daemon_hook():
        """
        Check the libvirtd hooks.
        """
        # stop daemon first
        libvirtd.stop()
        prepare_hook_file(hook_script % hook_log)
        try:
            libvirtd.start()
            hook_str = hook_file + " - start - start"
            assert check_hooks(hook_str)
            # Restart libvirtd and test again
            if os.path.exists(hook_log):
                os.remove(hook_log)
            libvirtd.restart()
            hook_str = hook_file + " - shutdown - shutdown"
            assert check_hooks(hook_str)
            hook_str = hook_file + " - start - start"
            assert check_hooks(hook_str)

            # kill the daemon with SIGHUP
            if os.path.exists(hook_log):
                os.remove(hook_log)

            daemon_process = utils_libvirtd.Libvirtd().service_name
            utils_misc.signal_program(daemon_process, 1, '/var/run')

            hook_str = hook_file + " - reload begin SIGHUP"
            assert check_hooks(hook_str)

        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Failed to check daemon hooks")

    def attach_hook():
        """
        Check attach hooks.
        """
        # Start a domain with qemu command.
        disk_src = vm.get_first_disk_devices()['source']
        vm_test = "foo"
        prepare_hook_file(hook_script % (vm_test, hook_log))
        qemu_bin = params.get("qemu_bin", "/usr/libexec/qemu-kvm")
        if "ppc" in platform.machine():
            qemu_cmd = ("%s -machine pseries"
                        " -drive file=%s,if=none,bus=0,unit=1"
                        " -monitor unix:/tmp/demo,"
                        "server,nowait -name %s" %
                        (qemu_bin, disk_src, vm_test))
        else:
            qemu_cmd = ("%s -drive file=%s,if=none,bus=0,unit=1"
                        " -monitor unix:/tmp/demo,"
                        "server,nowait -name %s" %
                        (qemu_bin, disk_src, vm_test))
        # Note: qemu-attach has been observed to fail when the command
        # line above is changed
        os.system('%s &' % qemu_cmd)
        sta, pid = process.getstatusoutput("pgrep qemu-kvm")
        if not pid:
            test.fail("Cannot get pid of qemu command")
        try:
            ret = virsh.qemu_attach(pid, **virsh_dargs)
            if ret.exit_status:
                utils_misc.kill_process_tree(pid)
                test.fail("Cannot attach qemu process")
            else:
                virsh.destroy(vm_test)
        except Exception as detail:
            utils_misc.kill_process_tree(pid)
            test.fail("Failed to attach qemu process: %s" % str(detail))
        hook_str = hook_file + " " + vm_test + " attach begin -"
        if not check_hooks(hook_str):
            test.fail("Failed to check attach hooks")

    def edit_iface(net_name):
        """
        Edit interface options for vm.
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
        iface_xml = vmxml.get_devices(device_type="interface")[0]
        vmxml.del_device(iface_xml)
        iface_xml.type_name = "network"
        iface_xml.source = {"network": net_name}
        del iface_xml.address
        vmxml.add_device(iface_xml)
        vmxml.sync()

    def network_hook():
        """
        Check network hooks.
        """
        # Set interface to use default network
        net_name = params.get("net_name", "default")
        edit_iface(net_name)
        prepare_hook_file(hook_script % (net_name, hook_log))
        try:
            # destroy the network
            ret = virsh.net_destroy(net_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            hook_str = hook_file + " " + net_name + " stopped end -"
            assert check_hooks(hook_str)

            # start network
            ret = virsh.net_start(net_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            hook_str = hook_file + " " + net_name + " start begin -"
            assert check_hooks(hook_str)
            hook_str = hook_file + " " + net_name + " started begin -"
            assert check_hooks(hook_str)
            if vm.is_alive():
                vm.destroy(gracefully=False)

            # Remove all controllers, interfaces and addresses in vm dumpxml
            vm_inactive_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            vm_inactive_xml.remove_all_device_by_type('controller')
            type_dict = {'address': '/devices/*/address'}
            try:
                for elem in vm_inactive_xml.xmltreefile.findall(
                        type_dict['address']):
                    vm_inactive_xml.xmltreefile.remove(elem)
            except (AttributeError, TypeError) as details:
                test.fail("Fail to remove address.")
            vm_inactive_xml.xmltreefile.write()
            machine_list = vm_inactive_xml.os.machine.split("-")

            # Modify the machine type as required and add controllers
            # to the VM according to the machine type

            def generate_controller(controller_dict):
                controller_xml = Controller("controller")
                controller_xml.model = controller_dict['model']
                controller_xml.type = controller_dict['type']
                controller_xml.index = controller_dict['index']
                return controller_xml

            if machine_type == 'pc':
                vm_inactive_xml.set_os_attrs(
                    **{
                        "machine": machine_list[0] + "-i440fx-" +
                        machine_list[2]
                    })
                pc_Dict0 = {'model': 'pci-root', 'type': 'pci', 'index': 0}
                pc_Dict1 = {'model': 'pci-bridge', 'type': 'pci', 'index': 1}
                vm_inactive_xml.add_device(generate_controller(pc_Dict0))
                vm_inactive_xml.add_device(generate_controller(pc_Dict1))
            elif machine_type == 'q35':
                vm_inactive_xml.set_os_attrs(
                    **{"machine": machine_list[0] + "-q35-" + machine_list[2]})
                q35_Dict0 = {'model': 'pcie-root', 'type': 'pci', 'index': 0}
                q35_Dict1 = {
                    'model': 'pcie-root-port',
                    'type': 'pci',
                    'index': 1
                }
                q35_Dict2 = {
                    'model': 'pcie-to-pci-bridge',
                    'type': 'pci',
                    'index': 2
                }
                vm_inactive_xml.add_device(generate_controller(q35_Dict0))
                vm_inactive_xml.add_device(generate_controller(q35_Dict1))
                vm_inactive_xml.add_device(generate_controller(q35_Dict2))
            vm_inactive_xml.sync()

            # Plug a interface and Unplug the interface
            vm.start()
            vm.wait_for_login().close()
            interface_num = len(
                vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices(
                    "interface"))
            mac_addr = "52:54:00:9a:53:a9"
            logging.debug(vm_xml.VMXML.new_from_dumpxml(vm_name))

            def is_attached_interface():
                return len(
                    vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices(
                        "interface")) == interface_num + 1

            ret = virsh.attach_interface(vm_name, ("network %s --mac %s" %
                                                   (net_name, mac_addr)))
            libvirt.check_exit_status(ret)
            if utils_misc.wait_for(is_attached_interface,
                                   timeout=20) is not True:
                test.fail("Attaching interface failed.")
            if libvirt_version.version_compare(6, 0, 0):
                hook_str = hook_file + " " + net_name + " port-created begin -"
            else:
                hook_str = hook_file + " " + net_name + " plugged begin -"
            assert check_hooks(hook_str)

            def is_detached_interface():
                return len(
                    vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices(
                        "interface")) == interface_num

            ret = virsh.detach_interface(vm_name,
                                         "network --mac %s" % mac_addr)
            libvirt.check_exit_status(ret)
            utils_misc.wait_for(is_detached_interface, timeout=50)
            # If the detach did not finish within the timeout, retry once
            # (during testing, the first detach sometimes failed on q35 VMs)
            if len(
                    vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices(
                        "interface")) != interface_num:
                ret = virsh.detach_interface(vm_name,
                                             "network --mac %s" % mac_addr)
                libvirt.check_exit_status(ret)
            if utils_misc.wait_for(is_detached_interface,
                                   timeout=50) is not True:
                test.fail("Detaching interface failed.")
            if libvirt_version.version_compare(6, 0, 0):
                hook_str = hook_file + " " + net_name + " port-deleted begin -"
            else:
                hook_str = hook_file + " " + net_name + " unplugged begin -"
            assert check_hooks(hook_str)
            # remove the log file
            if os.path.exists(hook_log):
                os.remove(hook_log)
            # destroy the domain
            vm.destroy()
            if libvirt_version.version_compare(6, 0, 0):
                hook_str = hook_file + " " + net_name + " port-deleted begin -"
            else:
                hook_str = hook_file + " " + net_name + " unplugged begin -"
            assert check_hooks(hook_str)
        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Failed to check network hooks")

    def run_scale_test():
        """
        Try to start and stop domain many times.
        """
        prepare_hook_file(hook_script)
        loop_num = int(params.get("loop_num", 30))
        loop_timeout = int(params.get("loop_timeout", 600))
        cmd1 = ("for i in {1..%s};do echo $i 'start guest -';"
                "virsh start %s;sleep 1;echo $i 'stop guest -';"
                "virsh destroy %s;sleep 1;done;" %
                (loop_num, vm_name, vm_name))
        cmd2 = ("for i in {1..%s};do virsh list;sleep 1;done;" % loop_num * 2)
        utils_misc.run_parallel([cmd1, cmd2], timeout=loop_timeout)

    start_error = "yes" == params.get("start_error", "no")
    test_start_stop = "yes" == params.get("test_start_stop", "no")
    test_lxc = "yes" == params.get("test_lxc", "no")
    test_attach = "yes" == params.get("test_attach", "no")
    test_libvirtd = "yes" == params.get("test_libvirtd", "no")
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_saverestore = "yes" == params.get("test_saverestore", "no")
    test_daemon = "yes" == params.get("test_daemon", "no")
    test_network = "yes" == params.get("test_network", "no")
    if not test_lxc:
        basic_test = "yes" == params.get("basic_test", "yes")
        scale_test = "yes" == params.get("scale_test", "yes")
    else:
        basic_test = "no" == params.get("basic_test", "yes")
        scale_test = "no" == params.get("scale_test", "yes")
    domainxml_test = "yes" == params.get("domainxml_test", "no")

    # The hook script is provided from config
    hook_script = params.get("hook_script")

    # Destroy VM first
    if vm_name != "lxc_test_vm1" and vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    if vm_name != "lxc_test_vm1":
        vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        try:
            if test_start_stop:
                start_stop_hook()
            elif test_attach:
                attach_hook()
            elif start_error:
                prepare_hook_file(hook_script % (vm_name, hook_log))
            elif test_daemon:
                daemon_hook()
            elif test_network:
                network_hook()
            elif scale_test:
                run_scale_test()
            # Start the domain
            if vm_name != "lxc_test_vm1" and vm.is_dead():
                vm.start()
                vm.wait_for_login().close()
            if test_libvirtd:
                libvirtd_hook()
            elif test_saverestore:
                save_restore_hook()
            elif test_managedsave:
                managedsave_hook()
            if test_lxc:
                lxc_hook()

        except virt_vm.VMStartError as e:
            logging.info(str(e))
            if start_error:
                pass
            else:
                test.fail('VM Failed to start for some reason!')
        else:
            if start_error:
                test.fail('VM started unexpected')

    finally:
        # Recover VM.
        logging.info("Restoring vm...")
        if test_managedsave:
            virsh.managedsave_remove(vm_name)
        if vm_name != "lxc_test_vm1" and vm.is_alive():
            vm.destroy(gracefully=False)
        if os.path.exists(hook_file):
            os.remove(hook_file)
        if os.path.exists(hook_log):
            os.remove(hook_log)
        libvirtd.restart()
        if vm_name != "lxc_test_vm1":
            vmxml_backup.sync()
Example #5
def run(test, params, env):
    """
    Test command: virsh list.

    1) Filter parameters according to libvirtd's version.
    2) Prepare the domain's state: transient, managed-save.
    3) Prepare libvirt's status.
    4) Execute list command.
    5) Result check.
    """
    def list_local_domains_on_remote(options_ref, remote_ip, remote_passwd,
                                     local_ip, remote_user, local_user,
                                     local_pwd):
        """
        Create a virsh list command and execute it on remote host.
        It will list local domains on remote host.

        :param options_ref:options in virsh list command.
        :param remote_ip:remote host's ip.
        :param remote_passwd:remote host's password.
        :param local_ip:local ip, to create uri in virsh list.
        :return:return status and output of the virsh list command.
        """
        complete_uri = libvirt_vm.complete_uri(local_ip)
        command_on_remote = ("virsh -c %s list %s"
                             % (complete_uri, options_ref))
        try:
            # setup autologin for ssh from remote machine to execute commands
            # remotely
            config_opt = ["StrictHostKeyChecking=no"]
            ssh_key.setup_remote_ssh_key(remote_ip, remote_user,
                                         remote_passwd, hostname2=local_ip,
                                         user2=local_user,
                                         password2=local_pwd,
                                         config_options=config_opt)
            session = remote.remote_login("ssh", remote_ip, "22", remote_user,
                                          remote_passwd, "#")
            time.sleep(5)
            status, output = session.cmd_status_output(
                command_on_remote, internal_timeout=30)
        except Exception as info:
            logging.error("Shell failed to execute command from"
                          " remote")
            return 1, info
        time.sleep(5)
        session.close()
        return int(status), output

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    options_ref = params.get("list_options_ref", "")
    list_ref = params.get("list_type_ref", "")
    vm_ref = params.get("vm_ref", "")
    status_error = params.get("status_error", "no")
    addition_status_error = params.get("addition_status_error", "no")
    desc = params.get("list_desc", "")
    libvirtd = params.get("libvirtd", "on")
    remote_ref = params.get("remote_ref", "")
    remote_ip = params.get("remote_ip")
    remote_pwd = params.get("remote_pwd", None)
    local_ip = params.get("local_ip")
    remote_user = params.get("remote_user", "root")
    local_user = params.get("username", "root")
    local_pwd = params.get("local_pwd", None)

    vm = env.get_vm(vm_name)
    domuuid = vm.get_uuid()
    domid = vm.get_id()

    # Some parameters are not supported on old libvirt, skip them.
    help_info = virsh.help("list").stdout.strip()
    if vm_ref and not re.search(vm_ref, help_info):
        raise exceptions.TestSkipError("This version do not support vm type:%s"
                                       % vm_ref)
    if list_ref and not re.search(list_ref, help_info):
        raise exceptions.TestSkipError("This version do not support list"
                                       " type:%s" % list_ref)

    # If a transient domain is destroyed, it will disappear.
    if vm_ref == "transient" and options_ref == "inactive":
        logging.info("Set addition_status_error to yes")
        logging.info(
            "because transient domain will disappear after destroyed.")
        addition_status_error = "yes"

    if vm_ref == "transient":
        vm_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vm.undefine()
    elif vm_ref == "managed-save":
        virsh.managedsave(vm_name, ignore_status=True, print_info=True)

    try:
        # run test case
        if list_ref == "--uuid":
            result_expected = domuuid
            logging.info("%s's uuid is: %s", vm_name, domuuid)
        elif list_ref == "--title":
            vm_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if options_ref == "inactive":
                virsh.desc(vm_name, "--config --title", desc)
            else:
                virsh.desc(vm_name, "--live --title", desc)
            result_expected = desc
            logging.info("%s's title is: %s", vm_name, desc)
        else:
            result_expected = vm_name
            logging.info("domain's name is: %s", vm_name)

        if options_ref == "vm_id":
            logging.info("%s's running-id is: %s", vm_name, domid)
            options_ref = "%s %s" % (domid, list_ref)
        elif options_ref == "vm_uuid":
            logging.info("%s's uuid is: %s", vm_name, domuuid)
            options_ref = "%s %s" % (domuuid, list_ref)
        elif options_ref == "inactive":
            vm.destroy()
            options_ref = "--inactive %s" % list_ref
        elif options_ref == "vm_name":
            options_ref = "%s %s" % (vm_name, list_ref)
        elif options_ref == "all":
            options_ref = "--all %s" % list_ref
        elif options_ref == "":
            options_ref = "%s" % list_ref

        # Prepare libvirtd status
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        if remote_ref == "remote":
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise exceptions.TestSkipError(
                    "Remote test parameters unchanged from default")
            logging.info("Execute virsh command on remote host %s.", remote_ip)
            status, output = list_local_domains_on_remote(options_ref,
                                                          remote_ip,
                                                          remote_pwd,
                                                          local_ip,
                                                          remote_user,
                                                          local_user,
                                                          local_pwd)
            logging.info("Status:%s", status)
            logging.info("Output:\n%s", output)
        else:
            if vm_ref:
                options_ref = "%s --%s" % (options_ref, vm_ref)
            result = virsh.dom_list(
                options_ref, ignore_status=True, print_info=True)
            status = result.exit_status
            output = result.stdout.strip()

    except Exception as output:
        status = True
        logging.error("Exception: %s" % output)

    finally:
        # Recover libvirtd service status
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # Recover of domain
        if vm_ref == "transient" or list_ref == "--title":
            vm_backup.sync()
        elif vm_ref == "managed-save":
            # Recover saved guest.
            virsh.managedsave_remove(vm_name, ignore_status=True,
                                     print_info=True)

        # Check result
        status_error = (status_error == "no") and \
                       (addition_status_error == "no")
        if vm_ref == "managed-save":
            saved_output = re.search(vm_name + r"\s+saved", output)
            if saved_output:
                output = saved_output.group(0)
            else:
                output = ""

        if not status_error:
            if not status and re.search(result_expected, output):
                raise exceptions.TestFail("Run successful with wrong command!")
        else:
            if status:
                raise exceptions.TestFail("Run failed with right command.")
            if not re.search(result_expected, output):
                raise exceptions.TestFail("Run successful but result is not"
                                          " expected.")
Example #6
def run(test, params, env):
    """
    Test command: virsh start.

    1) Get the params from params.
    2) Prepare libvirtd's status.
    3) Do the start operation.
    4) Result check.
    5) clean up.
    """
    # get the params from params
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm_ref = params.get("vm_ref", "vm1")
    opt = params.get("vs_opt", "")

    # Backup for recovery.
    vmxml_backup = libvirt_xml.vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    backup_name = vm_ref
    vm = None
    if vm_ref is not "":
        vm = env.get_vm(vm_ref)
    vmxml = libvirt_xml.VMXML()

    libvirtd_state = params.get("libvirtd", "on")
    pre_operation = params.get("vs_pre_operation", "")
    status_error = params.get("status_error", "no")

    # get the params for remote test
    remote_ip = params.get("remote_ip", "ENTER.YOUR.REMOTE.IP")
    remote_pwd = params.get("remote_pwd", "ENTER.YOUR.REMOTE.PASSWORD")
    local_ip = params.get("local_ip", "ENTER.YOUR.LOCAL.IP")
    if pre_operation == "remote" and (remote_ip.count("ENTER.YOUR.")
                                      or local_ip.count("ENTER.YOUR.")):
        raise error.TestNAError("Remote test parameters not configured")

    try:
        # prepare before start vm
        if libvirtd_state == "on":
            utils_libvirtd.libvirtd_start()
        elif libvirtd_state == "off":
            utils_libvirtd.libvirtd_stop()

        if pre_operation == "rename":
            new_vm_name = params.get("vs_new_vm_name", "virsh_start_vm1")
            vm = libvirt_xml.VMXML.vm_rename(vm, new_vm_name)
            vm_ref = new_vm_name
        elif pre_operation == "undefine":
            vmxml = vmxml.new_from_dumpxml(vm_ref)
            vmxml.undefine()

        # do the start operation
        try:
            if pre_operation == "remote":
                # get remote session
                session = remote.wait_for_login("ssh", remote_ip, "22", "root",
                                                remote_pwd, "#")
                # get uri of local
                uri = libvirt_vm.complete_uri(local_ip)

                cmd = "virsh -c %s start %s" % (uri, vm_ref)
                status, output = session.cmd_status_output(cmd)
                if status:
                    raise error.TestError(vm_ref, output)
            elif opt.count("console"):
                # With --console, start command will print the
                # dmesg of guest in starting and turn into the
                # login prompt. In this case, we start it with
                # --console and login vm in console by
                # remote.handle_prompts().
                cmd = "start %s --console" % vm_ref
                virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC,
                                                   auto_close=True)
                virsh_session.sendline(cmd)
                remote.handle_prompts(virsh_session,
                                      params.get("username", ""),
                                      params.get("password", ""),
                                      r"[\#\$]\s*$",
                                      timeout=60,
                                      debug=True)
            elif opt.count("autodestroy"):
                # With --autodestroy, vm will be destroyed when
                # virsh session closed. Then we execute start
                # command in a virsh session and start vm with
                # --autodestroy. Then we closed the virsh session,
                # and check the vm is destroyed or not.
                virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC,
                                                   auto_close=True)
                cmd = "start %s --autodestroy" % vm_ref
                status = virsh_session.cmd_status(cmd)
                if status:
                    raise error.TestFail(
                        "Failed to start vm with --autodestroy.")
                # Close the session, then the vm should be destroyed.
                virsh_session.close()
            elif opt.count("force-boot"):
                # With --force-boot, VM will be started from boot
                # even if we have saved it with virsh managedsave.
                # In this case, we start vm and execute sleep 1000&,
                # then save it with virsh managedsave. At last, we
                # start vm with --force-boot. To verify the result,
                # we check the sleep process. If the process exists,
                # force-boot failed, else case pass.
                vm.start()
                session = vm.wait_for_login()
                status = session.cmd_status("sleep 1000&")
                if status:
                    raise error.TestError("Can not execute command in guest.")
                sleep_pid = session.cmd_output("echo $!").strip()
                virsh.managedsave(vm_ref)
                virsh.start(vm_ref, options=opt)
            else:
                cmd_result = virsh.start(vm_ref, options=opt)
                if cmd_result.exit_status:
                    if status_error == "no":
                        raise error.TestFail("Start vm failed.\n Detail: %s" %
                                             cmd_result)
                else:
                    # start vm successfully
                    if status_error == "yes":
                        raise error.TestFail("Run successfully with wrong "
                                             "command!\n Detail:%s" %
                                             cmd_result)

            if opt.count("paused"):
                if not (vm.state() == "paused"):
                    raise error.TestFail("VM is not paused when started with "
                                         "--paused.")
            elif opt.count("autodestroy"):
                if vm.is_alive():
                    raise error.TestFail(
                        "VM was started with --autodestroy,"
                        "but not destroyed when virsh session "
                        "closed.")
            elif opt.count("force-boot"):
                session = vm.wait_for_login()
                status = session.cmd_status("ps %s |grep '[s]leep 1000'" %
                                            sleep_pid)
                if not status:
                    raise error.TestFail("VM was started with --force-boot,"
                                         "but it is restored from a"
                                         " managedsave.")
            else:
                if status_error == "no" and not vm.is_alive():
                    raise error.TestFail("VM was started but it is not alive.")

        except remote.LoginError as detail:
            raise error.TestFail("Failed to login guest: %s" % detail)
    finally:
        # clean up
        if opt.count("force-boot"):
            virsh.managedsave_remove(vm_ref)

        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_start()

        elif pre_operation == "rename":
            libvirt_xml.VMXML.vm_rename(vm, backup_name)

        if vm and vm.is_paused():
            vm.resume()

        # Restore VM
        vmxml_backup.sync()
Example #7
def run(test, params, env):
    """
    Test command: virsh managedsave.

    This command can save and destroy a
    running domain, so it can be restarted
    from the same state at a later time.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    managed_save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name
    shutdown_timeout = int(params.get('shutdown_timeout', 60))

    # define function
    def vm_recover_check(option, libvirtd, check_shutdown=False):
        """
        Check if the vm can be recovered correctly.

        :param guest_name : Checked vm's name.
        :param option : managedsave command option.
        """
        # At this point the vm should have been shut down by managedsave
        if vm.is_alive():
            test.fail("Guest should be inactive")
        # Check vm managed save state.
        ret = virsh.dom_list("--managed-save --inactive", debug=True)
        vm_state1 = re.findall(r".*%s.*" % vm_name,
                               ret.stdout.strip())[0].split()[2]
        ret = virsh.dom_list("--managed-save --all", debug=True)
        vm_state2 = re.findall(r".*%s.*" % vm_name,
                               ret.stdout.strip())[0].split()[2]
        if vm_state1 != "saved" or vm_state2 != "saved":
            test.fail("Guest state should be saved")

        virsh.start(vm_name, debug=True)
        # The vm should be running now
        if vm.is_dead():
            test.fail("Guest should be active")
        # Restart libvirtd and check vm status again.
        libvirtd.restart()
        if vm.is_dead():
            test.fail("Guest should be active after" " restarting libvirtd")
        # Check managed save file:
        if os.path.exists(managed_save_file):
            test.fail("Managed save image exist " "after starting the domain")
        if option:
            if option.count("running"):
                if vm.is_dead() or vm.is_paused():
                    test.fail("Guest state should be"
                              " running after started"
                              " because of '--running' option")
            elif option.count("paused"):
                if not vm.is_paused():
                    test.fail("Guest state should be"
                              " paused after started"
                              " because of '--paused' option")
        else:
            if params.get("paused_after_start_vm") == "yes":
                if not vm.is_paused():
                    test.fail("Guest state should be"
                              " paused after started"
                              " because of initia guest state")
        if check_shutdown:
            # Resume the domain.
            if vm.is_paused():
                vm.resume()
            vm.wait_for_login()
            # Shutdown and start the domain;
            # it should be in running state and can be logged into.
            vm.shutdown()
            if not vm.wait_for_shutdown(shutdown_timeout):
                test.fail('VM failed to shutdown')
            vm.start()
            vm.wait_for_login()

    def vm_undefine_check(vm_name):
        """
        Check if vm can be undefined with manage-save option
        """
        # Back up the xml file
        xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        if not os.path.exists(managed_save_file):
            test.fail("Can't find managed save image")
        # Undefine the domain with no options.
        if not virsh.undefine(vm_name, options=None,
                              ignore_status=True).exit_status:
            test.fail("Guest shouldn't be undefined while domain"
                      " managed save image exists")
        # Undefine the domain with the --managed-save option.
        if virsh.undefine(vm_name,
                          options="--managed-save",
                          ignore_status=True).exit_status:
            test.fail("Guest can't be undefined with managed-save option")

        if os.path.exists(managed_save_file):
            test.fail("Managed save image exists" " after undefining vm")
        #restore and start the vm.
        xml_backup.define()
        vm.start()

    def check_flags_parallel(virsh_cmd, bash_cmd, flags):
        """
        Run the commands in parallel and check the output.
        """
        cmd = ("%s & %s" % (virsh_cmd, bash_cmd))
        ret = process.run(cmd,
                          ignore_status=True,
                          shell=True,
                          ignore_bg_processes=True)
        output = ret.stdout_text.strip()
        logging.debug("check flags output: %s" % output)
        lines = re.findall(r"flags:.(\d+)", output, re.M)
        logging.debug("Find all fdinfo flags: %s" % lines)
        # fdinfo reports flags in octal; mask each value with the expected bits
        lines = [int(i, 8) & flags for i in lines]
        if flags not in lines:
            test.fail("Checking flags %s failed" % flags)

        return ret

    def check_multi_guests(guests, start_delay, libvirt_guests):
        """
        Check start_delay option for multiple guests.
        """
        # Destroy vm first
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Clone given number of guests
        timeout = params.get("clone_timeout", 360)
        for i in range(int(guests)):
            dst_vm = "%s_%s" % (vm_name, i)
            utils_libguestfs.virt_clone_cmd(vm_name,
                                            dst_vm,
                                            True,
                                            timeout=timeout)
            virsh.start(dst_vm, debug=True)

        # Wait 10 seconds for vm to start
        time.sleep(10)
        is_systemd = process.run("cat /proc/1/comm",
                                 shell=True).stdout_text.count("systemd")
        if is_systemd:
            libvirt_guests.restart()
            pattern = r'(.+ \d\d:\d\d:\d\d).+: Resuming guest.+done'
        else:
            ret = process.run("service libvirt-guests restart | \
                              awk '{ print strftime(\"%b %y %H:%M:%S\"), \
                              $0; fflush(); }'",
                              shell=True)
            pattern = r'(.+ \d\d:\d\d:\d\d)+ Resuming guest.+done'

        # The libvirt-guests status command reads messages from the systemd
        # journal; in case the messages are not ready in time,
        # add a wait here.
        def wait_func():
            return libvirt_guests.raw_status().stdout.count("Resuming guest")

        utils_misc.wait_for(wait_func, 5)
        if is_systemd:
            ret = libvirt_guests.raw_status()
        logging.info("status output: %s", ret.stdout_text)
        resume_time = re.findall(pattern, ret.stdout_text, re.M)
        if not resume_time:
            test.fail("Can't see messages of resuming guest")

        # Convert time string to int
        resume_seconds = [
            time.mktime(time.strptime(tm, "%b %y %H:%M:%S"))
            for tm in resume_time
        ]
        logging.info("Resume time in seconds: %s", resume_seconds)
        # Check if start_delay takes effect
        for i in range(len(resume_seconds) - 1):
            if resume_seconds[i + 1] - resume_seconds[i] < int(start_delay):
                test.fail("Checking start_delay failed")

    def wait_for_state(vm_state):
        """
        Wait until the vm reaches the given state.
        """
        utils_misc.wait_for(lambda: vm.state() == vm_state, 10)

    def check_guest_flags(bash_cmd, flags):
        """
        Check bypass_cache option for single guest.
        """
        # Drop caches.
        drop_caches()
        # Form the proper parallel command based on whether systemd is used
        is_systemd = process.run("cat /proc/1/comm",
                                 shell=True).stdout_text.count("systemd")
        if is_systemd:
            virsh_cmd_stop = "systemctl stop libvirt-guests"
            virsh_cmd_start = "systemctl start libvirt-guests"
        else:
            virsh_cmd_stop = "service libvirt-guests stop"
            virsh_cmd_start = "service libvirt-guests start"

        ret = check_flags_parallel(
            virsh_cmd_stop,
            bash_cmd % (managed_save_file, managed_save_file, "1"), flags)
        if is_systemd:
            ret = libvirt_guests.raw_status()
        logging.info("status output: %s", ret.stdout_text)
        if all([
                "Suspending %s" % vm_name not in ret.stdout_text,
                "stopped, with saved guests" not in ret.stdout_text
        ]):
            test.fail("Can't see messages of suspending vm")
        # status command should return 3.
        if not is_systemd:
            ret = libvirt_guests.raw_status()
        if ret.exit_status != 3:
            test.fail("The exit code %s for libvirt-guests"
                      " status is not correct" % ret.exit_status)

        # Wait for VM in shut off state
        wait_for_state("shut off")
        check_flags_parallel(
            virsh_cmd_start,
            bash_cmd % (managed_save_file, managed_save_file, "0"), flags)
        # Wait for VM in running state
        wait_for_state("running")

    def vm_msave_remove_check(vm_name):
        """
        Check managed save remove command.
        """
        if not os.path.exists(managed_save_file):
            test.fail("Can't find managed save image")
        virsh.managedsave_remove(vm_name, debug=True)
        if os.path.exists(managed_save_file):
            test.fail("Managed save image still exists")
        virsh.start(vm_name, debug=True)
        # The domain state should be running
        if vm.state() != "running":
            test.fail("Guest state should be" " running after started")

    def vm_managedsave_loop(vm_name, loop_range, libvirtd):
        """
        Run a loop of managedsave command and check its result.
        """
        if vm.is_dead():
            virsh.start(vm_name, debug=True)
        for i in range(int(loop_range)):
            logging.debug("Test loop: %s" % i)
            virsh.managedsave(vm_name, debug=True)
            virsh.start(vm_name, debug=True)
        # Check libvirtd status.
        if not libvirtd.is_running():
            test.fail("libvirtd is stopped after cmd")
        # Check vm status.
        if vm.state() != "running":
            test.fail("Guest isn't in running state")

    def build_vm_xml(vm_name, **dargs):
        """
        Build the new domain xml and define it.
        """
        try:
            # stop vm before doing any change to xml
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            if dargs.get("cpu_mode"):
                if "cpu" in vmxml:
                    del vmxml.cpu
                cpuxml = vm_xml.VMCPUXML()
                cpuxml.mode = params.get("cpu_mode", "host-model")
                cpuxml.match = params.get("cpu_match", "exact")
                cpuxml.fallback = params.get("cpu_fallback", "forbid")
                cpu_topology = {}
                cpu_topology_sockets = params.get("cpu_topology_sockets")
                if cpu_topology_sockets:
                    cpu_topology["sockets"] = cpu_topology_sockets
                cpu_topology_cores = params.get("cpu_topology_cores")
                if cpu_topology_cores:
                    cpu_topology["cores"] = cpu_topology_cores
                cpu_topology_threads = params.get("cpu_topology_threads")
                if cpu_topology_threads:
                    cpu_topology["threads"] = cpu_topology_threads
                if cpu_topology:
                    cpuxml.topology = cpu_topology
                vmxml.cpu = cpuxml
                vmxml.vcpu = int(params.get("vcpu_nums"))
            if dargs.get("sec_driver"):
                seclabel_dict = {
                    "type": "dynamic",
                    "model": "selinux",
                    "relabel": "yes"
                }
                vmxml.set_seclabel([seclabel_dict])

            vmxml.sync()
            vm.start()
        except Exception as e:
            logging.error(str(e))
            test.cancel("Build domain xml failed")

    status_error = ("yes" == params.get("status_error", "no"))
    vm_ref = params.get("managedsave_vm_ref", "name")
    libvirtd_state = params.get("libvirtd", "on")
    extra_param = params.get("managedsave_extra_param", "")
    progress = ("yes" == params.get("managedsave_progress", "no"))
    cpu_mode = "yes" == params.get("managedsave_cpumode", "no")
    test_undefine = "yes" == params.get("managedsave_undefine", "no")
    test_bypass_cache = "yes" == params.get("test_bypass_cache", "no")
    autostart_bypass_cache = params.get("autostart_bypass_cache", "")
    multi_guests = params.get("multi_guests", "")
    test_libvirt_guests = params.get("test_libvirt_guests", "")
    check_flags = "yes" == params.get("check_flags", "no")
    security_driver = params.get("security_driver", "")
    remove_after_cmd = "yes" == params.get("remove_after_cmd", "no")
    option = params.get("managedsave_option", "")
    check_shutdown = "yes" == params.get("shutdown_after_cmd", "no")
    pre_vm_state = params.get("pre_vm_state", "")
    move_saved_file = "yes" == params.get("move_saved_file", "no")
    test_loop_cmd = "yes" == params.get("test_loop_cmd", "no")
    if option:
        if not virsh.has_command_help_match('managedsave', option):
            # Older libvirt does not have this option
            test.cancel("Older libvirt does not"
                        " handle arguments consistently")

    # Backup xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Get the libvirtd service
    libvirtd = utils_libvirtd.Libvirtd()
    # Get config files.
    qemu_config = utils_config.LibvirtQemuConfig()
    libvirt_guests_config = utils_config.LibvirtGuestsConfig()
    # Get libvirt-guests service
    libvirt_guests = Factory.create_service("libvirt-guests")

    try:
        # Destroy vm first for setting configuration file
        if vm.state() == "running":
            vm.destroy(gracefully=False)
        # Prepare test environment.
        if libvirtd_state == "off":
            libvirtd.stop()
        if autostart_bypass_cache:
            ret = virsh.autostart(vm_name, "", ignore_status=True, debug=True)
            libvirt.check_exit_status(ret)
            qemu_config.auto_start_bypass_cache = autostart_bypass_cache
            libvirtd.restart()
        if security_driver:
            qemu_config.security_driver = [security_driver]
        if test_libvirt_guests:
            if multi_guests:
                start_delay = params.get("start_delay", "20")
                libvirt_guests_config.START_DELAY = start_delay
            if check_flags:
                libvirt_guests_config.BYPASS_CACHE = "1"
            # The config file format should be "x=y" instead of "x = y"
            process.run(
                "sed -i -e 's/ = /=/g' "
                "/etc/sysconfig/libvirt-guests",
                shell=True)
            libvirt_guests.restart()

        # Change domain xml.
        if cpu_mode:
            build_vm_xml(vm_name, cpu_mode=True)
        if security_driver:
            build_vm_xml(vm_name, sec_driver=True)

        # Turn VM into certain state.
        if pre_vm_state == "transient":
            logging.info("Creating %s..." % vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            # Wait for VM to be in shut off state
            utils_misc.wait_for(lambda: vm.state() == "shut off", 10)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, ignore_status=True,
                            debug=True).exit_status:
                vmxml_backup.define()
                test.cancel("Cann't create the domain")

        # Wait for vm in stable state
        if params.get("start_vm") == "yes":
            if vm.state() == "shut off":
                vm.start()
                vm.wait_for_login()

        # run test case
        domid = vm.get_id()
        domuuid = vm.get_uuid()
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "uuid":
            vm_ref = domuuid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.count("invalid"):
            vm_ref = params.get(vm_ref)
        elif vm_ref == "name":
            vm_ref = vm_name

        # Ignore exception with "ignore_status=True"
        if progress:
            option += " --verbose"
        option += extra_param

        # For the bypass_cache test, run a shell command to check fd flags
        # while executing the managedsave command
        software_mgr = software_manager.SoftwareManager()
        if not software_mgr.check_installed('lsof'):
            logging.info('Installing lsof package:')
            software_mgr.install('lsof')
        bash_cmd = (
            "let i=1; while((i++<400)); do if [ -e %s ]; then (cat /proc"
            "/$(lsof -w %s|awk '/libvirt_i/{print $2}')/fdinfo/%s |"
            "grep 'flags:.*') && break; else sleep 0.05; fi; done;")
        # Flags to check bypass cache take effect
        flags = os.O_DIRECT
        if test_bypass_cache:
            # Drop caches.
            drop_caches()
            virsh_cmd = "virsh managedsave %s %s" % (option, vm_name)
            check_flags_parallel(
                virsh_cmd,
                bash_cmd % (managed_save_file, managed_save_file, "1"), flags)
            # Wait for VM in shut off state
            wait_for_state("shut off")
            virsh_cmd = "virsh start %s %s" % (option, vm_name)
            check_flags_parallel(
                virsh_cmd,
                bash_cmd % (managed_save_file, managed_save_file, "0"), flags)
            # Wait for VM in running state
            wait_for_state("running")
        elif test_libvirt_guests:
            logging.debug("libvirt-guests status: %s", libvirt_guests.status())
            if multi_guests:
                check_multi_guests(multi_guests, start_delay, libvirt_guests)

            if check_flags:
                check_guest_flags(bash_cmd, flags)

        else:
            # Ensure VM is running
            utils_misc.wait_for(lambda: vm.state() == "running", 10)
            ret = virsh.managedsave(vm_ref,
                                    options=option,
                                    ignore_status=True,
                                    debug=True)
            status = ret.exit_status
            # The progress information is output in the error message
            error_msg = ret.stderr.strip()
            if move_saved_file:
                cmd = "echo > %s" % managed_save_file
                process.run(cmd, shell=True)

            # recover libvirtd service start
            if libvirtd_state == "off":
                libvirtd.start()

            if status_error:
                if not status:
                    if libvirtd_state == "off" and libvirt_version.version_compare(
                            5, 6, 0):
                        logging.info(
                            "From libvirt version 5.6.0 libvirtd is restarted "
                            "and command should succeed")
                    else:
                        test.fail("Run successfully with wrong command!")
            else:
                if status:
                    test.fail("Run failed with right command")
                if progress:
                    if not error_msg.count("Managedsave:"):
                        test.fail("Got invalid progress output")
                if remove_after_cmd:
                    vm_msave_remove_check(vm_name)
                elif test_undefine:
                    vm_undefine_check(vm_name)
                elif autostart_bypass_cache:
                    # rhbz#1755303
                    if libvirt_version.version_compare(5, 6, 0):
                        os.remove("/run/libvirt/qemu/autostarted")
                    libvirtd.stop()
                    virsh_cmd = ("(service libvirtd start)")
                    check_flags_parallel(
                        virsh_cmd,
                        bash_cmd % (managed_save_file, managed_save_file, "0"),
                        flags)
                elif test_loop_cmd:
                    loop_range = params.get("loop_range", "20")
                    vm_managedsave_loop(vm_name, loop_range, libvirtd)
                else:
                    vm_recover_check(option, libvirtd, check_shutdown)
    finally:
        # Restore test environment.
        # Restart libvirtd.service
        qemu_config.restore()
        libvirt_guests_config.restore()
        libvirtd.restart()
        if autostart_bypass_cache:
            virsh.autostart(vm_name,
                            "--disable",
                            ignore_status=True,
                            debug=True)
        vm.destroy(gracefully=False)
        virsh.managedsave_remove(vm_name, debug=True)
        vmxml_backup.sync()
        if multi_guests:
            for i in range(int(multi_guests)):
                virsh.remove_domain("%s_%s" % (vm_name, i),
                                    "--remove-all-storage",
                                    debug=True)
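
The recovery and remove checks above boil down to one managedsave round trip. A
minimal sketch, assuming a defined and running domain ("demo-vm" and the
default qemu save directory are placeholders, not values from the test):

import os

from virttest import virsh

vm_name = "demo-vm"  # hypothetical domain name
save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name

virsh.managedsave(vm_name, debug=True)
# managedsave stops the domain and leaves a save image behind.
assert os.path.exists(save_file), "managedsave should create the save image"

virsh.start(vm_name, debug=True)
# Starting from a managed save image consumes it, so the file must be gone
# and the domain should be running again.
assert not os.path.exists(save_file), "save image should be removed on start"
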
Beispiel #8
0
    except Exception as output:
        status = True
        logging.error("Exception: %s" % output)

    finally:
        # Recover libvirtd service status
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # Recover of domain
        if vm_ref == "transient" or list_ref == "--title":
            vm_backup.sync()
        elif vm_ref == "managed-save":
            # Recover saved guest.
            virsh.managedsave_remove(vm_name, ignore_status=True,
                                     print_info=True)

        # Check result
        status_error = (status_error == "no") and \
                       (addition_status_error == "no")
        if vm_ref == "managed-save":
            saved_output = re.search(vm_name + r"\s+saved", output)
            if saved_output:
                output = saved_output.group(0)
            else:
                output = ""

        if not status_error:
            if not status and re.search(result_expected, output):
                raise exceptions.TestFail("Run successfully with wrong command!")
        else:
Beispiel #9
0
                else:
                    vm_recover_check(option, libvirtd, check_shutdown)
    finally:
        # Restore test environment.

        # Ensure libvirtd is started
        if not libvirtd.is_running():
            libvirtd.start()
        if vm.is_paused():
            virsh.resume(vm_name)
        elif vm.is_dead():
            vm.start()
        # Wait for VM in running state
        wait_for_state("running")
        if autostart_bypass_cache:
            virsh.autostart(vm_name, "--disable",
                            ignore_status=True)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Wait for VM to be in shut off state
        utils_misc.wait_for(lambda: vm.state() == "shut off", 10)
        virsh.managedsave_remove(vm_name)
        vmxml_backup.sync()
        if multi_guests:
            for i in range(int(multi_guests)):
                virsh.remove_domain("%s_%s" % (vm_name, i),
                                    "--remove-all-storage")
        qemu_config.restore()
        libvirt_guests_config.restore()
        libvirtd.restart()
def run(test, params, env):
    """
    Test command: virsh domjobinfo.

    The command returns information about jobs running on a domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform job action on a domain.
    4.Get running and completed job info by virsh domjobinfo.
    5.Recover test environment.
    6.Confirm the test result.
    """
    def get_subprocess(action, vm_name, file, remote_uri=None):
        """
        Execute background virsh command, return subprocess w/o waiting for exit()

        :param action : virsh command and its option.
        :param vm_name : VM's name
        :param file : virsh command's file option, could be vm.dump, vm.save, etc.
        """
        command = "virsh %s %s %s" % (action, vm_name, file)
        logging.debug("Action: %s", command)
        p = subprocess.Popen(command,
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        return p

    def cmp_jobinfo(result, info_list, job_type, actions):
        """
        Compare the output jobinfo with expected one

        :param result : the return from domjobinfo cmd
        :param info_list : an expected domjobinfo list
        :param job_type : an expected value for 'Job Type'
        :param actions : the job operation
        """
        logging.debug(result.stdout)
        out_list = result.stdout.strip().splitlines()
        out_dict = dict([x.split(':') for x in out_list])
        ret_cmp = set(out_dict.keys()) == set(info_list)
        if not ret_cmp:
            if set(info_list) - set(out_dict.keys()):
                test.fail("Missing expected items in domjobinfo output: %s" %
                          (set(info_list) - set(out_dict.keys())))
            else:
                new_cmp = set(out_dict.keys()) - set(info_list)
                known_item = {'Memory bandwidth'}
                # For running domjobinfo, 'Memory bandwidth' appears sometimes.
                if new_cmp != known_item or job_type == "Completed":
                    test.fail("New items appear: %s, pls modify script!",
                              new_cmp)
        else:
            if out_dict["Job type"].strip() != job_type:
                test.fail("Expect %s Job type but got %s" %
                          (job_type, out_dict["Job type"].strip()))
            if out_dict["Operation"].strip() != actions.capitalize():
                test.fail(
                    "Expect %s Operation but got %s" %
                    (actions.capitalize(), out_dict["Operation"].strip()))

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    pre_vm_state = params.get("pre_vm_state", "start")
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    # Instead of "paused_after_start_vm", use "pre_vm_state".
    # After starting the VM, wait for some time to make sure a job
    # can be created on this domain.
    if start_vm == "yes":
        vm.wait_for_login()
        if params.get("pre_vm_state") == "suspend":
            vm.pause()

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    actions = params.get("domjobinfo_action", "dump")
    act_opt = params.get("dump_opt", "")
    vm_ref = params.get("domjobinfo_vm_ref")
    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")
    # Use tmp_pipe to act as target file for job operation in subprocess,
    # such as vm.dump, vm.save, etc.
    tmp_pipe = os.path.join(data_dir.get_tmp_dir(), "domjobinfo.fifo")

    # Build job action
    action = ' '.join([actions, act_opt])
    if actions == "managedsave":
        tmp_pipe = '/var/lib/libvirt/qemu/save/%s.save' % vm.name

    # Expected domjobinfo list
    info_list = [
        "Job type", "Time elapsed", "Data processed", "Data remaining",
        "Data total", "Memory processed", "Memory remaining", "Memory total",
        "Dirty rate", "Iteration", "Constant pages", "Normal pages",
        "Normal data", "Expected downtime", "Setup time"
    ]
    if libvirt_version.version_compare(3, 2, 0):
        info_list.insert(1, "Operation")
        if libvirt_version.version_compare(3, 9, 0):
            info_list.insert(info_list.index("Dirty rate") + 1, "Page size")
            if libvirt_version.version_compare(5, 0, 0):
                info_list.insert(
                    info_list.index("Iteration") + 1, "Postcopy requests")
    logging.debug("The expected info_list for running job is %s", info_list)

    # run test case
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, params.get("domjobinfo_extra"))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif 'invalid' in vm_ref:
        vm_ref = params.get(vm_ref)

    # Get the subprocess of VM.
    # The command's purpose is to get domjobinfo for a running domain job,
    # so before running "domjobinfo" we must create a job on the domain.
    process = None
    if start_vm == "yes" and status_error == "no":
        if os.path.exists(tmp_pipe):
            os.unlink(tmp_pipe)
        os.mkfifo(tmp_pipe)

        # Target file param is not needed for managedsave operation
        if action == "managedsave ":
            process = get_subprocess(action, vm_name, "", None)
        else:
            process = get_subprocess(action, vm_name, tmp_pipe, None)

        f = open(tmp_pipe, 'rb')
        dummy = f.read(1024 * 1024).decode(locale.getpreferredencoding(),
                                           'ignore')

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Give enough time for starting job
    t = 0
    while t < 5:
        jobtype = vm.get_job_type()
        if "None" == jobtype:
            t += 1
            time.sleep(1)
            continue
        elif jobtype is False:
            logging.error("Get job type failed.")
            break
        else:
            logging.debug("Job started: %s", jobtype)
            break

    # Get domjobinfo while job is running
    ret = virsh.domjobinfo(vm_ref, ignore_status=True, debug=True)
    status = ret.exit_status

    # Clear process env
    if process and f:
        dummy = f.read()
        f.close()

        try:
            os.unlink(tmp_pipe)
        except OSError as detail:
            logging.info("Can't remove %s: %s", tmp_pipe, detail)

    if process:
        if process.poll():
            try:
                process.kill()
            except OSError:
                pass

    # Get completed domjobinfo
    if status_error == "no":
        time.sleep(5)
        if act_opt != "--live" and vm_ref == domid:
            # use vm_name instead of the id since the domain is shut off
            vm_ref = vm_name
        vm_ref = "%s --completed" % vm_ref
        ret_cmplt = virsh.domjobinfo(vm_ref, ignore_status=True, debug=True)
        status_cmplt = ret_cmplt.exit_status

    # Recover the environment.
    if actions == "managedsave":
        virsh.managedsave_remove(vm_name, ignore_status=True)
    if pre_vm_state == "suspend":
        vm.resume()
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0 or status_cmplt != 0:
            test.fail("Run failed with right command")

    if status_error == "no":
        # The 'managedsave' Operation will be shown as 'Save' in domjobinfo
        if actions == "managedsave":
            actions = "save"
        # Check output of "virsh domjobinfo"
        cmp_jobinfo(ret, info_list, "Unbounded", actions)
        # Check output of "virsh domjobinfo --completed"
        info_list.insert(
            info_list.index("Memory total") + 1, "Memory bandwidth")
        info_list[info_list.index("Expected downtime")] = "Total downtime"
        logging.debug("The expected info_list for completed job is %s",
                      info_list)
        cmp_jobinfo(ret_cmplt, info_list, "Completed", actions)
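
The FIFO trick above is what keeps the job alive long enough to query. A
minimal standalone sketch of the same idea, assuming a running domain
("demo-vm" is a placeholder) and only best-effort cleanup:

import os
import subprocess
import tempfile

from virttest import virsh

vm_name = "demo-vm"  # hypothetical running domain
fifo = os.path.join(tempfile.gettempdir(), "domjobinfo-demo.fifo")
if os.path.exists(fifo):
    os.unlink(fifo)
os.mkfifo(fifo)

# Save to a named pipe in the background; once the pipe buffer fills, the job
# stalls and stays visible to domjobinfo.
job = subprocess.Popen("virsh save %s %s" % (vm_name, fifo), shell=True,
                       stdout=subprocess.PIPE, stderr=subprocess.PIPE)
reader = open(fifo, "rb")
reader.read(1024 * 1024)  # let the job start streaming, then let it stall

print(virsh.domjobinfo(vm_name, ignore_status=True, debug=True).stdout)

# Best-effort cleanup: abort the job, release the pipe and remove it.
subprocess.call("virsh domjobabort %s" % vm_name, shell=True)
reader.close()
job.wait()
os.unlink(fifo)
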
                    libvirtd.stop()
                    virsh_cmd = ("(service libvirtd start)")
                    check_flags_parallel(
                        virsh_cmd,
                        bash_cmd % (managed_save_file, managed_save_file, "0"),
                        flags)
                elif test_loop_cmd:
                    loop_range = params.get("loop_range", "20")
                    vm_managedsave_loop(vm_name, loop_range, libvirtd)
                else:
                    vm_recover_check(option, libvirtd, check_shutdown)
    finally:
        # Restore test environment.
        # Restart libvirtd.service
        qemu_config.restore()
        libvirt_guests_config.restore()
        libvirtd.restart()
        if autostart_bypass_cache:
            virsh.autostart(vm_name,
                            "--disable",
                            ignore_status=True,
                            debug=True)
        vm.destroy(gracefully=False)
        virsh.managedsave_remove(vm_name, debug=True)
        vmxml_backup.sync()
        if multi_guests:
            for i in range(int(multi_guests)):
                virsh.remove_domain("%s_%s" % (vm_name, i),
                                    "--remove-all-storage",
                                    debug=True)
def run(test, params, env):
    """
    Test command: virsh managedsave.

    This command can save and destroy a
    running domain, so it can be restarted
    from the same state at a later time.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    managed_save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name

    # define function
    def vm_recover_check(option, libvirtd, check_shutdown=False):
        """
        Check if the vm can be recovered correctly.

        :param option : managedsave command option.
        :param libvirtd : libvirtd service handler.
        :param check_shutdown : whether to also shut down and restart the guest.
        """
        # At this point the vm should have been shut down
        if vm.is_alive():
            test.fail("Guest should be inactive")
        # Check vm managed save state.
        ret = virsh.dom_list("--managed-save --inactive", debug=True)
        vm_state1 = re.findall(r".*%s.*" % vm_name,
                               ret.stdout.strip())[0].split()[2]
        ret = virsh.dom_list("--managed-save --all", debug=True)
        vm_state2 = re.findall(r".*%s.*" % vm_name,
                               ret.stdout.strip())[0].split()[2]
        if vm_state1 != "saved" or vm_state2 != "saved":
            test.fail("Guest state should be saved")

        virsh.start(vm_name, debug=True)
        # Now the vm should be active
        if vm.is_dead():
            test.fail("Guest should be active")
        # Restart libvirtd and check vm status again.
        libvirtd.restart()
        if vm.is_dead():
            test.fail("Guest should be active after"
                      " restarting libvirtd")
        # Check managed save file:
        if os.path.exists(managed_save_file):
            test.fail("Managed save image exist "
                      "after starting the domain")
        if option:
            if option.count("running"):
                if vm.is_dead() or vm.is_paused():
                    test.fail("Guest state should be"
                              " running after started"
                              " because of '--running' option")
            elif option.count("paused"):
                if not vm.is_paused():
                    test.fail("Guest state should be"
                              " paused after started"
                              " because of '--paused' option")
        else:
            if params.get("paused_after_start_vm") == "yes":
                if not vm.is_paused():
                    test.fail("Guest state should be"
                              " paused after started"
                              " because of initia guest state")
        if check_shutdown:
            # Resume the domain.
            if vm.is_paused():
                vm.resume()
            vm.wait_for_login()
            # Shut down and start the domain again;
            # it should be in running state and allow login.
            vm.shutdown()
            vm.wait_for_shutdown()
            vm.start()
            vm.wait_for_login()

    def vm_undefine_check(vm_name):
        """
        Check if vm can be undefined with the --managed-save option
        """
        #backup xml file
        xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        if not os.path.exists(managed_save_file):
            test.fail("Can't find managed save image")
        #undefine domain with no options.
        if not virsh.undefine(vm_name, options=None,
                              ignore_status=True).exit_status:
            test.fail("Guest shouldn't be undefined"
                      "while domain managed save image exists")
        #undefine domain with managed-save option.
        if virsh.undefine(vm_name, options="--managed-save",
                          ignore_status=True).exit_status:
            test.fail("Guest can't be undefine with "
                      "managed-save option")

        if os.path.exists(managed_save_file):
            test.fail("Managed save image exists"
                      " after undefining vm")
        #restore and start the vm.
        xml_backup.define()
        vm.start()

    def check_flags_parallel(virsh_cmd, bash_cmd, flags):
        """
        Run the commands parallel and check the output.
        """
        cmd = ("%s & %s" % (virsh_cmd, bash_cmd))
        ret = process.run(cmd, ignore_status=True, shell=True,
                          ignore_bg_processes=True)
        output = ret.stdout_text.strip()
        logging.debug("check flags output: %s" % output)
        lines = re.findall(r"flags:.(\d+)", output, re.M)
        logging.debug("Find all fdinfo flags: %s" % lines)
        lines = [int(i, 8) & flags for i in lines]
        if flags not in lines:
            test.fail("Checking flags %s failed" % flags)

        return ret

    def check_multi_guests(guests, start_delay, libvirt_guests):
        """
        Check start_delay option for multiple guests.
        """
        # Destroy vm first
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Clone given number of guests
        timeout = params.get("clone_timeout", 360)
        for i in range(int(guests)):
            dst_vm = "%s_%s" % (vm_name, i)
            utils_libguestfs.virt_clone_cmd(vm_name, dst_vm,
                                            True, timeout=timeout)
            virsh.start(dst_vm, debug=True)

        # Wait 10 seconds for vm to start
        time.sleep(10)
        is_systemd = process.run("cat /proc/1/comm", shell=True).stdout_text.count("systemd")
        if is_systemd:
            libvirt_guests.restart()
            pattern = r'(.+ \d\d:\d\d:\d\d).+: Resuming guest.+done'
        else:
            ret = process.run("service libvirt-guests restart | \
                              awk '{ print strftime(\"%b %y %H:%M:%S\"), \
                              $0; fflush(); }'", shell=True)
            pattern = r'(.+ \d\d:\d\d:\d\d)+ Resuming guest.+done'

        # The libvirt-guests status command reads messages from the systemd
        # journal; in case the messages are not ready in time,
        # add a wait here.
        def wait_func():
            return libvirt_guests.raw_status().stdout.count("Resuming guest")

        utils_misc.wait_for(wait_func, 5)
        if is_systemd:
            ret = libvirt_guests.raw_status()
        logging.info("status output: %s", ret.stdout_text)
        resume_time = re.findall(pattern, ret.stdout_text, re.M)
        if not resume_time:
            test.fail("Can't see messages of resuming guest")

        # Convert time string to int
        resume_seconds = [time.mktime(time.strptime(
            tm, "%b %y %H:%M:%S")) for tm in resume_time]
        logging.info("Resume time in seconds: %s", resume_seconds)
        # Check if start_delay takes effect
        for i in range(len(resume_seconds)-1):
            if resume_seconds[i+1] - resume_seconds[i] < int(start_delay):
                test.fail("Checking start_delay failed")

    def wait_for_state(vm_state):
        """
        Wait until the vm reaches the given state.
        """
        utils_misc.wait_for(lambda: vm.state() == vm_state, 10)

    def check_guest_flags(bash_cmd, flags):
        """
        Check bypass_cache option for single guest.
        """
        # Drop caches.
        drop_caches()
        # Form the proper parallel command based on whether systemd is used
        is_systemd = process.run("cat /proc/1/comm", shell=True).stdout_text.count("systemd")
        if is_systemd:
            virsh_cmd_stop = "systemctl stop libvirt-guests"
            virsh_cmd_start = "systemctl start libvirt-guests"
        else:
            virsh_cmd_stop = "service libvirt-guests stop"
            virsh_cmd_start = "service libvirt-guests start"

        ret = check_flags_parallel(virsh_cmd_stop, bash_cmd %
                                   (managed_save_file, managed_save_file,
                                    "1"), flags)
        if is_systemd:
            ret = libvirt_guests.raw_status()
        logging.info("status output: %s", ret.stdout_text)
        if all(["Suspending %s" % vm_name not in ret.stdout_text,
                "stopped, with saved guests" not in ret.stdout_text]):
            test.fail("Can't see messages of suspending vm")
        # status command should return 3.
        if not is_systemd:
            ret = libvirt_guests.raw_status()
        if ret.exit_status != 3:
            test.fail("The exit code %s for libvirt-guests"
                      " status is not correct" % ret)

        # Wait for VM in shut off state
        wait_for_state("shut off")
        check_flags_parallel(virsh_cmd_start, bash_cmd %
                             (managed_save_file, managed_save_file,
                              "0"), flags)
        # Wait for VM in running state
        wait_for_state("running")

    def vm_msave_remove_check(vm_name):
        """
        Check managed save remove command.
        """
        if not os.path.exists(managed_save_file):
            test.fail("Can't find managed save image")
        virsh.managedsave_remove(vm_name, debug=True)
        if os.path.exists(managed_save_file):
            test.fail("Managed save image still exists")
        virsh.start(vm_name, debug=True)
        # The domain state should be running
        if vm.state() != "running":
            test.fail("Guest state should be"
                      " running after started")

    def vm_managedsave_loop(vm_name, loop_range, libvirtd):
        """
        Run a loop of managedsave command and check its result.
        """
        if vm.is_dead():
            virsh.start(vm_name, debug=True)
        for i in range(int(loop_range)):
            logging.debug("Test loop: %s" % i)
            virsh.managedsave(vm_name, debug=True)
            virsh.start(vm_name, debug=True)
        # Check libvirtd status.
        if not libvirtd.is_running():
            test.fail("libvirtd is stopped after cmd")
        # Check vm status.
        if vm.state() != "running":
            test.fail("Guest isn't in running state")

    def build_vm_xml(vm_name, **dargs):
        """
        Build the new domain xml and define it.
        """
        try:
            # stop vm before doing any change to xml
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            if dargs.get("cpu_mode"):
                if "cpu" in vmxml:
                    del vmxml.cpu
                cpuxml = vm_xml.VMCPUXML()
                cpuxml.mode = params.get("cpu_mode", "host-model")
                cpuxml.match = params.get("cpu_match", "exact")
                cpuxml.fallback = params.get("cpu_fallback", "forbid")
                cpu_topology = {}
                cpu_topology_sockets = params.get("cpu_topology_sockets")
                if cpu_topology_sockets:
                    cpu_topology["sockets"] = cpu_topology_sockets
                cpu_topology_cores = params.get("cpu_topology_cores")
                if cpu_topology_cores:
                    cpu_topology["cores"] = cpu_topology_cores
                cpu_topology_threads = params.get("cpu_topology_threads")
                if cpu_topology_threads:
                    cpu_topology["threads"] = cpu_topology_threads
                if cpu_topology:
                    cpuxml.topology = cpu_topology
                vmxml.cpu = cpuxml
                vmxml.vcpu = int(params.get("vcpu_nums"))
            if dargs.get("sec_driver"):
                seclabel_dict = {"type": "dynamic", "model": "selinux",
                                 "relabel": "yes"}
                vmxml.set_seclabel([seclabel_dict])

            vmxml.sync()
            vm.start()
        except Exception as e:
            logging.error(str(e))
            test.cancel("Build domain xml failed")

    status_error = ("yes" == params.get("status_error", "no"))
    vm_ref = params.get("managedsave_vm_ref", "name")
    libvirtd_state = params.get("libvirtd", "on")
    extra_param = params.get("managedsave_extra_param", "")
    progress = ("yes" == params.get("managedsave_progress", "no"))
    cpu_mode = "yes" == params.get("managedsave_cpumode", "no")
    test_undefine = "yes" == params.get("managedsave_undefine", "no")
    test_bypass_cache = "yes" == params.get("test_bypass_cache", "no")
    autostart_bypass_cache = params.get("autostart_bypass_cache", "")
    multi_guests = params.get("multi_guests", "")
    test_libvirt_guests = params.get("test_libvirt_guests", "")
    check_flags = "yes" == params.get("check_flags", "no")
    security_driver = params.get("security_driver", "")
    remove_after_cmd = "yes" == params.get("remove_after_cmd", "no")
    option = params.get("managedsave_option", "")
    check_shutdown = "yes" == params.get("shutdown_after_cmd", "no")
    pre_vm_state = params.get("pre_vm_state", "")
    move_saved_file = "yes" == params.get("move_saved_file", "no")
    test_loop_cmd = "yes" == params.get("test_loop_cmd", "no")
    if option:
        if not virsh.has_command_help_match('managedsave', option):
            # Older libvirt does not have this option
            test.cancel("Older libvirt does not"
                        " handle arguments consistently")

    # Backup xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Get the libvirtd service
    libvirtd = utils_libvirtd.Libvirtd()
    # Get config files.
    qemu_config = utils_config.LibvirtQemuConfig()
    libvirt_guests_config = utils_config.LibvirtGuestsConfig()
    # Get libvirt-guests service
    libvirt_guests = Factory.create_service("libvirt-guests")

    try:
        # Destroy vm first for setting configuration file
        if vm.state() == "running":
            vm.destroy(gracefully=False)
        # Prepare test environment.
        if libvirtd_state == "off":
            libvirtd.stop()
        if autostart_bypass_cache:
            ret = virsh.autostart(vm_name, "", ignore_status=True, debug=True)
            libvirt.check_exit_status(ret)
            qemu_config.auto_start_bypass_cache = autostart_bypass_cache
            libvirtd.restart()
        if security_driver:
            qemu_config.security_driver = [security_driver]
        if test_libvirt_guests:
            if multi_guests:
                start_delay = params.get("start_delay", "20")
                libvirt_guests_config.START_DELAY = start_delay
            if check_flags:
                libvirt_guests_config.BYPASS_CACHE = "1"
            # The config file format should be "x=y" instead of "x = y"
            process.run("sed -i -e 's/ = /=/g' "
                        "/etc/sysconfig/libvirt-guests",
                        shell=True)
            libvirt_guests.restart()

        # Change domain xml.
        if cpu_mode:
            build_vm_xml(vm_name, cpu_mode=True)
        if security_driver:
            build_vm_xml(vm_name, sec_driver=True)

        # Turn VM into certain state.
        if pre_vm_state == "transient":
            logging.info("Creating %s..." % vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            # Wait for VM to be in shut off state
            utils_misc.wait_for(lambda: vm.state() == "shut off", 10)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, ignore_status=True,
                            debug=True).exit_status:
                vmxml_backup.define()
                test.cancel("Cann't create the domain")

        # Wait for vm in stable state
        if params.get("start_vm") == "yes":
            if vm.state() == "shut off":
                vm.start()
                vm.wait_for_login()

        # run test case
        domid = vm.get_id()
        domuuid = vm.get_uuid()
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "uuid":
            vm_ref = domuuid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.count("invalid"):
            vm_ref = params.get(vm_ref)
        elif vm_ref == "name":
            vm_ref = vm_name

        # Ignore exception with "ignore_status=True"
        if progress:
            option += " --verbose"
        option += extra_param

        # For the bypass_cache test. Run a shell command to check fd flags while
        # executing the managedsave command; a standalone fdinfo sketch follows
        # this example.
        software_mgr = software_manager.SoftwareManager()
        if not software_mgr.check_installed('lsof'):
            logging.info('Installing lsof package:')
            software_mgr.install('lsof')
        bash_cmd = ("let i=1; while((i++<400)); do if [ -e %s ]; then (cat /proc"
                    "/$(lsof -w %s|awk '/libvirt_i/{print $2}')/fdinfo/*%s* |"
                    "grep 'flags:.*') && break; else sleep 0.05; fi; done;")
        # Flags to check bypass cache take effect
        flags = os.O_DIRECT
        if test_bypass_cache:
            # Drop caches.
            drop_caches()
            virsh_cmd = "virsh managedsave %s %s" % (option, vm_name)
            check_flags_parallel(virsh_cmd, bash_cmd %
                                 (managed_save_file, managed_save_file,
                                  "1"), flags)
            # Wait for VM in shut off state
            wait_for_state("shut off")
            virsh_cmd = "virsh start %s %s" % (option, vm_name)
            check_flags_parallel(virsh_cmd, bash_cmd %
                                 (managed_save_file, managed_save_file,
                                  "0"), flags)
            # Wait for VM in running state
            wait_for_state("running")
        elif test_libvirt_guests:
            logging.debug("libvirt-guests status: %s", libvirt_guests.status())
            if multi_guests:
                check_multi_guests(multi_guests,
                                   start_delay, libvirt_guests)

            if check_flags:
                check_guest_flags(bash_cmd, flags)

        else:
            # Ensure VM is running
            utils_misc.wait_for(lambda: vm.state() == "running", 10)
            ret = virsh.managedsave(vm_ref, options=option,
                                    ignore_status=True, debug=True)
            status = ret.exit_status
            # The progress information is output in the error message
            error_msg = ret.stderr.strip()
            if move_saved_file:
                cmd = "echo > %s" % managed_save_file
                process.run(cmd, shell=True)

            # recover libvirtd service start
            if libvirtd_state == "off":
                libvirtd.start()

            if status_error:
                if not status:
                    test.fail("Run successfully with wrong command!")
            else:
                if status:
                    test.fail("Run failed with right command")
                if progress:
                    if not error_msg.count("Managedsave:"):
                        test.fail("Got invalid progress output")
                if remove_after_cmd:
                    vm_msave_remove_check(vm_name)
                elif test_undefine:
                    vm_undefine_check(vm_name)
                elif autostart_bypass_cache:
                    libvirtd.stop()
                    virsh_cmd = ("(service libvirtd start)")
                    check_flags_parallel(virsh_cmd, bash_cmd %
                                         (managed_save_file, managed_save_file,
                                          "0"), flags)
                elif test_loop_cmd:
                    loop_range = params.get("loop_range", "20")
                    vm_managedsave_loop(vm_name, loop_range, libvirtd)
                else:
                    vm_recover_check(option, libvirtd, check_shutdown)
    finally:
        # Restore test environment.
        # Restart libvirtd.service
        qemu_config.restore()
        libvirt_guests_config.restore()
        libvirtd.restart()
        if autostart_bypass_cache:
            virsh.autostart(vm_name, "--disable",
                            ignore_status=True, debug=True)
        vm.destroy(gracefully=False)
        virsh.managedsave_remove(vm_name, debug=True)
        vmxml_backup.sync()
        if multi_guests:
            for i in range(int(multi_guests)):
                virsh.remove_domain("%s_%s" % (vm_name, i),
                                    "--remove-all-storage",
                                    debug=True)
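
The flags check used by check_flags_parallel() above boils down to reading the
octal "flags:" field from /proc/<pid>/fdinfo. A minimal, self-contained sketch
that inspects a file opened by the current process instead of libvirt's I/O
helper (the path is a placeholder and the filesystem must support O_DIRECT):

import os
import re

path = "/var/tmp/fdinfo-demo"  # hypothetical test file
fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_DIRECT, 0o644)
try:
    with open("/proc/%d/fdinfo/%d" % (os.getpid(), fd)) as fdinfo_file:
        fdinfo = fdinfo_file.read()
    # The "flags:" field is octal, so mask it with os.O_DIRECT.
    flags = int(re.search(r"flags:\s*(\d+)", fdinfo).group(1), 8)
    print("O_DIRECT set:", bool(flags & os.O_DIRECT))
finally:
    os.close(fd)
    os.remove(path)
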
def run(test, params, env):
    """
    Test command: virsh start.

    1) Get the params from params.
    2) Prepare libvirtd's status.
    3) Do the start operation.
    4) Result check.
    5) clean up.
    """
    # get the params from params
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm_ref = params.get("vm_ref", "vm1")
    opt = params.get("vs_opt", "")

    # Backup for recovery.
    vmxml_backup = libvirt_xml.vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    backup_name = vm_ref
    vm = None
    if vm_ref is not "":
        vm = env.get_vm(vm_ref)
    vmxml = libvirt_xml.VMXML()

    libvirtd_state = params.get("libvirtd", "on")
    pre_operation = params.get("vs_pre_operation", "")
    status_error = params.get("status_error", "no")

    # get the params for remote test
    remote_ip = params.get("remote_ip", "ENTER.YOUR.REMOTE.IP")
    remote_pwd = params.get("remote_pwd", "ENTER.YOUR.REMOTE.PASSWORD")
    local_ip = params.get("local_ip", "ENTER.YOUR.LOCAL.IP")
    if pre_operation == "remote" and (remote_ip.count("ENTER.YOUR.") or
                                      local_ip.count("ENTER.YOUR.")):
        raise error.TestNAError("Remote test parameters not configured")

    try:
        # prepare before start vm
        if libvirtd_state == "on":
            utils_libvirtd.libvirtd_start()
        elif libvirtd_state == "off":
            utils_libvirtd.libvirtd_stop()

        if pre_operation == "rename":
            new_vm_name = params.get("vs_new_vm_name", "virsh_start_vm1")
            vm = libvirt_xml.VMXML.vm_rename(vm, new_vm_name)
            vm_ref = new_vm_name
        elif pre_operation == "undefine":
            vmxml = vmxml.new_from_dumpxml(vm_ref)
            vmxml.undefine()

        # do the start operation
        try:
            if pre_operation == "remote":
                # get remote session
                session = remote.wait_for_login("ssh", remote_ip, "22", "root",
                                                remote_pwd, "#")
                # get uri of local
                uri = libvirt_vm.complete_uri(local_ip)

                cmd = "virsh -c %s start %s" % (uri, vm_ref)
                status, output = session.cmd_status_output(cmd)
                if status:
                    raise error.TestError(vm_ref, output)
            elif opt.count("console"):
                # With --console, the start command prints the guest's boot
                # messages and then drops into the login prompt. In this case
                # we start the vm with --console and log in on the console via
                # remote.handle_prompts().
                cmd = "start %s --console" % vm_ref
                virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC, auto_close=True)
                virsh_session.sendline(cmd)
                remote.handle_prompts(virsh_session, params.get("username", ""),
                                      params.get("password", ""), r"[\#\$]\s*$",
                                      timeout=60, debug=True)
            elif opt.count("autodestroy"):
                # With --autodestroy, the vm will be destroyed when the virsh
                # session is closed. So we execute the start command with
                # --autodestroy inside a virsh session, then close the session
                # and check whether the vm has been destroyed.
                virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC, auto_close=True)
                cmd = "start %s --autodestroy" % vm_ref
                status = virsh_session.cmd_status(cmd)
                if status:
                    raise error.TestFail("Failed to start vm with --autodestroy.")
                # Close the session; the vm should then be destroyed.
                virsh_session.close()
            elif opt.count("force-boot"):
                # With --force-boot, the VM will be started from a fresh boot
                # even if it has been saved with virsh managedsave.
                # In this case, we start the vm, execute "sleep 1000 &",
                # then save it with virsh managedsave. Finally, we start the
                # vm with --force-boot. To verify the result, we check the
                # sleep process: if it still exists, --force-boot failed,
                # otherwise the case passes (a standalone sketch of this check
                # follows this example).
                vm.start()
                session = vm.wait_for_login()
                status = session.cmd_status("sleep 1000&")
                if status:
                    raise error.TestError("Can not execute command in guest.")
                sleep_pid = session.cmd_output("echo $!").strip()
                virsh.managedsave(vm_ref)
                virsh.start(vm_ref, options=opt)
            else:
                cmd_result = virsh.start(vm_ref, options=opt)
                if cmd_result.exit_status:
                    if status_error == "no":
                        raise error.TestFail("Start vm failed.\n Detail: %s"
                                             % cmd_result)
                else:
                    # start vm successfully
                    if status_error == "yes":
                        raise error.TestFail("Run successfully with wrong "
                                             "command!\n Detail:%s"
                                             % cmd_result)

            if opt.count("paused"):
                if not (vm.state() == "paused"):
                    raise error.TestFail("VM is not paused when started with "
                                         "--paused.")
            elif opt.count("autodestroy"):
                if vm.is_alive():
                    raise error.TestFail("VM was started with --autodestroy,"
                                         "but not destroyed when virsh session "
                                         "closed.")
            elif opt.count("force-boot"):
                session = vm.wait_for_login()
                status = session.cmd_status("ps %s |grep '[s]leep 1000'"
                                            % sleep_pid)
                if not status:
                    raise error.TestFail("VM was started with --force-boot,"
                                         "but it is restored from a"
                                         " managedsave.")
            else:
                if status_error == "no" and not vm.is_alive():
                    raise error.TestFail("VM was started but it is not alive.")

        except remote.LoginError as detail:
            raise error.TestFail("Failed to login guest: %s" % detail)
    finally:
        # clean up
        if opt.count("force-boot"):
            virsh.managedsave_remove(vm_ref)
        
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_start()

        elif pre_operation == "rename":
            libvirt_xml.VMXML.vm_rename(vm, backup_name)

        if vm and vm.is_paused():
            vm.resume()

        # Restore VM
        vmxml_backup.sync()
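
The --force-boot branch above can be reduced to a single helper. A minimal
sketch, assuming "vm" is an avocado-vt VM object for a defined guest
(check_force_boot is a hypothetical name, not part of the test above):

from virttest import virsh


def check_force_boot(vm):
    """Return True if start --force-boot boots fresh instead of restoring."""
    vm.start()
    session = vm.wait_for_login()
    session.cmd_status("sleep 1000 &")
    virsh.managedsave(vm.name, debug=True)
    virsh.start(vm.name, options="--force-boot", debug=True)
    session = vm.wait_for_login()
    # A restore would bring the background sleep back; a fresh boot would not.
    restored = session.cmd_status("pgrep -f 'sleep 1000'") == 0
    return not restored
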
Beispiel #14
0
    except Exception as output:
        status = True
        logging.error("Exception: %s" % output)

    finally:
        # Recover libvirtd service status
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # Recover of domain
        if vm_ref == "transient" or list_ref == "--title":
            vm_backup.sync()
        elif vm_ref == "managed-save":
            # Recover saved guest.
            virsh.managedsave_remove(vm_name,
                                     ignore_status=True,
                                     print_info=True)

        # Check result
        status_error = (status_error == "no") and \
                       (addition_status_error == "no")
        if vm_ref == "managed-save":
            saved_output = re.search(vm_name + r"\s+saved", output)
            if saved_output:
                output = saved_output.group(0)
            else:
                output = ""

        if not status_error:
            if not status and re.search(result_expected, output):
                raise exceptions.TestFail("Run successfully with wrong command!")
Beispiel #15
0
def run(test, params, env):
    """
    Test command: virsh domjobinfo.

    The command returns information about jobs running on a domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform job action on a domain.
    4.Get running and completed job info by virsh domjobinfo.
    5.Recover test environment.
    6.Confirm the test result.
    """

    def get_subprocess(action, vm_name, file, remote_uri=None):
        """
        Execute background virsh command, return subprocess w/o waiting for exit()

        :param action : virsh command and its option.
        :param vm_name : VM's name
        :param file : virsh command's file option, could be vm.dump, vm.save, etc.
        """
        command = "virsh %s %s %s" % (action, vm_name, file)
        logging.debug("Action: %s", command)
        p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        return p

    def cmp_jobinfo(result, info_list, job_type, actions):
        """
        Compare the output jobinfo with expected one

        :param result : the return from domjobinfo cmd
        :param info_list : an expected domjobinfo list
        :param job_type : an expected value for 'Job Type'
        :param actions : the job operation
        """
        logging.debug(result.stdout)
        out_list = result.stdout.strip().splitlines()
        out_dict = dict([x.split(':') for x in out_list])
        ret_cmp = set(out_dict.keys()) == set(info_list)
        if not ret_cmp:
            test.fail("Not all output jobinfo items are as expected: Expect:%s, but get %s"
                      % (set(info_list), set(out_dict.keys())))
        else:
            if out_dict["Job type"].strip() != job_type:
                test.fail("Expect %s Job type but got %s" %
                          (job_type, out_dict["Job type"].strip()))
            if out_dict["Operation"].strip() != actions.capitalize():
                test.fail("Expect %s Operation but got %s" %
                          (actions.capitalize(), out_dict["Operation"].strip()))

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    pre_vm_state = params.get("pre_vm_state", "start")
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    # Instead of "paused_after_start_vm", use "pre_vm_state".
    # After starting the VM, wait for some time to make sure a job
    # can be created on this domain.
    if start_vm == "yes":
        vm.wait_for_login()
        if params.get("pre_vm_state") == "suspend":
            vm.pause()

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    actions = params.get("domjobinfo_action", "dump")
    act_opt = params.get("dump_opt", "")
    vm_ref = params.get("domjobinfo_vm_ref")
    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")
    # Use tmp_pipe to act as target file for job operation in subprocess,
    # such as vm.dump, vm.save, etc.
    tmp_pipe = os.path.join(data_dir.get_tmp_dir(), "domjobinfo.fifo")

    # Build job action
    action = ' '.join([actions, act_opt])
    if actions == "managedsave":
        tmp_pipe = '/var/lib/libvirt/qemu/save/%s.save' % vm.name

    # Expected domjobinfo list
    info_list = ["Job type", "Time elapsed",
                 "Data processed", "Data remaining", "Data total",
                 "Memory processed", "Memory remaining",
                 "Memory total", "Dirty rate",
                 "Iteration", "Constant pages", "Normal pages",
                 "Normal data", "Expected downtime", "Setup time"]
    if libvirt_version.version_compare(3, 2, 0):
        info_list.insert(1, "Operation")
        if libvirt_version.version_compare(3, 9, 0):
            info_list.insert(info_list.index("Dirty rate")+1, "Page size")
            if libvirt_version.version_compare(5, 0, 0):
                info_list.insert(info_list.index("Iteration")+1, "Postcopy requests")
    logging.debug("The expected info_list for running job is %s", info_list)

    # run test case
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, params.get("domjobinfo_extra"))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif 'invalid' in vm_ref:
        vm_ref = params.get(vm_ref)

    # Get the subprocess of VM.
    # The command's purpose is to get domjobinfo for a running domain job,
    # so before running "domjobinfo" we must create a job on the domain.
    process = None
    if start_vm == "yes" and status_error == "no":
        if os.path.exists(tmp_pipe):
            os.unlink(tmp_pipe)
        os.mkfifo(tmp_pipe)

        # Target file param is not needed for managedsave operation
        if action == "managedsave ":
            process = get_subprocess(action, vm_name, "", None)
        else:
            process = get_subprocess(action, vm_name, tmp_pipe, None)

        f = open(tmp_pipe, 'rb')
        dummy = f.read(1024 * 1024).decode(locale.getpreferredencoding(), 'ignore')

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Give enough time for starting job
    t = 0
    while t < 5:
        jobtype = vm.get_job_type()
        if "None" == jobtype:
            t += 1
            time.sleep(1)
            continue
        elif jobtype is False:
            logging.error("Get job type failed.")
            break
        else:
            logging.debug("Job started: %s", jobtype)
            break

    # Get domjobinfo while job is running
    ret = virsh.domjobinfo(vm_ref, ignore_status=True, debug=True)
    status = ret.exit_status

    # Clear process env
    if process and f:
        dummy = f.read()
        f.close()

        try:
            os.unlink(tmp_pipe)
        except OSError as detail:
            logging.info("Can't remove %s: %s", tmp_pipe, detail)

    if process:
        if process.poll():
            try:
                process.kill()
            except OSError:
                pass

    # Get completed domjobinfo
    if status_error == "no":
        time.sleep(5)
        if act_opt != "--live" and vm_ref == domid:
            # use vm_name instead of the id since the domain is shut off
            vm_ref = vm_name
        vm_ref = "%s --completed" % vm_ref
        ret_cmplt = virsh.domjobinfo(vm_ref, ignore_status=True, debug=True)
        status_cmplt = ret_cmplt.exit_status

    # Recover the environment.
    if actions == "managedsave":
        virsh.managedsave_remove(vm_name, ignore_status=True)
    if pre_vm_state == "suspend":
        vm.resume()
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0 or status_cmplt != 0:
            test.fail("Run failed with right command")

    if status_error == "no":
        # The 'managedsave' Operation will be shown as 'Save' in domjobinfo
        if actions == "managedsave":
            actions = "save"
        # Check output of "virsh domjobinfo"
        cmp_jobinfo(ret, info_list, "Unbounded", actions)
        # Check output of "virsh domjobinfo --completed"
        info_list.insert(info_list.index("Memory total")+1, "Memory bandwidth")
        info_list[info_list.index("Expected downtime")] = "Total downtime"
        logging.debug("The expected info_list for completed job is %s", info_list)
        cmp_jobinfo(ret_cmplt, info_list, "Completed", actions)
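
The field comparison in cmp_jobinfo() above is plain dict bookkeeping over
"Key: value" lines. A minimal sketch with illustrative (not captured) output:

sample = """Job type:         Unbounded
Operation:        Save
Time elapsed:     512          ms
Data processed:   1.234 MiB"""

out_dict = dict(line.split(":", 1) for line in sample.splitlines())
expected = {"Job type", "Operation", "Time elapsed", "Data processed"}

print("missing:", expected - set(out_dict))
print("unexpected:", set(out_dict) - expected)
print("Job type is", out_dict["Job type"].strip())
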
def post_kill_virsh_while_managedsave(params, libvirtd, vm):
    """
    Cleanup for test kill_virsh_while_managedsave
    """
    virsh.managedsave_remove(vm.name)
Beispiel #17
0
def run(test, params, env):
    """
    Test command: virsh list.

    1) Filter parameters according to libvirtd's version
    2) Prepare the domain's existing state: transient, managed-save.
    3) Prepare libvirt's status.
    4) Execute list command.
    5) Result check.
    """
    def list_local_domains_on_remote(options_ref, remote_ip, remote_passwd,
                                     local_ip, remote_user, local_user,
                                     local_pwd):
        """
        Create a virsh list command and execute it on remote host.
        It will list local domains on remote host.

        :param options_ref:options in virsh list command.
        :param remote_ip:remote host's ip.
        :param remote_passwd:remote host's password.
        :param local_ip:local ip, to create uri in virsh list.
        :return:return status and output of the virsh list command.
        """
        complete_uri = libvirt_vm.complete_uri(local_ip)
        command_on_remote = ("virsh -c %s list %s"
                             % (complete_uri, options_ref))
        try:
            # setup autologin for ssh from remote machine to execute commands
            # remotely
            config_opt = ["StrictHostKeyChecking=no"]
            ssh_key.setup_remote_ssh_key(remote_ip, remote_user,
                                         remote_passwd, hostname2=local_ip,
                                         user2=local_user,
                                         password2=local_pwd,
                                         config_options=config_opt)
            session = remote.remote_login("ssh", remote_ip, "22", remote_user,
                                          remote_passwd, "#")
            time.sleep(5)
            status, output = session.cmd_status_output(
                command_on_remote, internal_timeout=30)
        except Exception as info:
            logging.error("Shell failed to execute command from"
                          " remote")
            return 1, info
        time.sleep(5)
        session.close()
        return int(status), output

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    options_ref = params.get("list_options_ref", "")
    list_ref = params.get("list_type_ref", "")
    vm_ref = params.get("vm_ref", "")
    status_error = params.get("status_error", "no")
    addition_status_error = params.get("addition_status_error", "no")
    desc = params.get("list_desc", "")
    libvirtd = params.get("libvirtd", "on")
    remote_ref = params.get("remote_ref", "")
    remote_ip = params.get("remote_ip")
    remote_pwd = params.get("remote_pwd", None)
    local_ip = params.get("local_ip")
    remote_user = params.get("remote_user", "root")
    local_user = params.get("username", "root")
    local_pwd = params.get("local_pwd", None)

    vm = env.get_vm(vm_name)
    domuuid = vm.get_uuid()
    domid = vm.get_id()

    # Some parameters are not supported on old libvirt, skip them.
    help_info = virsh.help("list").stdout.strip()
    if vm_ref and not re.search(vm_ref, help_info):
        raise exceptions.TestSkipError("This version does not support vm type:%s"
                                       % vm_ref)
    if list_ref and not re.search(list_ref, help_info):
        raise exceptions.TestSkipError("This version does not support list"
                                       " type:%s" % list_ref)

    # If a transient domain is destroyed, it will disappear.
    if vm_ref == "transient" and options_ref == "inactive":
        logging.info("Set addition_status_error to yes")
        logging.info(
            "because transient domain will disappear after destroyed.")
        addition_status_error = "yes"

    if vm_ref == "transient":
        vm_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vm.undefine()
    elif vm_ref == "managed-save":
        virsh.managedsave(vm_name, ignore_status=True, print_info=True)

    try:
        # run test case
        if list_ref == "--uuid":
            result_expected = domuuid
            logging.info("%s's uuid is: %s", vm_name, domuuid)
        elif list_ref == "--title":
            vm_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if options_ref == "inactive":
                virsh.desc(vm_name, "--config --title", desc)
            else:
                virsh.desc(vm_name, "--live --title", desc)
            result_expected = desc
            logging.info("%s's title is: %s", vm_name, desc)
        else:
            result_expected = vm_name
            logging.info("domain's name is: %s", vm_name)

        if options_ref == "vm_id":
            logging.info("%s's running-id is: %s", vm_name, domid)
            options_ref = "%s %s" % (domid, list_ref)
        elif options_ref == "vm_uuid":
            logging.info("%s's uuid is: %s", vm_name, domuuid)
            options_ref = "%s %s" % (domuuid, list_ref)
        elif options_ref == "inactive":
            vm.destroy()
            options_ref = "--inactive %s" % list_ref
        elif options_ref == "vm_name":
            options_ref = "%s %s" % (vm_name, list_ref)
        elif options_ref == "all":
            options_ref = "--all %s" % list_ref
        elif options_ref == "":
            options_ref = "%s" % list_ref

        # Prepare libvirtd status
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        if remote_ref == "remote":
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise exceptions.TestSkipError(
                    "Remote test parameters unchanged from default")
            logging.info("Execute virsh command on remote host %s.", remote_ip)
            status, output = list_local_domains_on_remote(options_ref,
                                                          remote_ip,
                                                          remote_pwd,
                                                          local_ip,
                                                          remote_user,
                                                          local_user,
                                                          local_pwd)
            logging.info("Status:%s", status)
            logging.info("Output:\n%s", output)
        else:
            if vm_ref:
                options_ref = "%s --%s" % (options_ref, vm_ref)
            result = virsh.dom_list(
                options_ref, ignore_status=True, print_info=True)
            status = result.exit_status
            output = result.stdout.strip()

    except Exception as output:
        status = True
        logging.error("Exception: %s" % output)

    finally:
        # Recover libvirtd service status
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # Recover of domain
        if vm_ref == "transient" or list_ref == "--title":
            vm_backup.sync()
        elif vm_ref == "managed-save":
            # Recover saved guest.
            virsh.managedsave_remove(vm_name, ignore_status=True,
                                     print_info=True)

        # Check result
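        # After this reassignment status_error is True only when success is
        # expected (both expectation flags are "no"); the checks below are
        # inverted accordingly.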
        status_error = (status_error == "no") and \
                       (addition_status_error == "no")
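        # With a managed save image present the test expects the domain to be
        # listed with state "saved" (e.g. a row like
        # " -    avocado-vt-vm1    saved"); narrow the output to that row so
        # the name match below is meaningful.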
        if vm_ref == "managed-save":
            saved_output = re.search(vm_name + r"\s+saved", output)
            if saved_output:
                output = saved_output.group(0)
            else:
                output = ""

        if not status_error:
            if not status and re.search(result_expected, output):
                raise exceptions.TestFail("Run successful with wrong command!")
        else:
            if status:
                raise exceptions.TestFail("Run failed with right command.")
            if not re.search(result_expected, output):
                raise exceptions.TestFail("Run successful but result is not"
                                          " expected.")
                elif test_undefine:
                    vm_undefine_check(vm_name)
                elif autostart_bypass_cache:
                    libvirtd.stop()
                    virsh_cmd = ("(service libvirtd start)")
                    check_flags_parallel(virsh_cmd, bash_cmd %
                                         (managed_save_file, managed_save_file,
                                          "0"), flags)
                elif test_loop_cmd:
                    loop_range = params.get("loop_range", "20")
                    vm_managedsave_loop(vm_name, loop_range, libvirtd)
                else:
                    vm_recover_check(option, libvirtd, check_shutdown)
    finally:
        # Restore test environment.
        # Restart libvirtd.service
        qemu_config.restore()
        libvirt_guests_config.restore()
        libvirtd.restart()
        if autostart_bypass_cache:
            virsh.autostart(vm_name, "--disable",
                            ignore_status=True, debug=True)
        vm.destroy(gracefully=False)
        virsh.managedsave_remove(vm_name, debug=True)
        vmxml_backup.sync()
        if multi_guests:
            for i in range(int(multi_guests)):
                virsh.remove_domain("%s_%s" % (vm_name, i),
                                    "--remove-all-storage",
                                    debug=True)
Beispiel #19
0
def run(test, params, env):
    """
    Test command: virsh list.

    1) Filter parameters according to libvirtd's version.
    2) Prepare the domain's existing state: transient, managed-save.
    3) Prepare libvirt's status.
    4) Execute list command.
    5) Result check.
    """
    def list_local_domains_on_remote(options_ref, remote_ip, remote_passwd, local_ip):
        """
        Create a virsh list command and execute it on remote host.
        It will list local domains on remote host.

        :param options_ref:options in virsh list command.
        :param remote_ip:remote host's ip.
        :param remote_passwd:remote host's password.
        :param local_ip:local ip, to create uri in virsh list.
        :return:return status and output of the virsh list command.
        """
        complete_uri = libvirt_vm.complete_uri(local_ip)
        command_on_remote = "virsh -c %s list %s" % (complete_uri, options_ref)
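        # complete_uri() builds a URI that points back at this host, so the
        # remote side ends up running something like
        # "virsh -c qemu+ssh://<local_ip>/system list <options>" (the exact
        # scheme depends on how complete_uri assembles it).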
        session = remote.remote_login(
            "ssh", remote_ip, "22", "root", remote_passwd, "#")
        time.sleep(5)
        status, output = session.cmd_status_output(
            command_on_remote, internal_timeout=5)
        time.sleep(5)
        session.close()
        return int(status), output

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    options_ref = params.get("list_options_ref", "")
    list_ref = params.get("list_type_ref", "")
    vm_ref = params.get("vm_ref", "")

    # Some parameters are not supported on old libvirt, skip them.
    help_info = virsh.command("help list").stdout.strip()
    if vm_ref and not re.search(vm_ref, help_info):
        raise error.TestNAError("This version do not support vm type:%s"
                                % vm_ref)
    if list_ref and not re.search(list_ref, help_info):
        raise error.TestNAError("This version do not support list type:%s"
                                % list_ref)

    status_error = params.get("status_error", "no")
    addition_status_error = params.get("addition_status_error", "no")
    domuuid = vm.get_uuid().strip()
    # If a transient domain is destroyed, it will disappear.
    if vm_ref == "transient" and options_ref == "inactive":
        logging.info("Set addition_status_error to yes")
        logging.info(
            "because transient domain will disappear after destroyed.")
        addition_status_error = "yes"

    if vm_ref == "transient":
        tmp_xml = vm.backup_xml()
        vm.undefine()
    elif vm_ref == "managed-save":
        virsh.managedsave(vm_name, ignore_status=True, print_info=True)

    # Prepare libvirtd status
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # run test case
    if list_ref == "--uuid":
        result_expected = domuuid
        logging.info("%s's uuid is: %s", vm_name, domuuid)
    else:
        result_expected = vm_name
        logging.info("domain's name is: %s", vm_name)

    if options_ref == "vm_id":
        domid = vm.get_id().strip()
        logging.info("%s's running-id is: %s", vm_name, domid)
        options_ref = "%s %s" % (domid, list_ref)
    elif options_ref == "vm_uuid":
        logging.info("%s's uuid is: %s", vm_name, domuuid)
        options_ref = "%s %s" % (domuuid, list_ref)
    elif options_ref == "inactive":
        vm.destroy()
        options_ref = "--inactive %s" % list_ref
    elif options_ref == "vm_name":
        options_ref = "%s %s" % (vm_name, list_ref)
    elif options_ref == "all":
        options_ref = "--all %s" % list_ref
    elif options_ref == "":
        options_ref = "%s" % list_ref

    remote_ref = params.get("remote_ref", "local")
    if remote_ref == "remote":
        remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
        remote_pwd = params.get("remote_pwd", None)
        local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
        if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
            raise error.TestNAError(
                "Remote test parameters unchanged from default")
        logging.info("Execute virsh command on remote host %s.", remote_ip)
        status, output = list_local_domains_on_remote(
            options_ref, remote_ip, remote_pwd, local_ip)
        logging.info("Status:%s", status)
        logging.info("Output:\n%s", output)
    else:
        if vm_ref:
            options_ref = "%s --%s" % (options_ref, vm_ref)
        result = virsh.dom_list(
            options_ref, ignore_status=True, print_info=True)
        status = result.exit_status
        output = result.stdout.strip()

    # Recover libvirtd service status
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Recover of domain
    if vm_ref == "transient":
        vm.define(tmp_xml)
    elif vm_ref == "managed-save":
        # Recover saved guest.
        virsh.managedsave_remove(vm_name, ignore_status=True, print_info=True)

    # Check result
    status_error = (status_error == "no") and (addition_status_error == "no")
    if vm_ref == "managed-save":
        saved_output = re.search(vm_name + r"\s+saved", output)
        if saved_output:
            output = saved_output.group(0)
        else:
            output = ""

    if not status_error:
        if status == 0 and re.search(result_expected, output):
            raise error.TestFail("Run successful with wrong command!")
    else:
        if status != 0:
            raise error.TestFail("Run failed with right command.")
        if not re.search(result_expected, output):
            raise error.TestFail("Run successful but result is not expected.")
Beispiel #20
0
def run(test, params, env):
    """
    Test interface xml options.

    1.Prepare test environment, destroy or suspend a VM.
    2.Perform test operation.
    3.Recover test environment.
    4.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': False}
    hook_file = params.get("hook_file", "/etc/libvirt/hooks/qemu")
    hook_log = params.get("hook_log", "/tmp/qemu.log")

    def prepare_hook_file(hook_op):
        """
        Create hook file.
        """
        logging.info("hook script: %s", hook_op)
        hook_lines = hook_op.split(';')
        hook_dir = os.path.dirname(hook_file)
        if not os.path.exists(hook_dir):
            os.mkdir(hook_dir)
        with open(hook_file, 'w') as hf:
            hf.write('\n'.join(hook_lines))
        os.chmod(hook_file, 0o755)
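        # libvirt only runs a hook script that exists and is executable,
        # hence the 0o755 mode above.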

        # restart libvirtd
        libvirtd.restart()

    def check_hooks(opt):
        """
        Check hook operations in log file.
        """
        logging.debug("Trying to check the string '%s'"
                      " in hook log", opt)
        if not os.path.exists(hook_log):
            logging.debug("Log file doesn't exist")
            return False

        logs = None
        with open(hook_log, 'r') as lf:
            logs = lf.read()
        if not logs:
            return False

        logging.debug("Read from hook log file: %s", logs)
        if opt in logs:
            return True
        else:
            return False

    def start_stop_hook():
        """
        Do start/stop operation and check the results.
        """
        logging.info("Try to test start/stop hooks...")
        hook_para = "%s %s" % (hook_file, vm_name)
        prepare_hook_file(hook_script %
                          (vm_name, hook_log))
        vm.start()
        vm.wait_for_login().close()
        try:
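            # The qemu hook is invoked as
            # "<hook_file> <vm_name> <operation> <sub-operation> -":
            # 'prepare'/'start'/'started' while the guest starts and
            # 'stopped'/'release' when it is destroyed, which is what the
            # assertions below expect.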
            hook_str = hook_para + " prepare begin -"
            assert check_hooks(hook_str)
            hook_str = hook_para + " start begin -"
            assert check_hooks(hook_str)
            hook_str = hook_para + " started begin -"
            assert check_hooks(hook_str)
            # stop the vm
            vm.destroy()
            hook_str = hook_para + " stopped end -"
            assert check_hooks(hook_str)
            hook_str = hook_para + " release end -"
            assert check_hooks(hook_str)
        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Failed to check "
                      "start/stop hooks.")

    def save_restore_hook():
        """
        Do save/restore operation and check the results.
        """
        hook_para = "%s %s" % (hook_file, vm_name)
        save_file = os.path.join(test.tmpdir,
                                 "%s.save" % vm_name)
        disk_src = vm.get_first_disk_devices()['source']
        if domainxml_test:
            disk_dist = "/tmp/%s.move" % vm_name
            shutil.copy(disk_src, disk_dist)
            script = (hook_script %
                      (vm_name, disk_src, disk_dist))
            prepare_hook_file(script)
        elif basic_test:
            prepare_hook_file(hook_script %
                              (vm_name, hook_log))
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if domainxml_test:
            disk_src_save = vm.get_first_disk_devices()['source']
            if disk_src != disk_src_save:
                test.fail("Failed to check hooks for"
                          " save operation")
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if os.path.exists(save_file):
            os.remove(save_file)
        if domainxml_test:
            disk_src_restore = vm.get_first_disk_devices()['source']
            if disk_dist != disk_src_restore:
                test.fail("Failed to check hooks for"
                          " restore operation")
            vm.destroy()
            if os.path.exists(disk_dist):
                os.remove(disk_dist)
            vmxml_backup.sync()
        if basic_test:
            hook_str = hook_para + " restore begin -"
            if not check_hooks(hook_str):
                test.fail("Failed to check "
                          "restore hooks.")

    def managedsave_hook():
        """
        Do managedsave operation and check the results.
        """
        hook_para = "%s %s" % (hook_file, vm_name)
        save_file = os.path.join(test.tmpdir,
                                 "%s.save" % vm_name)
        disk_src = vm.get_first_disk_devices()['source']
        if domainxml_test:
            disk_dist = "/tmp/%s.move" % vm_name
            shutil.copy(disk_src, disk_dist)
            script = (hook_script %
                      (vm_name, disk_src, disk_dist))
            prepare_hook_file(script)
        elif basic_test:
            prepare_hook_file(hook_script %
                              (vm_name, hook_log))
        ret = virsh.managedsave(vm_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if domainxml_test:
            disk_src_save = vm.get_first_disk_devices()['source']
            if disk_src != disk_src_save:
                test.fail("Failed to check hooks for"
                          " managedsave operation")
        vm.start()
        if os.path.exists(save_file):
            os.remove(save_file)
        if domainxml_test:
            disk_src_restore = vm.get_first_disk_devices()['source']
            if disk_dist != disk_src_restore:
                test.fail("Failed to check hooks for"
                          " managedsave operation")
            vm.destroy()
            if os.path.exists(disk_dist):
                os.remove(disk_dist)
            vmxml_backup.sync()

        if basic_test:
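            # Starting a guest that has a managed save image resumes from that
            # image, so the qemu hook fires with 'restore' rather than 'start'.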
            hook_str = hook_para + " restore begin -"
            if not check_hooks(hook_str):
                test.fail("Failed to check "
                          "managedsave hooks.")

    def libvirtd_hook():
        """
        Check the libvirtd hooks.
        """
        prepare_hook_file(hook_script %
                          (vm_name, hook_log))
        hook_para = "%s %s" % (hook_file, vm_name)
        libvirtd.restart()
        try:
            hook_str = hook_para + " reconnect begin -"
            assert check_hooks(hook_str)
        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Failed to check"
                      " libvirtd hooks")

    def daemon_hook():
        """
        Check the libvirtd hooks.
        """
        # stop daemon first
        libvirtd.stop()
        prepare_hook_file(hook_script % hook_log)
        try:
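            # The daemon hook is invoked with 'start'/'shutdown' when the
            # service starts or stops and with 'reload begin SIGHUP' when the
            # daemon gets SIGHUP, matching the strings asserted below.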
            libvirtd.start()
            hook_str = hook_file + " - start - start"
            assert check_hooks(hook_str)
            # Restart libvirtd and test again
            if os.path.exists(hook_log):
                os.remove(hook_log)
            libvirtd.restart()
            hook_str = hook_file + " - shutdown - shutdown"
            assert check_hooks(hook_str)
            hook_str = hook_file + " - start - start"
            assert check_hooks(hook_str)

            # kill the daemon with SIGHUP
            if os.path.exists(hook_log):
                os.remove(hook_log)
            utils_misc.signal_program('libvirtd', 1,
                                      '/var/run')
            hook_str = hook_file + " - reload begin SIGHUP"
            assert check_hooks(hook_str)

        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Failed to check"
                      " daemon hooks")

    def attach_hook():
        """
        Check attach hooks.
        """
        # Start a domain with qemu command.
        disk_src = vm.get_first_disk_devices()['source']
        vm_test = "foo"
        prepare_hook_file(hook_script %
                          (vm_test, hook_log))
        qemu_bin = params.get("qemu_bin", "/usr/libexec/qemu-kvm")
        if "ppc" in platform.machine():
            qemu_bin = "%s -machine pseries" % qemu_bin
        qemu_cmd = ("%s -drive file=%s,if=none,bus=0,unit=1"
                    " -monitor unix:/tmp/demo,"
                    "server,nowait -name %s" %
                    (qemu_bin, disk_src, vm_test))
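        # Launch a bare qemu process first, then let libvirt adopt it with
        # "virsh qemu-attach <pid>"; adoption should fire the qemu hook with
        # the 'attach' operation for the name given via -name.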
        ret = process.run("%s &" % qemu_cmd, shell=True)
        pid = process.run("ps -ef | grep '%s' | grep -v grep | awk"
                          " '{print $2}'" % qemu_cmd, shell=True).stdout.strip()
        if not pid:
            test.fail("Cannot get pid of qemu command")
        ret = virsh.qemu_attach(pid, **virsh_dargs)
        if ret.exit_status:
            utils_misc.kill_process_tree(pid)
            test.fail("Cannot attach qemu process")
        else:
            virsh.destroy(vm_test)
        hook_str = hook_file + " " + vm_test + " attach begin -"
        if not check_hooks(hook_str):
            test.fail("Failed to check"
                      " attach hooks")

    def edit_iface(net_name):
        """
        Edit interface options for vm.
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
        iface_xml = vmxml.get_devices(device_type="interface")[0]
        vmxml.del_device(iface_xml)
        iface_xml.type_name = "network"
        iface_xml.source = {"network": net_name}
        del iface_xml.address
        vmxml.add_device(iface_xml)
        vmxml.sync()

    def network_hook():
        """
        Check network hooks.
        """
        # Set interface to use default network
        net_name = params.get("net_name", "default")
        edit_iface(net_name)
        prepare_hook_file(hook_script %
                          (net_name, hook_log))
        try:
            # destroy the network
            ret = virsh.net_destroy(net_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            hook_str = hook_file + " " + net_name + " stopped end -"
            assert check_hooks(hook_str)

            # start network
            ret = virsh.net_start(net_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            hook_str = hook_file + " " + net_name + " start begin -"
            assert check_hooks(hook_str)
            hook_str = hook_file + " " + net_name + " started begin -"
            assert check_hooks(hook_str)

            # plug an interface
            if vm.is_dead():
                vm.start()
                vm.wait_for_login().close()
            mac_addr = "52:54:00:9a:53:a9"
            ret = virsh.attach_interface(vm_name,
                                         ("network %s --mac %s" %
                                          (net_name, mac_addr)))
            libvirt.check_exit_status(ret)
            hook_str = hook_file + " " + net_name + " plugged begin -"
            assert check_hooks(hook_str)
            ret = virsh.detach_interface(vm_name,
                                         "network --mac %s" % mac_addr)
            libvirt.check_exit_status(ret)
            hook_str = hook_file + " " + net_name + " unplugged begin -"
            assert check_hooks(hook_str)
            # remove the log file
            if os.path.exists(hook_log):
                os.remove(hook_log)
            # destroy the domain
            vm.destroy()
            hook_str = hook_file + " " + net_name + " unplugged begin -"
            assert check_hooks(hook_str)
        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Failed to check"
                      " network hooks")

    def run_scale_test():
        """
        Try to start and stop domain many times.
        """
        prepare_hook_file(hook_script)
        loop_num = int(params.get("loop_num", 30))
        loop_timeout = int(params.get("loop_timeout", 600))
        cmd1 = ("for i in {1..%s};do echo $i 'start guest -';"
                "virsh start %s;sleep 1;echo $i 'stop guest -';"
                "virsh destroy %s;sleep 1;done;"
                % (loop_num, vm_name, vm_name))
        cmd2 = ("for i in {1..%s};do virsh list;sleep 1;done;"
                % loop_num * 2)
        utils_misc.run_parallel([cmd1, cmd2], timeout=loop_timeout)

    start_error = "yes" == params.get("start_error", "no")
    test_start_stop = "yes" == params.get("test_start_stop", "no")
    test_attach = "yes" == params.get("test_attach", "no")
    test_libvirtd = "yes" == params.get("test_libvirtd", "no")
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_saverestore = "yes" == params.get("test_saverestore", "no")
    test_daemon = "yes" == params.get("test_daemon", "no")
    test_network = "yes" == params.get("test_network", "no")
    basic_test = "yes" == params.get("basic_test", "yes")
    scale_test = "yes" == params.get("scale_test", "yes")
    domainxml_test = "yes" == params.get("domainxml_test", "no")

    # The hook script is provided from config
    hook_script = params.get("hook_script")

    # Destroy VM first
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        try:
            if test_start_stop:
                start_stop_hook()
            elif test_attach:
                attach_hook()
            elif start_error:
                prepare_hook_file(hook_script %
                                  (vm_name, hook_log))
            elif test_daemon:
                daemon_hook()
            elif test_network:
                network_hook()
            elif scale_test:
                run_scale_test()
            # Start the domain
            if vm.is_dead():
                vm.start()
                vm.wait_for_login().close()
            if test_libvirtd:
                libvirtd_hook()
            elif test_saverestore:
                save_restore_hook()
            elif test_managedsave:
                managedsave_hook()

        except virt_vm.VMStartError as e:
            logging.info(str(e))
            if start_error:
                pass
            else:
                test.fail('VM Failed to start for some reason!')
        else:
            if start_error:
                test.fail('VM started unexpected')

    finally:
        # Recover VM.
        logging.info("Restoring vm...")
        if test_managedsave:
            virsh.managedsave_remove(vm_name)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if os.path.exists(hook_file):
            os.remove(hook_file)
        if os.path.exists(hook_log):
            os.remove(hook_log)
        libvirtd.restart()
        vmxml_backup.sync()
Beispiel #21
0
def run(test, params, env):
    """
    Test command: virsh domcontrol.

    The command can show the state of a control interface to the domain.
    1.Prepare test environment, destroy or suspend a VM.
    2.Do action to get a subprocess(dump, save, restore, managedsave) if
      domcontrol_job is set as yes.
    3.Perform virsh domcontrol to check state of a control interface to the
      domain.
    4.Recover the VM's status and wait for the subprocess over.
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    pre_vm_state = params.get("pre_vm_state", "running")
    options = params.get("domcontrol_options", "")
    action = params.get("domcontrol_action", "dump")
    tmp_file = os.path.join(test.tmpdir, "domcontrol.tmp")
    vm_ref = params.get("domcontrol_vm_ref")
    job = params.get("domcontrol_job", "yes")
    readonly = "yes" == params.get("readonly", "no")
    status_error = params.get("status_error", "no")
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    # Instead of "paused_after_start_vm", use "pre_vm_state".
    # After starting the VM, wait for some time to make sure the job
    # can be created on this domain.
    if start_vm == "yes":
        vm.wait_for_login()
        if params.get("pre_vm_state") == "suspend":
            vm.pause()

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name

    if action == "managedsave":
        tmp_file = '/var/lib/libvirt/qemu/save/%s.save' % vm.name

    if action == "restore":
        virsh.save(vm_name, tmp_file, ignore_status=True)

    process = None
    if job == "yes" and start_vm == "yes" and status_error == "no":
        # Check domain control interface state with job on domain.
        process = get_subprocess(action, vm_name, tmp_file)
        while process.poll() is None:
            if vm.is_alive():
                ret = virsh.domcontrol(vm_ref, options, ignore_status=True,
                                       debug=True)
                status = ret.exit_status
                # check status_error
                if status != 0:
                    # Do not raise error if domain is not running, as save,
                    # managedsave and restore will change the domain state
                    # from running to shutoff or reverse, and the timing of
                    # the state change is not predictable, so skip the error
                    # of domain state change and focus on domcontrol command
                    # status while domain is running.
                    if vm.is_alive():
                        raise error.TestFail("Run failed with right command")
    else:
        # Check domain control interface state without job on domain.
        ret = virsh.domcontrol(vm_ref, options, readonly=readonly,
                               ignore_status=True, debug=True)
        status = ret.exit_status

        # check status_error
        if status_error == "yes":
            if status == 0:
                raise error.TestFail("Run successfully with wrong command!")
        elif status_error == "no":
            if status != 0:
                raise error.TestFail("Run failed with right command")

    # Recover the environment.
    if action == "managedsave":
        virsh.managedsave_remove(vm_name, ignore_status=True)
    if os.path.exists(tmp_file):
        os.unlink(tmp_file)
    if pre_vm_state == "suspend":
        vm.resume()
    if process:
        if process.poll() is None:
            process.kill()
Beispiel #22
0
def run(test, params, env):
    """
    Test command: virsh domcontrol.

    The command can show the state of a control interface to the domain.
    1.Prepare test environment, destroy or suspend a VM.
    2.Do action to get a subprocess(dump, save, restore, managedsave) if
      domcontrol_job is set as yes.
    3.Perform virsh domcontrol to check state of a control interface to the
      domain.
    4.Recover the VM's status and wait for the subprocess over.
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    pre_vm_state = params.get("pre_vm_state", "running")
    options = params.get("domcontrol_options", "")
    action = params.get("domcontrol_action", "dump")
    tmp_file = os.path.join(data_dir.get_tmp_dir(), "domcontrol.tmp")
    vm_ref = params.get("domcontrol_vm_ref")
    job = params.get("domcontrol_job", "yes")
    readonly = "yes" == params.get("readonly", "no")
    status_error = params.get("status_error", "no")
    remote_uri = params.get("remote_uri")
    remote_ip = params.get("remote_ip")
    remote_pwd = params.get("remote_pwd")
    remote_user = params.get("remote_user", "root")
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    if remote_uri:
        if remote_ip.count("EXAMPLE"):
            test.cancel("The remote ip is Sample one, pls configure it first")
        ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)

    # Instead of "paused_after_start_vm", use "pre_vm_state".
    # After starting the VM, wait for some time to make sure the job
    # can be created on this domain.
    if start_vm == "yes":
        vm.wait_for_login()
        if params.get("pre_vm_state") == "suspend":
            vm.pause()

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name

    if action == "managedsave":
        tmp_file = '/var/lib/libvirt/qemu/save/%s.save' % vm.name

    if action == "restore":
        virsh.save(vm_name, tmp_file, ignore_status=True)

    process = None
    if job == "yes" and start_vm == "yes" and status_error == "no":
        # Check domain control interface state with job on domain.
        process = get_subprocess(action, vm_name, tmp_file)
        while process.poll() is None:
            if vm.is_alive():
                ret = virsh.domcontrol(vm_ref,
                                       options,
                                       ignore_status=True,
                                       debug=True)
                status = ret.exit_status
                # check status_error
                if status != 0:
                    # Do not raise error if domain is not running, as save,
                    # managedsave and restore will change the domain state
                    # from running to shutoff or reverse, and the timing of
                    # the state change is not predictable, so skip the error
                    # of domain state change and focus on domcontrol command
                    # status while domain is running.
                    if vm.is_alive():
                        test.fail("Run failed with right command")
    else:
        if remote_uri:
            # check remote domain status
            if not virsh.is_alive(vm_name, uri=remote_uri):
                # If remote domain is not running, start remote domain
                virsh.start(vm_name, uri=remote_uri)

        # Check domain control interface state without job on domain.
        ret = virsh.domcontrol(vm_ref,
                               options,
                               readonly=readonly,
                               ignore_status=True,
                               debug=True,
                               uri=remote_uri)
        status = ret.exit_status

        # check status_error
        if status_error == "yes":
            if status == 0:
                test.fail("Run successfully with wrong command!")
        elif status_error == "no":
            if status != 0:
                test.fail("Run failed with right command")

    # Recover the environment.
    if action == "managedsave":
        virsh.managedsave_remove(vm_name, ignore_status=True)
    if os.path.exists(tmp_file):
        os.unlink(tmp_file)
    if remote_uri:
        if virsh.is_alive(vm_name, uri=remote_uri):
            # Destroy remote domain
            virsh.destroy(vm_name, uri=remote_uri)
    if pre_vm_state == "suspend":
        vm.resume()
    if process:
        if process.poll() is None:
            process.kill()
Beispiel #23
0
def run(test, params, env):
    """
    Test the tpm virtual devices
    1. prepare a guest with different tpm devices
    2. check whether the guest can be started
    3. check the xml and qemu cmd line, including swtpm for vtpm
    4. check tpm usage in guest os
    """
    # Tpm passthrough supported since libvirt 1.0.5.
    if not libvirt_version.version_compare(1, 0, 5):
        test.cancel("Tpm device is not supported "
                    "on current libvirt version.")
    # Tpm passthrough supported since qemu 2.12.0-49.
    if not utils_misc.compare_qemu_version(2, 9, 0, is_rhev=False):
        test.cancel("Tpm device is not supported "
                    "on current qemu version.")

    tpm_model = params.get("tpm_model")
    backend_type = params.get("backend_type")
    backend_version = params.get("backend_version")
    device_path = params.get("device_path")
    tpm_num = int(params.get("tpm_num", 1))
    # After first start of vm with vtpm, do operations, check it still works
    vm_operate = params.get("vm_operate")
    # Sub-operation(e.g.domrename) under vm_operate(e.g.restart)
    vm_oprt = params.get("vm_oprt")
    secret_uuid = params.get("secret_uuid")
    secret_value = params.get("secret_value")
    # Change encryption state: from plain to encrypted, or reverse.
    encrypt_change = params.get("encrypt_change")
    secret_uuid = params.get("secret_uuid")
    prepare_secret = ("yes" == params.get("prepare_secret", "no"))
    remove_dev = ("yes" == params.get("remove_dev", "no"))
    multi_vms = ("yes" == params.get("multi_vms", "no"))
    # Remove swtpm state file
    rm_statefile = ("yes" == params.get("rm_statefile", "no"))
    test_suite = ("yes" == params.get("test_suite", "no"))
    restart_libvirtd = ("yes" == params.get("restart_libvirtd", "no"))
    no_backend = ("yes" == params.get("no_backend", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    err_msg = params.get("xml_errmsg", "")
    loader = params.get("loader", "")
    nvram = params.get("nvram", "")
    uefi_disk_url = params.get("uefi_disk_url", "")
    download_file_path = os.path.join(data_dir.get_data_dir(), "uefi_disk.qcow2")
    persistent_state = ("yes" == params.get("persistent_state", "no"))
    check_pcrbanks = ('yes' == params.get("check_pcrbanks", "no"))
    remove_pcrbank = ('yes' == params.get("remove_pcrbank", "no"))
    pcrbank_change = params.get("pcrbank_change")
    active_pcr_banks = params.get("active_pcr_banks")
    if active_pcr_banks:
        active_pcr_banks = active_pcr_banks.split(",")

    libvirt_version.is_libvirt_feature_supported(params)

    # Tpm emulator tpm-tis_model for aarch64 supported since libvirt 7.1.0
    if platform.machine() == 'aarch64' and tpm_model == 'tpm-tis' \
        and backend_type == 'emulator' \
            and not libvirt_version.version_compare(7, 1, 0):

        test.cancel("Tpm emulator tpm-tis_model for aarch64 "
                    "is not supported on current libvirt")

    # Check tpm chip on host for passthrough testing
    if backend_type == "passthrough":
        dmesg_info = process.getoutput("dmesg|grep tpm -wi", shell=True)
        logging.debug("dmesg info about tpm:\n %s", dmesg_info)
        dmesg_error = re.search("No TPM chip found|TPM is disabled", dmesg_info)
        if dmesg_error:
            test.cancel(dmesg_error.group())
        else:
            # Try to check host tpm chip version
            tpm_v = None
            if re.search("2.0 TPM", dmesg_info):
                tpm_v = "2.0"
                if not utils_package.package_install("tpm2-tools"):
                    # package_install() return 'True' if succeed
                    test.error("Failed to install tpm2-tools on host")
            else:
                if re.search("1.2 TPM", dmesg_info):
                    tpm_v = "1.2"
                # If "1.2 TPM" or no version info in dmesg, try to test a tpm1.2 at first
                if not utils_package.package_install("tpm-tools"):
                    if tpm_v == "1.2":
                        test.error("Failed to install tpm-tools on host")
                    else:
                        logging.debug("Failed to install tpm-tools on host")
    # Check host env for vtpm testing
    elif backend_type == "emulator":
        if not utils_misc.compare_qemu_version(4, 0, 0, is_rhev=False):
            test.cancel("vtpm(emulator backend) is not supported "
                        "on current qemu version.")
        # Install swtpm pkgs on host for vtpm emulation
        if not utils_package.package_install(["swtpm", "swtpm-tools"]):
            test.error("Failed to install swtpm swtpm-tools on host")

    def replace_os_disk(vm_xml, vm_name, nvram):
        """
        Replace os(nvram) and disk(uefi) for x86 vtpm test

        :param vm_xml: current vm's xml
        :param vm_name: current vm name
        :param nvram: nvram file path of vm
        """
        # Add loader, nvram in <os>
        nvram = nvram.replace("<VM_NAME>", vm_name)
        dict_os_attrs = {"loader_readonly": "yes",
                         "secure": "yes",
                         "loader_type": "pflash",
                         "loader": loader,
                         "nvram": nvram}
        vm_xml.set_os_attrs(**dict_os_attrs)
        logging.debug("Set smm=on in VMFeaturesXML")
        # Add smm in <features>
        features_xml = vm_xml.features
        features_xml.smm = "on"
        vm_xml.features = features_xml
        vm_xml.sync()
        # Replace disk with an uefi image
        if not utils_package.package_install("wget"):
            test.error("Failed to install wget on host")
        if uefi_disk_url.count("EXAMPLE"):
            test.error("Please provide the URL %s" % uefi_disk_url)
        else:
            download_cmd = ("wget %s -O %s" % (uefi_disk_url, download_file_path))
            process.system(download_cmd, verbose=False, shell=True)
        vm = env.get_vm(vm_name)
        uefi_disk = {'disk_source_name': download_file_path}
        libvirt.set_vm_disk(vm, uefi_disk)

    vm_names = params.get("vms").split()
    vm_name = vm_names[0]
    vm = env.get_vm(vm_name)
    domuuid = vm.get_uuid()
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    os_xml = getattr(vm_xml, "os")
    host_arch = platform.machine()
    if backend_type == "emulator" and host_arch == 'x86_64':
        if not utils_package.package_install("OVMF"):
            test.error("Failed to install OVMF or edk2-ovmf pkgs on host")
        if os_xml.xmltreefile.find('nvram') is None:
            replace_os_disk(vm_xml, vm_name, nvram)
            vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    if vm.is_alive():
        vm.destroy()

    vm2 = None
    if multi_vms:
        if len(vm_names) > 1:
            vm2_name = vm_names[1]
            vm2 = env.get_vm(vm2_name)
            vm2_xml = VMXML.new_from_inactive_dumpxml(vm2_name)
            vm2_xml_backup = vm2_xml.copy()
        else:
            # Clone additional vms if needed
            try:
                utils_path.find_command("virt-clone")
            except utils_path.CmdNotFoundError:
                if not utils_package.package_install(["virt-install"]):
                    test.cancel("Failed to install virt-install on host")
            vm2_name = "vm2_" + utils_misc.generate_random_string(5)
            ret_clone = utils_libguestfs.virt_clone_cmd(vm_name, vm2_name,
                                                        True, timeout=360, debug=True)
            if ret_clone.exit_status:
                test.error("Need more than one domains, but error occurred when virt-clone.")
            vm2 = vm.clone(vm2_name)
            vm2_xml = VMXML.new_from_inactive_dumpxml(vm2_name)
        if vm2.is_alive():
            vm2.destroy()

    service_mgr = service.ServiceManager()

    def check_dumpxml(vm_name):
        """
        Check whether the added devices are shown in the guest xml

        :param vm_name: current vm name
        """
        logging.info("------Checking guest dumpxml------")
        if tpm_model:
            pattern = '<tpm model="%s">' % tpm_model
        else:
            # The default tpm model is "tpm-tis"
            pattern = '<tpm model="tpm-tis">'
        # Check tpm model
        xml_after_adding_device = VMXML.new_from_dumpxml(vm_name)
        logging.debug("xml after add tpm dev is %s", xml_after_adding_device)
        if pattern not in astring.to_text(xml_after_adding_device):
            test.fail("Can not find the %s tpm device xml "
                      "in the guest xml file." % tpm_model)
        # Check backend type
        pattern = '<backend type="%s"' % backend_type
        if pattern not in astring.to_text(xml_after_adding_device):
            test.fail("Can not find the %s backend type xml for tpm dev "
                      "in the guest xml file." % backend_type)
        # Check backend version
        if backend_version:
            check_ver = backend_version if backend_version != 'none' else '2.0'
            pattern = '"emulator" version="%s"' % check_ver
            if pattern not in astring.to_text(xml_after_adding_device):
                test.fail("Can not find the %s backend version xml for tpm dev "
                          "in the guest xml file." % check_ver)
        # Check device path
        if backend_type == "passthrough":
            pattern = '<device path="/dev/tpm0"'
            if pattern not in astring.to_text(xml_after_adding_device):
                test.fail("Can not find the %s device path xml for tpm dev "
                          "in the guest xml file." % device_path)
        # Check encryption secret
        if prepare_secret:
            pattern = '<encryption secret="%s" />' % encryption_uuid
            if pattern not in astring.to_text(xml_after_adding_device):
                test.fail("Can not find the %s secret uuid xml for tpm dev "
                          "in the guest xml file." % encryption_uuid)
        logging.info('------PASS on guest dumpxml check------')

    def check_qemu_cmd_line(vm, vm_name, domid):
        """
        Check whether the added devices are shown in the qemu cmd line

        :param vm: current vm
        :param vm_name: current vm name
        :param domid: domain id for checking vtpm socket file
        """
        logging.info("------Checking qemu cmd line------")
        if not vm.get_pid():
            test.fail('VM pid file missing.')
        with open('/proc/%s/cmdline' % vm.get_pid()) as cmdline_file:
            cmdline = cmdline_file.read()
            logging.debug("Qemu cmd line info:\n %s", cmdline)
        # Check tpm model
        pattern_list = ["-device.*%s" % tpm_model]
        # Check backend type
        if backend_type == "passthrough":
            dev_num = re.search(r"\d+", device_path).group()
            backend_segment = "id=tpm-tpm%s" % dev_num
        else:
            # emulator backend
            backend_segment = "id=tpm-tpm0,chardev=chrtpm"
        pattern_list.append("-tpmdev.*%s,%s" % (backend_type, backend_segment))
        # Check chardev socket for vtpm
        if backend_type == "emulator":
            pattern_list.append("-chardev.*socket,id=chrtpm,"
                                "path=.*/run/libvirt/qemu/swtpm/%s-%s-swtpm.sock" % (domid, vm_name))
        for pattern in pattern_list:
            if not re.search(pattern, cmdline):
                if not remove_dev:
                    test.fail("Can not find the %s for tpm device "
                              "in qemu cmd line." % pattern)
            elif remove_dev:
                test.fail("%s still exists after remove vtpm and restart" % pattern)
        logging.info("------PASS on qemu cmd line check------")

    def check_swtpm(domid, domuuid, vm_name):
        """
        Check swtpm cmdline and files for vtpm.

        :param domid: domain id for checking vtpm files
        :param domuuid: domain uuid for checking vtpm state file
        :param vm_name: current vm name
        """
        logging.info("------Checking swtpm cmdline and files------")
        # Check swtpm cmdline
        swtpm_pid = utils_misc.get_pid("swtpm socket.*%s" % vm_name)
        if not swtpm_pid:
            if not remove_dev:
                test.fail('swtpm socket process missing.')
            else:
                return
        elif remove_dev:
            test.fail('swtpm socket process still exists after remove vtpm and restart')
        with open('/proc/%s/cmdline' % swtpm_pid) as cmdline_file:
            cmdline = cmdline_file.read()
            logging.debug("Swtpm cmd line info:\n %s", cmdline)
        pattern_list = ["--ctrl", "--tpmstate", "--log", "--tpm2"]
        if prepare_secret:
            pattern_list.extend(["--key", "--migration-key"])
        for pattern in pattern_list:
            if not re.search(pattern, cmdline):
                test.fail("Can not find the %s for tpm device "
                          "in swtpm cmd line." % pattern)
        # Check swtpm files
        file_list = ["/var/run/libvirt/qemu/swtpm/%s-%s-swtpm.sock" % (domid, vm_name)]
        file_list.append("/var/lib/libvirt/swtpm/%s/tpm2" % domuuid)
        file_list.append("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm_name)
        file_list.append("/var/run/libvirt/qemu/swtpm/%s-%s-swtpm.pid" % (domid, vm_name))
        for swtpm_file in file_list:
            if not os.path.exists(swtpm_file):
                test.fail("Swtpm file: %s does not exist" % swtpm_file)
        logging.info("------PASS on Swtpm cmdline and files check------")

    def get_tpm2_tools_cmd(session=None):
        """
        Get tpm2-tools pkg version and return corresponding getrandom cmd

        :session: guest console session
        :return: tpm2_getrandom cmd usage
        """
        cmd = 'rpm -q tpm2-tools'
        get_v_tools = session.cmd(cmd) if session else process.run(cmd).stdout_text
        v_tools_list = get_v_tools.strip().split('-')
        if session:
            logging.debug("The tpm2-tools version is %s", v_tools_list[2])
        v_tools = int(v_tools_list[2].split('.')[0])
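        # tpm2-tools changed its command line with the 4.x series, so pick
        # the tpm2_getrandom invocation that matches the installed version.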
        return "tpm2_getrandom 8" if v_tools < 4 else "tpm2_getrandom -T device:/dev/tpm0 8 --hex"

    def get_host_tpm_bef(tpm_v):
        """
        Test host tpm function and identify its real version before passthrough
        Since dmesg sometimes doesn't include tpm info, tpm-tools or
        tpm2-tools is used to probe the device.

        :param tpm_v: host tpm version get from dmesg info
        :return: host tpm version
        """
        logging.info("------Checking host tpm device before passthrough------")
        # Try tcsd tool for suspected tpm1.2 chip on host
        tpm_real_v = tpm_v
        if tpm_v != "2.0":
            if not service_mgr.start('tcsd'):
                # service_mgr.start() return 'True' if succeed
                if tpm_v == "1.2":
                    test.fail("Host tcsd.service start failed")
                else:
                    # Means tpm_v got nothing from dmesg, log failure here and
                    # go to next 'if' to try tpm2.0 tools.
                    logging.info("Host tcsd.service start failed")
            else:
                tpm_real_v = "1.2"
                logging.info("Host tpm version info:")
                result = process.run("tpm_version", ignore_status=False)
                logging.debug("[host]# tpm_version\n %s", result.stdout)
                time.sleep(2)
                service_mgr.stop('tcsd')
        if tpm_v != "1.2":
            # Try tpm2.0 tools
            if not utils_package.package_install("tpm2-tools"):
                test.error("Failed to install tpm2-tools on host")
            tpm2_getrandom_cmd = get_tpm2_tools_cmd()
            if process.run(tpm2_getrandom_cmd, ignore_status=True).exit_status:
                test.cancel("Both tcsd and tpm2-tools can not work, "
                            "pls check your host tpm version and test env.")
            else:
                tpm_real_v = "2.0"
        logging.info("------PASS on host tpm device check------")
        return tpm_real_v

    def test_host_tpm_aft(tpm_real_v):
        """
        Test host tpm function after passthrough

        :param tpm_real_v: host tpm real version identified from testing
        """
        logging.info("------Checking host tpm device after passthrough------")
        if tpm_real_v == "1.2":
            if service_mgr.start('tcsd'):
                time.sleep(2)
                service_mgr.stop('tcsd')
                test.fail("Host tpm should not work after passthrough to guest.")
            else:
                logging.info("Expected failure: Tpm is being used by guest.")
        elif tpm_real_v == "2.0":
            tpm2_getrandom_cmd = get_tpm2_tools_cmd()
            if not process.run(tpm2_getrandom_cmd, ignore_status=True).exit_status:
                test.fail("Host tpm should not work after passthrough to guest.")
            else:
                logging.info("Expected failure: Tpm is being used by guest.")
        logging.info("------PASS on host tpm device check------")

    def test_guest_tpm(expect_version, session, expect_fail):
        """
        Test tpm function in guest

        :param expect_version: guest tpm version, as host version, or emulator specified
        :param session: Guest session to be tested
        :param expect_fail: guest tpm is expectedly fail to work
        """
        logging.info("------Checking guest tpm device work------")
        if expect_version == "1.2":
            # Install tpm-tools and test by tcsd method
            if not utils_package.package_install(["tpm-tools"], session, 360):
                test.error("Failed to install tpm-tools package in guest")
            else:
                status, output = session.cmd_status_output("systemctl start tcsd")
                logging.debug("Command output: %s", output)
                if status:
                    if expect_fail:
                        test.cancel("tpm-crb passthrough only works with host tpm2.0, "
                                    "but your host tpm version is 1.2")
                    else:
                        test.fail("Failed to start tcsd.service in guest")
                else:
                    dev_output = session.cmd_output("ls /dev/|grep tpm")
                    logging.debug("Command output: %s", dev_output)
                    status, output = session.cmd_status_output("tpm_version")
                    logging.debug("Command output: %s", output)
                    if status:
                        test.fail("Guest tpm can not work")
        else:
            # If expect_version is tpm2.0, install and test by tpm2-tools
            if not utils_package.package_install(["tpm2-tools"], session, 360):
                test.error("Failed to install tpm2-tools package in guest")
            else:
                tpm2_getrandom_cmd = get_tpm2_tools_cmd(session)
                status1, output1 = session.cmd_status_output("ls /dev/|grep tpm")
                logging.debug("Command output: %s", output1)
                status2, output2 = session.cmd_status_output(tpm2_getrandom_cmd)
                logging.debug("Command output: %s", output2)
                if status1 or status2:
                    if not expect_fail:
                        test.fail("Guest tpm can not work")
                    else:
                        d_status, d_output = session.cmd_status_output("date")
                        if d_status:
                            test.fail("Guest OS doesn't work well")
                        logging.debug("Command output: %s", d_output)
                elif expect_fail:
                    test.fail("Expect fail but guest tpm still works")
                if active_pcr_banks or check_pcrbanks:
                    output3 = session.cmd_output("tpm2_pcrread")
                    logging.debug("Command output:\n %s", output3)
                    actual_pcrbanks = []
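                    # Heuristic parse of tpm2_pcrread output: bank headers
                    # appear in the order sha1, sha256, sha384, sha512, and a
                    # bank that is active prints PCR values between its header
                    # and the next one, so the offsets checked below tell
                    # which banks are active.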
                    if output3.find('sha256') != 6:
                        actual_pcrbanks.append('sha1')
                    if output3.find('sha384') != output3.find('sha256') + 8:
                        actual_pcrbanks.append('sha256')
                    if output3.find('sha512') != output3.find('sha384') + 8:
                        actual_pcrbanks.append('sha384')
                    if 'sha512' not in output3[-8:]:
                        actual_pcrbanks.append('sha512')
                    logging.debug("Actual active PCR banks in guest are: %s", actual_pcrbanks)
                    if active_pcr_banks and active_pcr_banks != actual_pcrbanks:
                        test.fail("Actual active PCR banks in guest do not match configured in xml.")
                    elif check_pcrbanks and actual_pcrbanks != ["sha256"]:
                        test.fail("Default PCR bank is not sha256: %s" % actual_pcrbanks)
        logging.info("------PASS on guest tpm device work check------")

    def run_test_suite_in_guest(session):
        """
        Run kernel test suite for guest tpm.

        :param session: Guest session to be tested
        """
        logging.info("------Checking kernel test suite for guest tpm------")
        boot_info = session.cmd('uname -r').strip().split('.')
        kernel_version = '.'.join(boot_info[:2])
        # Download test suite per current guest kernel version
        parent_path = "https://cdn.kernel.org/pub/linux/kernel"
        if float(kernel_version) < 5.3:
            major_version = "5"
            file_version = "5.3"
        else:
            major_version = boot_info[0]
            file_version = kernel_version
        src_url = "%s/v%s.x/linux-%s.tar.xz" % (parent_path, major_version, file_version)
        download_cmd = "wget %s -O %s" % (src_url, "/root/linux.tar.xz")
        output = session.cmd_output(download_cmd, timeout=480)
        logging.debug("Command output: %s", output)
        # Install necessary pkgs to build test suite
        if not utils_package.package_install(["tar", "make", "gcc", "rsync", "python2"], session, 360):
            test.fail("Failed to install specified pkgs in guest OS.")
        # Unzip the downloaded test suite
        status, output = session.cmd_status_output("tar xvJf /root/linux.tar.xz -C /root")
        if status:
            test.fail("Uzip failed: %s" % output)
        # Make the test suite use python2, which is what its scripts support
        test_path = "/root/linux-%s/tools/testing/selftests" % file_version
        sed_cmd = "sed -i 's/python -m unittest/python2 -m unittest/g' %s/tpm2/test_*.sh" % test_path
        output = session.cmd_output(sed_cmd)
        logging.debug("Command output: %s", output)
        # Build and run the .sh files of the test suite
        status, output = session.cmd_status_output("make -C %s TARGETS=tpm2 run_tests" % test_path, timeout=360)
        logging.debug("Command output: %s", output)
        if status:
            test.fail("Failed to run test suite in guest OS.")
        for test_sh in ["test_smoke.sh", "test_space.sh"]:
            pattern = "ok .* selftests: tpm2: %s" % test_sh
            if not re.search(pattern, output) or ("not ok" in output):
                if "ERROR" in output:
                    test.fail("test suite check failed: %s" % re.findall(r'test_.* ... ERROR', output))
                else:
                    test.fail("test suite check failed.")
        logging.info("------PASS on kernel test suite check------")

    def persistent_test(vm, vm_xml):
        """
        Test for vtpm with persistent_state.
        """
        vm.undefine("--nvram")
        virsh.create(vm_xml.xml, **virsh_dargs)
        domuuid = vm.get_uuid()
        state_file = "/var/lib/libvirt/swtpm/%s/tpm2/tpm2-00.permall" % domuuid
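        # With persistent_state enabled the swtpm state must survive even for
        # a transient guest, so this permall file should still exist after
        # the domain is destroyed.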
        process.run("ls -Z %s" % state_file)
        session = vm.wait_for_login()
        test_guest_tpm("2.0", session, False)
        session.close()
        virsh.dom_list("--transient", debug=True)
        vm.destroy()
        if not os.path.exists(state_file):
            test.fail("Swtpm state file: %s does not exist after destroy vm'" % state_file)
        process.run("ls -Z %s" % state_file)

    def reuse_by_vm2(tpm_dev):
        """
        Try to add the same tpm to a second guest while it is being used by the first guest.

        :param tpm_dev: tpm device to be added into guest xml
        """
        logging.info("------Trying to add same tpm to a second domain------")
        vm2_xml.remove_all_device_by_type('tpm')
        vm2_xml.add_device(tpm_dev)
        vm2_xml.sync()
        ret = virsh.start(vm2_name, ignore_status=True, debug=True)
        if backend_type == "passthrough":
            if ret.exit_status:
                logging.info("Expected failure when try to passthrough a tpm"
                             " that being used by another guest")
                return
            test.fail("Reuse a passthroughed tpm should not succeed.")
        elif ret.exit_status:
            # emulator backend
            test.fail("Vtpm for each guest should not interfere with each other")

    def save_modify_pcrbank(vm_name, active_pcr_banks, pcrbank_change):
        """
        Try to modify active_pcr_banks of a managedsaved vm by editing the
        dumped xml and redefining it with managedsave-define.

        :param vm_name: current vm name
        :param active_pcr_banks: current active_pcr_banks configured in managedsaved xml
        :param pcrbank_change: new active_pcr_banks to replace with
        """
        xmlfile = os.path.join(data_dir.get_tmp_dir(), 'managedsave.xml')
        virsh.managedsave_dumpxml(vm_name, to_file=xmlfile, ignore_status=False, debug=False)
        with open(xmlfile) as file_xml:
            old_pcrbank = '<%s/>' % active_pcr_banks[0]
            new_pcrbank = '<%s/>' % pcrbank_change
            updated_xml = file_xml.read().replace(old_pcrbank, new_pcrbank)
            logging.debug("Updated xml for managedsave-define is:\n %s" % updated_xml)
        with open(xmlfile, 'w') as file_xml:
            file_xml.write(updated_xml)
        ret = virsh.managedsave_define(vm_name, xmlfile, '', ignore_status=True, debug=True)
        libvirt.check_exit_status(ret, status_error)

    try:
        tpm_real_v = None
        sec_uuids = []
        new_name = ""
        virsh_dargs = {"debug": True, "ignore_status": False}
        vm_xml.remove_all_device_by_type('tpm')
        tpm_dev = Tpm()
        if tpm_model:
            tpm_dev.tpm_model = tpm_model
        if not no_backend:
            backend = tpm_dev.Backend()
            if backend_type != 'none':
                backend.backend_type = backend_type
                if backend_type == "passthrough":
                    tpm_real_v = get_host_tpm_bef(tpm_v)
                    logging.debug("The host tpm real version is %s", tpm_real_v)
                    if device_path:
                        backend.device_path = device_path
                if backend_type == "emulator":
                    if backend_version != 'none':
                        backend.backend_version = backend_version
                    if persistent_state:
                        backend.persistent_state = "yes"
                    if prepare_secret:
                        auth_sec_dict = {"sec_ephemeral": "no",
                                         "sec_private": "yes",
                                         "sec_desc": "sample vTPM secret",
                                         "sec_usage": "vtpm",
                                         "sec_name": "VTPM_example"}
                        encryption_uuid = libvirt.create_secret(auth_sec_dict)
                        if secret_value != 'none':
                            virsh.secret_set_value(encryption_uuid, "open sesame", encode=True, debug=True)
                        sec_uuids.append(encryption_uuid)
                        if encrypt_change != 'encrpt':
                            # plain_to_encrypt will not add encryption on first start
                            if secret_uuid == 'invalid':
                                encryption_uuid = encryption_uuid[:-1]
                            backend.encryption_secret = encryption_uuid
                        if secret_uuid == "change":
                            auth_sec_dict["sec_desc"] = "sample2 vTPM secret"
                            auth_sec_dict["sec_name"] = "VTPM_example2"
                            new_encryption_uuid = libvirt.create_secret(auth_sec_dict)
                            virsh.secret_set_value(new_encryption_uuid, "open sesame", encode=True, debug=True)
                            sec_uuids.append(new_encryption_uuid)
                    if secret_uuid == 'nonexist':
                        backend.encryption_secret = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"
                    if active_pcr_banks:
                        backend_pcrbank = backend.ActivePCRBanks()
                        for pcrbank in active_pcr_banks:
                            backend_pcrbank.add_pcrbank(pcrbank)
                        backend.active_pcr_banks = backend_pcrbank
            tpm_dev.backend = backend
        logging.debug("tpm dev xml to add is:\n %s", tpm_dev)
        for num in range(tpm_num):
            vm_xml.add_device(tpm_dev, True)
        if persistent_state:
            persistent_test(vm, vm_xml)
            return
        else:
            ret = virsh.define(vm_xml.xml, ignore_status=True, debug=True)
        expected_match = ""
        if not err_msg:
            expected_match = "Domain .*%s.* defined from %s" % (vm_name, vm_xml.xml)
        libvirt.check_result(ret, err_msg, "", False, expected_match)
        if err_msg:
            # Stop the test when the expected failure is hit
            return
        if vm_operate != "restart":
            check_dumpxml(vm_name)
        # For the default model there is no need to start the guest for testing
        if tpm_model:
            expect_fail = False
            try:
                vm.start()
            except VMStartError as detail:
                if secret_value == 'none' or secret_uuid == 'nonexist':
                    logging.debug("Expected failure: %s", detail)
                    return
                else:
                    test.fail(detail)
            domuuid = vm.get_uuid()
            if vm_operate or restart_libvirtd:
                # Make sure the OS works before the vm operation or libvirtd restart
                session = vm.wait_for_login()
                test_guest_tpm("2.0", session, False)
                session.close()
                if restart_libvirtd:
                    utils_libvirtd.libvirtd_restart()
                swtpm_statedir = "/var/lib/libvirt/swtpm/%s" % domuuid
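                # libvirt keeps per-domain swtpm state under this uuid-keyed
                # directory; undefine is expected to remove it (checked below)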
                if vm_operate == "resume":
                    virsh.suspend(vm_name, **virsh_dargs)
                    time.sleep(3)
                    virsh.resume(vm_name, **virsh_dargs)
                elif vm_operate == "snapshot":
                    virsh.snapshot_create_as(vm_name, "sp1 --memspec file=/tmp/testvm_sp1", **virsh_dargs)
                elif vm_operate in ["restart", "create"]:
                    vm.destroy()
                    if vm_operate == "create":
                        virsh.undefine(vm_name, options="--nvram", **virsh_dargs)
                        if os.path.exists(swtpm_statedir):
                            test.fail("Swtpm state dir: %s still exist after vm undefine" % swtpm_statedir)
                        virsh.create(vm_xml.xml, **virsh_dargs)
                    else:
                        if vm_oprt == "domrename":
                            new_name = "vm_" + utils_misc.generate_random_string(5)
                            virsh.domrename(vm_name, new_name, **virsh_dargs)
                            new_vm = libvirt_vm.VM(new_name, vm.params, vm.root_dir, vm.address_cache)
                            vm = new_vm
                            vm_name = new_name
                        elif secret_value == 'change':
                            logging.info("Changing secret value...")
                            virsh.secret_set_value(encryption_uuid, "new sesame", encode=True, debug=True)
                        elif not restart_libvirtd:
                            # remove_dev or do other vm operations during restart
                            vm_xml.remove_all_device_by_type('tpm')
                            if remove_pcrbank:
                                for pcrbank in active_pcr_banks:
                                    backend_pcrbank.remove_pcrbank(pcrbank)
                                backend.active_pcr_banks = backend_pcrbank
                                tpm_dev.backend = backend
                                vm_xml.add_device(tpm_dev, True)
                                # Do not clear active_pcr_banks for the later test_guest_tpm check, since
                                # the guest OS keeps the last startup config if it is removed from the guest xml.
                            if secret_uuid == "change" or encrypt_change:
                                # Change the secret uuid, or switch the encryption state: from plain to encrypted, or vice versa
                                if encrypt_change == 'plain':
                                    # Change from encrypted to plain: redefine a tpm dev without encryption
                                    tpm_dev = Tpm()
                                    tpm_dev.tpm_model = tpm_model
                                    backend = tpm_dev.Backend()
                                    backend.backend_type = backend_type
                                    backend.backend_version = backend_version
                                else:
                                    # Use a new secret's uuid
                                    if secret_uuid == "change":
                                        encryption_uuid = new_encryption_uuid
                                    backend.encryption_secret = encryption_uuid
                                tpm_dev.backend = backend
                                logging.debug("The new tpm dev xml to add for restart vm is:\n %s", tpm_dev)
                                vm_xml.add_device(tpm_dev, True)
                            if encrypt_change in ['encrpt', 'plain'] or remove_pcrbank:
                                # Use define() instead of sync() so the implicit undefine does not remove the state file
                                vm_xml.define()
                            else:
                                vm_xml.sync()
                        if rm_statefile:
                            swtpm_statefile = "%s/tpm2/tpm2-00.permall" % swtpm_statedir
                            logging.debug("Removing state file: %s", swtpm_statefile)
                            os.remove(swtpm_statefile)
                        ret = virsh.start(vm_name, ignore_status=True, debug=True)
                        libvirt.check_exit_status(ret, status_error)
                        if status_error and ret.exit_status != 0:
                            return
                    if not remove_dev:
                        check_dumpxml(vm_name)
                elif vm_operate == 'managedsave':
                    virsh.managedsave(vm_name, **virsh_dargs)
                    time.sleep(5)
                    if pcrbank_change:
                        save_modify_pcrbank(vm_name, active_pcr_banks, pcrbank_change)
                        return
                    if secret_value == 'change':
                        logging.info("Changing secret value...")
                        virsh.secret_set_value(encryption_uuid, "new sesame", encode=True, debug=True)
                        if rm_statefile:
                            swtpm_statefile = "%s/tpm2/tpm2-00.permall" % swtpm_statedir
                            logging.debug("Removing state file: %s", swtpm_statefile)
                            os.remove(swtpm_statefile)
                    ret = virsh.start(vm_name, ignore_status=True, debug=True)
                    libvirt.check_exit_status(ret, status_error)
                    if status_error and ret.exit_status != 0:
                        return
            domid = vm.get_id()
            check_qemu_cmd_line(vm, vm_name, domid)
            if backend_type == "passthrough":
                if tpm_real_v == "1.2" and tpm_model == "tpm-crb":
                    expect_fail = True
                expect_version = tpm_real_v
                test_host_tpm_aft(tpm_real_v)
            else:
                # emulator backend
                if remove_dev:
                    expect_fail = True
                expect_version = backend_version
                check_swtpm(domid, domuuid, vm_name)
            session = vm.wait_for_login()
            if test_suite:
                run_test_suite_in_guest(session)
            else:
                test_guest_tpm(expect_version, session, expect_fail)
            session.close()
            if multi_vms:
                reuse_by_vm2(tpm_dev)
                if backend_type != "passthrough":
                    # emulator backend
                    check_dumpxml(vm2_name)
                    domid = vm2.get_id()
                    domuuid = vm2.get_uuid()
                    check_qemu_cmd_line(vm2, vm2_name, domid)
                    check_swtpm(domid, domuuid, vm2_name)
                    session = vm2.wait_for_login()
                    test_guest_tpm(backend_version, session, expect_fail)
                    session.close()

    finally:
        # Remove renamed domain if it exists
        if new_name:
            virsh.remove_domain(new_name, "--nvram", debug=True)
        if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % new_name):
            os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % new_name)
        # Remove snapshot if exists
        if vm_operate == "snapshot":
            snapshot_lists = virsh.snapshot_list(vm_name)
            if len(snapshot_lists) > 0:
                libvirt.clean_up_snapshots(vm_name, snapshot_lists)
                for snap in snapshot_lists:
                    virsh.snapshot_delete(vm_name, snap, "--metadata")
                if os.path.exists("/tmp/testvm_sp1"):
                    os.remove("/tmp/testvm_sp1")
        # Clear guest os
        if test_suite:
            session = vm.wait_for_login()
            logging.info("Removing dir /root/linux-*")
            output = session.cmd_output("rm -rf /root/linux-*")
            logging.debug("Command output:\n %s", output)
            session.close()
        if pcrbank_change:
            virsh.managedsave_remove(vm_name, debug=True)
        if vm_operate == "create":
            vm.define(vm_xml.xml)
        vm_xml_backup.sync(options="--nvram --managed-save")
        # Remove the swtpm log file so it does not affect later runs
        if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm.name):
            os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm.name)
        if os.path.exists("/var/lib/libvirt/swtpm/%s" % domuuid):
            shutil.rmtree("/var/lib/libvirt/swtpm/%s" % domuuid)
        for sec_uuid in set(sec_uuids):
            virsh.secret_undefine(sec_uuid, ignore_status=True, debug=True)
        if vm2:
            if len(vm_names) > 1:
                vm2_xml_backup.sync(options="--nvram")
            else:
                virsh.remove_domain(vm2_name, "--nvram --remove-all-storage", debug=True)
            if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm2.name):
                os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm2.name)
Beispiel #24
0
def run(test, params, env):
    """
    Test command: virsh managedsave-xxx
    including virsh managedsave-edit
              virsh managedsave-dumpxml
              virsh managedsave-define
              ...
    """
    vm_name = params.get('main_vm')
    checkpoint = params.get('checkpoint', '')
    error_msg = params.get('error_msg', '')
    virsh_opt = params.get('virsh_opt', '')
    ms_extra_options = params.get('ms_extra_options', '')
    pre_state = params.get('pre_state', '')
    status_error = 'yes' == params.get('status_error', 'no')

    vm = env.get_vm(vm_name)
    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': False}

    def start_and_login_vm():
        """
        Start vm and login, after which vm is accessible
        """
        vm.start()
        vm.wait_for_login().close()

    try:
        if checkpoint == 'dumpxml':
            # Check managedsave-dumpxml
            start_and_login_vm()
            virsh.managedsave(vm_name, **virsh_dargs)
            virsh.managedsave_dumpxml(vm_name, **virsh_dargs)
            tmp_dir = data_dir.get_tmp_dir()
            save_img_xml = os.path.join(tmp_dir, 'save_img.xml')
            managed_save_xml = os.path.join(tmp_dir, 'managed_save.xml')
            virsh.save_image_dumpxml(MANAGEDSAVE_FILE % vm_name,
                                     ' > %s' % save_img_xml, **virsh_dargs)
            virsh.managedsave_dumpxml(vm_name, ' > %s' % managed_save_xml,
                                      **virsh_dargs)
            result_need_check = process.run('diff %s %s' %
                                            (save_img_xml, managed_save_xml),
                                            shell=True,
                                            verbose=True)
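            # The two dumps should be identical, so diff's exit status (checked
            # against status_error further down) decides pass/fail here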
        if checkpoint == 'secure_info':
            # Check managedsave-dumpxml with option --security-info
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            # Add graphics vnc if guest doesn't have
            if not vmxml.get_devices(device_type="graphics"):
                logging.debug("Guest doesn't have graphic, add one")
                graphics.Graphics.add_graphic(vm_name, graphic="vnc")
            vm_xml.VMXML.set_graphics_attr(vm_name, {'passwd': '123456'})
            start_and_login_vm()
            virsh.managedsave(vm_name, **virsh_dargs)
            default_xml = virsh.managedsave_dumpxml(vm_name,
                                                    **virsh_dargs).stdout_text
            if 'passwd' in default_xml:
                test.fail('Found "passwd" in dumped vm xml. '
                          'Secure info like "passwd" should not be dumped.')
            secure_xml = virsh.managedsave_dumpxml(vm_name, '--security-info',
                                                   **virsh_dargs).stdout_text
            if 'passwd' not in secure_xml:
                test.fail('Not found "passwd" in dumped vm xml.'
                          'Secure info like "passwd" should be dumped '
                          'with option "--security-info"')
        if checkpoint == 'define':
            # Make a change to a managedsave-dumped xml, redefine the vm
            # and check if the change takes effect
            start_option = '--paused' if pre_state == 'paused' else ''
            virsh.start(vm_name, start_option, **virsh_dargs)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            logging.debug(vmxml.devices)
            disk = vmxml.get_devices('disk')[0]
            img_path = disk.source.attrs['file']
            logging.info('Original image path: %s', img_path)
            # Copy old image to new image
            new_img_path = os.path.join(data_dir.get_tmp_dir(), 'test.img')
            shutil.copyfile(img_path, new_img_path)
            virsh.managedsave(vm_name, **virsh_dargs)
            xmlfile = os.path.join(data_dir.get_tmp_dir(), 'managedsave.xml')
            virsh.managedsave_dumpxml(vm_name, '>%s' % xmlfile, **virsh_dargs)
            # Make a change to xmlfile and managedsave-define with it
            with open(xmlfile) as file_xml:
                updated_xml = file_xml.read().replace(img_path, new_img_path)
            with open(xmlfile, 'w') as file_xml:
                file_xml.write(updated_xml)
            virsh.managedsave_define(vm_name, xmlfile, ms_extra_options,
                                     **virsh_dargs)
            virsh.start(vm_name, **virsh_dargs)
            xml_after_define = virsh.dumpxml(vm_name,
                                             **virsh_dargs).stdout_text
            if 'test.img' not in xml_after_define:
                test.fail(
                    'Did not find "test.img" in vm xml after managedsave-define. '
                    'Modification to xml did not take effect.')
        if checkpoint == 'no_save':
            # Start a guest but do not managedsave it
            start_and_login_vm()
            virsh.dom_list('--all --managed-save', **virsh_dargs)
        if checkpoint == 'rm_after_save':
            # Remove saved file after managedsave a vm
            start_and_login_vm()
            virsh.managedsave(vm_name, **virsh_dargs)
            os.remove(MANAGEDSAVE_FILE % vm_name)
        if checkpoint == 'not_saved_corrupt':
            # Do not managedsave the vm, but create a fake managedsave file by
            # touching a file
            start_and_login_vm()
            virsh.dom_list('--all --managed-save', **virsh_dargs)
            process.run('touch %s' % MANAGEDSAVE_FILE % vm_name, verbose=True)
            params['clean_managed_save'] = True
        if checkpoint == 'readonly':
            start_and_login_vm()
            virsh.managedsave(vm_name, **virsh_dargs)
        if checkpoint == 'exclusive_option':
            virsh.managedsave(vm_name, **virsh_dargs)

        # Test managedsave-edit, managedsave-dumpxml, managedsave-define
        if params.get('check_cmd_error', '') == 'yes':
            ms_command = params.get('ms_command', '')
            if ms_command == 'edit':
                result_need_check = virsh.managedsave_edit(vm_name,
                                                           ms_extra_options,
                                                           timeout=60,
                                                           virsh_opt=virsh_opt,
                                                           debug=True)
            if ms_command == 'dumpxml':
                result_need_check = virsh.managedsave_dumpxml(
                    vm_name, ms_extra_options, virsh_opt=virsh_opt, debug=True)
            if ms_command == 'define':
                result_need_check = virsh.managedsave_define(
                    vm_name,
                    bkxml.xml,
                    ms_extra_options,
                    virsh_opt=virsh_opt,
                    debug=True)
        # If the result needs to be checked, check it
        if 'result_need_check' in locals():
            logging.info('Check command result.')
            libvirt.check_exit_status(result_need_check, status_error)
            if error_msg:
                libvirt.check_result(result_need_check, [error_msg])

    finally:
        if params.get('clean_managed_save'):
            os.remove(MANAGEDSAVE_FILE % vm_name)
        utils_libvirtd.libvirtd_restart()
        virsh.managedsave_remove(vm_name, debug=True)
        bkxml.sync()
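The 'define' checkpoint above is the core managedsave-define workflow: dump the
saved image xml, edit it, feed it back, then start the guest. A compact sketch of
that flow, assuming the same avocado-vt virsh and data_dir helpers and a guest
named "avocado-vt-vm1" (hypothetical) that is currently running:

import os
from virttest import virsh, data_dir

vm_name = "avocado-vt-vm1"                       # hypothetical guest name
xmlfile = os.path.join(data_dir.get_tmp_dir(), 'managedsave.xml')
virsh.managedsave(vm_name, debug=True, ignore_status=False)
virsh.managedsave_dumpxml(vm_name, to_file=xmlfile, debug=True)
# ... modify xmlfile here, e.g. point a disk source at a different image ...
virsh.managedsave_define(vm_name, xmlfile, '', debug=True, ignore_status=False)
virsh.start(vm_name, debug=True)                 # boots from the edited save image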
Beispiel #25
0
def run(test, params, env):
    """
    Miscellaneous tests of virtual cpu features

    1) check dumpxml after snapshot-create/revert
    2) check vendor_id
    3) check maximum vcpus with topology settings

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def update_cpu_xml():
        """
        Update cpu xml for test
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Create cpu xml for test
        if vmxml.xmltreefile.find('cpu'):
            cpu_xml = vmxml.cpu
        else:
            cpu_xml = vm_xml.VMCPUXML()

        if customize_cpu_features:
            for idx in range(len(cpu_xml.get_feature_list()) - 1, -1, -1):
                cpu_xml.remove_feature(idx)
            domcapa_xml = domcapability_xml.DomCapabilityXML()
            features = domcapa_xml.get_additional_feature_list(
                'host-model', ignore_features=None)
            for feature in features:
                for feature_name, feature_policy in feature.items():
                    # For host-passthrough mode, adding "invtsc" requires
                    # more settings, so it will be ignored.
                    if feature_name != "invtsc":
                        cpu_xml.add_feature(feature_name, feature_policy)

        if cpu_mode:
            cpu_xml.mode = cpu_mode
        if cpu_vendor_id:
            cpu_xml.vendor_id = cpu_vendor_id

        # Update vm's cpu
        vmxml.cpu = cpu_xml
        vmxml.sync()

        if vcpu_max:
            if with_topology:
                vm_xml.VMXML.set_vm_vcpus(vm_name,
                                          int(vcpu_max),
                                          cores=int(vcpu_max),
                                          sockets=1,
                                          threads=1,
                                          add_topology=with_topology,
                                          topology_correction=with_topology)
            else:
                vm_xml.VMXML.set_vm_vcpus(vm_name, int(vcpu_max))

    def do_snapshot(vm_name, expected_str):
        """
        Run snapshot related commands: snapshot-create-as, snapshot-list
        snapshot-dumpxml, snapshot-revert

        :param vm_name: vm name
        :param expected_str: expected string in snapshot-dumpxml
        :raise: test.fail if virsh command failed
        """
        snapshot_name = vm_name + "-snap"
        virsh_dargs = {'debug': True}

        cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name,
                                              **virsh_dargs)
        libvirt.check_exit_status(cmd_result)

        try:
            snapshots = virsh.snapshot_list(vm_name, **virsh_dargs)
        except process.CmdError:
            test.fail("Failed to get snapshots list for %s" % vm_name)
        if snapshot_name not in snapshots:
            test.fail("The snapshot '%s' was not in snapshot-list." %
                      snapshot_name)
        cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name,
                                            **virsh_dargs)
        libvirt.check_result(cmd_result, expected_match=expected_str)

        cmd_result = virsh.snapshot_revert(vm_name, "", "--current",
                                           **virsh_dargs)
        libvirt.check_exit_status(cmd_result)

    def check_feature_list(vm, original_dict):
        """
        Compare the new cpu feature list with the original one

        :param vm: VM object
        :param original_dict: Cpu feature dict, e.g. {"name1": "policy1", "name2": "policy2"}
        """
        new_cpu_xml = vm_xml.VMXML.new_from_dumpxml(vm.name).cpu
        new_feature_dict = new_cpu_xml.get_dict_type_feature()
        if new_feature_dict != original_dict:
            test.fail('CPU feature lists are different, original is :%s,'
                      ' new is %s:' % (original_dict, new_feature_dict))

    libvirt_version.is_libvirt_feature_supported(params)
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)

    cpu_mode = params.get('cpu_mode')
    vcpu_max = params.get('vcpu_max')
    expected_str_before_startup = params.get("expected_str_before_startup")
    expected_str_after_startup = params.get("expected_str_after_startup")

    test_operations = params.get("test_operations")
    check_vendor_id = "yes" == params.get("check_vendor_id", "no")
    virsh_edit_cmd = params.get("virsh_edit_cmd")
    with_topology = "yes" == params.get("with_topology", "no")

    status_error = "yes" == params.get("status_error", "no")
    err_msg = params.get("err_msg")

    cpu_vendor_id = None
    expected_qemuline = None
    cmd_in_guest = params.get("cmd_in_guest")
    customize_cpu_features = "yes" == params.get("customize_cpu_features",
                                                 "no")
    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    managed_save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name

    try:
        if check_vendor_id:
            output = virsh.capabilities(debug=True)
            host_vendor = re.findall(r'<vendor>(\w+)<', output)[0]

            cpu_vendor_id = 'GenuineIntel'
            if host_vendor != "Intel":
                cpu_vendor_id = 'AuthenticAMD'
            logging.debug("Set cpu vendor_id to %s on this host.",
                          cpu_vendor_id)

            expected_qemuline = "vendor=" + cpu_vendor_id
            cmd_in_guest = ("cat /proc/cpuinfo | grep vendor_id | grep {}".
                            format(cpu_vendor_id))
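            # e.g. "cat /proc/cpuinfo | grep vendor_id | grep GenuineIntel" on an Intel host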

        # Update xml for test
        update_cpu_xml()

        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Pre-test xml is %s", vmxml.xmltreefile)
        cpu_xml = vmxml.cpu
        feature_dict = cpu_xml.get_dict_type_feature()

        if expected_str_before_startup:
            libvirt.check_dumpxml(vm, expected_str_before_startup)

        if test_operations:
            for action in test_operations.split(","):
                if action == "do_snapshot":
                    do_snapshot(vm_name, expected_str_before_startup)

        if virsh_edit_cmd:
            status = libvirt.exec_virsh_edit(vm_name,
                                             virsh_edit_cmd.split(","))
            if status == status_error:
                test.fail("Virsh edit got unexpected result.")

        # Check if vm could start successfully
        if not status_error:
            result = virsh.start(vm_name, debug=True)
            libvirt.check_exit_status(result)

            if expected_str_after_startup:
                libvirt.check_dumpxml(vm, expected_str_after_startup)

            if expected_qemuline:
                libvirt.check_qemu_cmd_line(expected_qemuline)

            if cmd_in_guest:
                vm_session = vm.wait_for_login()
                status, output = vm_session.cmd_status_output(cmd_in_guest)
                if status:
                    vm_session.close()
                    test.fail("Failed to run '{}' in vm with "
                              "messages:\n{}".format(cmd_in_guest, output))
                vm_session.close()
                if cpu_mode == 'maximum':
                    check_vm_cpu_model(output.strip(), cmd_in_guest, test)

            # Added case: check the cpu xml after the domain is managedsaved and restored
            if test_operations:
                for item in test_operations.split(','):
                    if item == "managedsave_restore":
                        # (1) Managedsave the domain
                        virsh.managedsave(vm_name,
                                          ignore_status=False,
                                          debug=True)
                        check_feature_list(vm, feature_dict)
                        # (2) Restore the domain
                        virsh.restore(managed_save_file,
                                      ignore_status=False,
                                      debug=True)
                        # (3) Check the cpu mode and feature list
                        libvirt.check_dumpxml(vm, cpu_mode)
                        check_feature_list(vm, feature_dict)

    finally:
        logging.debug("Recover test environment")
        if os.path.exists(managed_save_file):
            virsh.managedsave_remove(vm_name, debug=True)
        if vm.is_alive():
            vm.destroy()
        libvirt.clean_up_snapshots(vm_name, domxml=bkxml)
        bkxml.sync()
                else:
                    vm_recover_check(option, libvirtd, check_shutdown)
    finally:
        # Restore test environment.

        # Ensure libvirtd is started
        if not libvirtd.is_running():
            libvirtd.start()
        if vm.is_paused():
            virsh.resume(vm_name)
        elif vm.is_dead():
            vm.start()
        # Wait for VM in running state
        wait_for_state("running")
        if autostart_bypass_cache:
            virsh.autostart(vm_name, "--disable",
                            ignore_status=True)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Wait for VM to be in shut off state
        utils_misc.wait_for(lambda: vm.state() == "shut off", 10)
        virsh.managedsave_remove(vm_name)
        vmxml_backup.sync()
        if multi_guests:
            for i in range(int(multi_guests)):
                virsh.remove_domain("%s_%s" % (vm_name, i),
                                    "--remove-all-storage")
        qemu_config.restore()
        libvirt_guests_config.restore()
        libvirtd.restart()
def run(test, params, env):
    """
    Test command: virsh managedsave-xxx
    including virsh managedsave-edit
              virsh managedsave-dumpxml
              virsh managedsave-define
              ...
    """
    vm_name = params.get('main_vm')
    checkpoint = params.get('checkpoint', '')
    error_msg = params.get('error_msg', '')
    ms_extra_options = params.get('ms_extra_options', '')
    pre_state = params.get('pre_state', '')
    status_error = 'yes' == params.get('status_error', 'no')

    vm = env.get_vm(vm_name)
    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': False}

    def start_and_login_vm():
        """
        Start vm and login, after which vm is accessible
        """
        vm.start()
        vm.wait_for_login().close()

    try:
        if checkpoint == 'dumpxml':
            # Check managedsave-dumpxml
            start_and_login_vm()
            virsh.managedsave(vm_name, **virsh_dargs)
            virsh.managedsave_dumpxml(vm_name, **virsh_dargs)
            tmp_dir = data_dir.get_tmp_dir()
            save_img_xml = os.path.join(tmp_dir, 'save_img.xml')
            managed_save_xml = os.path.join(tmp_dir, 'managed_save.xml')
            virsh.save_image_dumpxml(MANAGEDSAVE_FILE % vm_name, ' > %s'
                                     % save_img_xml, **virsh_dargs)
            virsh.managedsave_dumpxml(vm_name, ' > %s' % managed_save_xml,
                                      **virsh_dargs)
            result_need_check = process.run('diff %s %s' %
                                            (save_img_xml, managed_save_xml),
                                            shell=True, verbose=True)
        if checkpoint == 'secure_info':
            # Check managedsave-dumpxml with option --security-info
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            vm_xml.VMXML.set_graphics_attr(vm_name, {'passwd': '123456'})
            start_and_login_vm()
            virsh.managedsave(vm_name, **virsh_dargs)
            default_xml = virsh.managedsave_dumpxml(vm_name, **virsh_dargs).stdout_text
            if 'passwd' in default_xml:
                test.fail('Found "passwd" in dumped vm xml. '
                          'Secure info like "passwd" should not be dumped.')
            secure_xml = virsh.managedsave_dumpxml(vm_name, '--security-info',
                                                   **virsh_dargs).stdout_text
            if 'passwd' not in secure_xml:
                test.fail('Not found "passwd" in dumped vm xml.'
                          'Secure info like "passwd" should be dumped '
                          'with option "--security-info"')
        if checkpoint == 'define':
            # Make a change to a managedsave-dumped xml, redefine the vm
            # and check if the change takes effect
            start_option = '--paused' if pre_state == 'paused' else ''
            virsh.start(vm_name, start_option, **virsh_dargs)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            logging.debug(vmxml.devices)
            disk = vmxml.get_devices('disk')[0]
            img_path = disk.source.attrs['file']
            logging.info('Original image path: %s', img_path)
            # Copy old image to new image
            new_img_path = os.path.join(data_dir.get_tmp_dir(), 'test.img')
            shutil.copyfile(img_path, new_img_path)
            virsh.managedsave(vm_name, **virsh_dargs)
            xmlfile = os.path.join(data_dir.get_tmp_dir(), 'managedsave.xml')
            virsh.managedsave_dumpxml(vm_name, '>%s' % xmlfile, **virsh_dargs)
            # Make a change to xmlfile and managedsave-define with it
            with open(xmlfile) as file_xml:
                updated_xml = file_xml.read().replace(img_path, new_img_path)
            with open(xmlfile, 'w') as file_xml:
                file_xml.write(updated_xml)
            virsh.managedsave_define(vm_name, xmlfile, ms_extra_options, **virsh_dargs)
            virsh.start(vm_name, **virsh_dargs)
            xml_after_define = virsh.dumpxml(vm_name, **virsh_dargs).stdout_text
            if 'test.img' not in xml_after_define:
                test.fail('Not found "test.img" in vm xml after managedsave-define.'
                          'Modification to xml did not take effect.')
        if checkpoint == 'no_save':
            # Start a guest but do not managedsave it
            start_and_login_vm()
            virsh.dom_list('--all --managed-save', **virsh_dargs)
        if checkpoint == 'rm_after_save':
            # Remove saved file after managedsave a vm
            start_and_login_vm()
            virsh.managedsave(vm_name, **virsh_dargs)
            os.remove(MANAGEDSAVE_FILE % vm_name)
        if checkpoint == 'not_saved_corrupt':
            # Do not managedsave the vm, but create a fake managedsave file by
            # touching a file
            start_and_login_vm()
            virsh.dom_list('--all --managed-save', **virsh_dargs)
            process.run('touch %s' % MANAGEDSAVE_FILE % vm_name, verbose=True)
            params['clean_managed_save'] = True
        if checkpoint == 'exclusive_option':
            virsh.managedsave(vm_name, **virsh_dargs)

        # Test managedsave-edit, managedsave-dumpxml, managedsave-define
        if params.get('check_cmd_error', '') == 'yes':
            ms_command = params.get('ms_command', '')
            if ms_command == 'edit':
                result_need_check = virsh.managedsave_edit(vm_name,
                                                           ms_extra_options,
                                                           debug=True)
            if ms_command == 'dumpxml':
                result_need_check = virsh.managedsave_dumpxml(vm_name,
                                                              ms_extra_options,
                                                              debug=True)
            if ms_command == 'define':
                result_need_check = virsh.managedsave_define(vm_name,
                                                             bkxml.xml,
                                                             ms_extra_options,
                                                             debug=True)
        # If the result needs to be checked, check it
        if 'result_need_check' in locals():
            logging.info('Check command result.')
            libvirt.check_exit_status(result_need_check, status_error)
            if error_msg:
                libvirt.check_result(result_need_check, [error_msg])

    finally:
        if params.get('clean_managed_save'):
            os.remove(MANAGEDSAVE_FILE % vm_name)
        utils_libvirtd.libvirtd_restart()
        virsh.managedsave_remove(vm_name, debug=True)
        bkxml.sync()
Beispiel #28
0
def post_kill_virsh_while_managedsave(params, libvirtd, vm):
    """
    Cleanup for test kill_virsh_while_managedsave
    """
    virsh.managedsave_remove(vm.name)
def run(test, params, env):
    """
    Test libvirt hook scripts.

    1.Prepare test environment, destroy or suspend a VM.
    2.Perform test operation.
    3.Recover test environment.
    4.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': False}
    hook_file = params.get("hook_file", "/etc/libvirt/hooks/qemu")
    hook_log = params.get("hook_log", "/tmp/qemu.log")

    def prepare_hook_file(hook_op):
        """
        Create hook file.
        """
        logging.info("hook script: %s", hook_op)
        hook_lines = hook_op.split(';')
        hook_dir = os.path.dirname(hook_file)
        if not os.path.exists(hook_dir):
            os.mkdir(hook_dir)
        with open(hook_file, 'w') as hf:
            hf.write('\n'.join(hook_lines))
        os.chmod(hook_file, 0o755)

        # restart libvirtd
        libvirtd.restart()

    def check_hooks(opt):
        """
        Check hook operations in log file.
        """
        logging.debug("Trying to check the string '%s'"
                      " in hook log", opt)
        if not os.path.exists(hook_log):
            logging.debug("Log file doesn't exist")
            return False

        logs = None
        with open(hook_log, 'r') as lf:
            logs = lf.read()
        if not logs:
            return False

        logging.debug("Read from hook log file: %s", logs)
        if opt in logs:
            return True
        else:
            return False

    def start_stop_hook():
        """
        Do start/stop operation and check the results.
        """
        logging.info("Try to test start/stop hooks...")
        hook_para = "%s %s" % (hook_file, vm_name)
        prepare_hook_file(hook_script %
                          (vm_name, hook_log))
        vm.start()
        vm.wait_for_login().close()
        try:
            hook_str = hook_para + " prepare begin -"
            assert check_hooks(hook_str)
            hook_str = hook_para + " start begin -"
            assert check_hooks(hook_str)
            hook_str = hook_para + " started begin -"
            assert check_hooks(hook_str)
            # stop the vm
            vm.destroy()
            hook_str = hook_para + " stopped end -"
            assert check_hooks(hook_str)
            hook_str = hook_para + " release end -"
            assert check_hooks(hook_str)
        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Failed to check "
                      "start/stop hooks.")

    def save_restore_hook():
        """
        Do save/restore operation and check the results.
        """
        hook_para = "%s %s" % (hook_file, vm_name)
        save_file = os.path.join(data_dir.get_tmp_dir(),
                                 "%s.save" % vm_name)
        disk_src = vm.get_first_disk_devices()['source']
        if domainxml_test:
            disk_dist = "/tmp/%s.move" % vm_name
            shutil.copy(disk_src, disk_dist)
            script = (hook_script %
                      (vm_name, disk_src, disk_dist))
            prepare_hook_file(script)
        elif basic_test:
            prepare_hook_file(hook_script %
                              (vm_name, hook_log))
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if domainxml_test:
            disk_src_save = vm.get_first_disk_devices()['source']
            if disk_src != disk_src_save:
                test.fail("Failed to check hooks for"
                          " save operation")
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if os.path.exists(save_file):
            os.remove(save_file)
        if domainxml_test:
            disk_src_restore = vm.get_first_disk_devices()['source']
            if disk_dist != disk_src_restore:
                test.fail("Failed to check hooks for"
                          " restore operation")
            vm.destroy()
            if os.path.exists(disk_dist):
                os.remove(disk_dist)
            vmxml_backup.sync()
        if basic_test:
            hook_str = hook_para + " restore begin -"
            if not check_hooks(hook_str):
                test.fail("Failed to check "
                          "restore hooks.")

    def managedsave_hook():
        """
        Do managedsave operation and check the results.
        """
        hook_para = "%s %s" % (hook_file, vm_name)
        save_file = os.path.join(data_dir.get_tmp_dir(),
                                 "%s.save" % vm_name)
        disk_src = vm.get_first_disk_devices()['source']
        if domainxml_test:
            disk_dist = "/tmp/%s.move" % vm_name
            shutil.copy(disk_src, disk_dist)
            script = (hook_script %
                      (vm_name, disk_src, disk_dist))
            prepare_hook_file(script)
        elif basic_test:
            prepare_hook_file(hook_script %
                              (vm_name, hook_log))
        ret = virsh.managedsave(vm_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if domainxml_test:
            disk_src_save = vm.get_first_disk_devices()['source']
            if disk_src != disk_src_save:
                test.fail("Failed to check hooks for"
                          " managedsave operation")
        vm.start()
        if os.path.exists(save_file):
            os.remove(save_file)
        if domainxml_test:
            disk_src_restore = vm.get_first_disk_devices()['source']
            if disk_dist != disk_src_restore:
                test.fail("Failed to check hooks for"
                          " managedsave operation")
            vm.destroy()
            if os.path.exists(disk_dist):
                os.remove(disk_dist)
            vmxml_backup.sync()

        if basic_test:
            hook_str = hook_para + " restore begin -"
            if not check_hooks(hook_str):
                test.fail("Failed to check "
                          "managedsave hooks.")

    def libvirtd_hook():
        """
        Check the libvirtd hooks.
        """
        prepare_hook_file(hook_script %
                          (vm_name, hook_log))
        hook_para = "%s %s" % (hook_file, vm_name)
        libvirtd.restart()
        try:
            hook_str = hook_para + " reconnect begin -"
            assert check_hooks(hook_str)
        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Failed to check"
                      " libvirtd hooks")

    def daemon_hook():
        """
        Check the daemon hooks.
        """
        # stop daemon first
        libvirtd.stop()
        prepare_hook_file(hook_script % hook_log)
        try:
            libvirtd.start()
            hook_str = hook_file + " - start - start"
            assert check_hooks(hook_str)
            # Restart libvirtd and test again
            if os.path.exists(hook_log):
                os.remove(hook_log)
            libvirtd.restart()
            hook_str = hook_file + " - shutdown - shutdown"
            assert check_hooks(hook_str)
            hook_str = hook_file + " - start - start"
            assert check_hooks(hook_str)

            # kill the daemon with SIGHUP
            if os.path.exists(hook_log):
                os.remove(hook_log)
            utils_misc.signal_program('libvirtd', 1,
                                      '/var/run')
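            # SIGHUP makes libvirtd reload its configuration, which should
            # trigger the 'reload begin SIGHUP' hook entry checked next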
            hook_str = hook_file + " - reload begin SIGHUP"
            assert check_hooks(hook_str)

        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Failed to check"
                      " daemon hooks")

    def attach_hook():
        """
        Check attach hooks.
        """
        # Start a domain with qemu command.
        disk_src = vm.get_first_disk_devices()['source']
        vm_test = "foo"
        prepare_hook_file(hook_script %
                          (vm_test, hook_log))
        qemu_bin = params.get("qemu_bin", "/usr/libexec/qemu-kvm")
        if "ppc" in platform.machine():
            qemu_bin = "%s -machine pseries" % qemu_bin
        qemu_cmd = ("%s -drive file=%s,if=none,bus=0,unit=1"
                    " -monitor unix:/tmp/demo,"
                    "server,nowait -name %s" %
                    (qemu_bin, disk_src, vm_test))
        ret = process.run("%s &" % qemu_cmd, shell=True)
        pid = process.run("ps -ef | grep '%s' | grep -v grep | awk"
                          " '{print $2}'" % qemu_cmd, shell=True).stdout_text.strip()
        if not pid:
            test.fail("Cannot get pid of qemu command")
        ret = virsh.qemu_attach(pid, **virsh_dargs)
        if ret.exit_status:
            utils_misc.kill_process_tree(pid)
            test.fail("Cannot attach qemu process")
        else:
            virsh.destroy(vm_test)
        hook_str = hook_file + " " + vm_test + " attach begin -"
        if not check_hooks(hook_str):
            test.fail("Failed to check"
                      " attach hooks")

    def edit_iface(net_name):
        """
        Edit interface options for vm.
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
        iface_xml = vmxml.get_devices(device_type="interface")[0]
        vmxml.del_device(iface_xml)
        iface_xml.type_name = "network"
        iface_xml.source = {"network": net_name}
        del iface_xml.address
        vmxml.add_device(iface_xml)
        vmxml.sync()

    def network_hook():
        """
        Check network hooks.
        """
        # Set interface to use default network
        net_name = params.get("net_name", "default")
        edit_iface(net_name)
        prepare_hook_file(hook_script %
                          (net_name, hook_log))
        try:
            # destroy the network
            ret = virsh.net_destroy(net_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            hook_str = hook_file + " " + net_name + " stopped end -"
            assert check_hooks(hook_str)

            # start network
            ret = virsh.net_start(net_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            hook_str = hook_file + " " + net_name + " start begin -"
            assert check_hooks(hook_str)
            hook_str = hook_file + " " + net_name + " started begin -"
            assert check_hooks(hook_str)

            # plug an interface
            if vm.is_dead():
                vm.start()
                vm.wait_for_login().close()
            mac_addr = "52:54:00:9a:53:a9"
            ret = virsh.attach_interface(vm_name,
                                         ("network %s --mac %s" %
                                          (net_name, mac_addr)))
            libvirt.check_exit_status(ret)
            hook_str = hook_file + " " + net_name + " plugged begin -"
            assert check_hooks(hook_str)
            ret = virsh.detach_interface(vm_name,
                                         "network --mac %s" % mac_addr)
            libvirt.check_exit_status(ret)
            hook_str = hook_file + " " + net_name + " unplugged begin -"
            assert check_hooks(hook_str)
            # remove the log file
            if os.path.exists(hook_log):
                os.remove(hook_log)
            # destroy the domain
            vm.destroy()
            hook_str = hook_file + " " + net_name + " unplugged begin -"
            assert check_hooks(hook_str)
        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Failed to check"
                      " network hooks")

    def run_scale_test():
        """
        Try to start and stop domain many times.
        """
        prepare_hook_file(hook_script)
        loop_num = int(params.get("loop_num", 30))
        loop_timeout = int(params.get("loop_timeout", 600))
        cmd1 = ("for i in {1..%s};do echo $i 'start guest -';"
                "virsh start %s;sleep 1;echo $i 'stop guest -';"
                "virsh destroy %s;sleep 1;done;"
                % (loop_num, vm_name, vm_name))
        cmd2 = ("for i in {1..%s};do virsh list;sleep 1;done;"
                % loop_num * 2)
        utils_misc.run_parallel([cmd1, cmd2], timeout=loop_timeout)

    start_error = "yes" == params.get("start_error", "no")
    test_start_stop = "yes" == params.get("test_start_stop", "no")
    test_attach = "yes" == params.get("test_attach", "no")
    test_libvirtd = "yes" == params.get("test_libvirtd", "no")
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_saverestore = "yes" == params.get("test_saverestore", "no")
    test_daemon = "yes" == params.get("test_daemon", "no")
    test_network = "yes" == params.get("test_network", "no")
    basic_test = "yes" == params.get("basic_test", "yes")
    scale_test = "yes" == params.get("scale_test", "yes")
    domainxml_test = "yes" == params.get("domainxml_test", "no")

    # The hook script is provided from config
    hook_script = params.get("hook_script")
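    # Its exact content comes from the test configuration and is not shown here;
    # judging from the checks above it is assumed to be a small shell script
    # (lines joined with ';') that appends "$0 $@" for the matching object to
    # hook_log, which is what check_hooks() greps for.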

    # Destroy VM first
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        try:
            if test_start_stop:
                start_stop_hook()
            elif test_attach:
                attach_hook()
            elif start_error:
                prepare_hook_file(hook_script %
                                  (vm_name, hook_log))
            elif test_daemon:
                daemon_hook()
            elif test_network:
                network_hook()
            elif scale_test:
                run_scale_test()
            # Start the domain
            if vm.is_dead():
                vm.start()
                vm.wait_for_login().close()
            if test_libvirtd:
                libvirtd_hook()
            elif test_saverestore:
                save_restore_hook()
            elif test_managedsave:
                managedsave_hook()

        except virt_vm.VMStartError as e:
            logging.info(str(e))
            if start_error:
                pass
            else:
                test.fail('VM failed to start unexpectedly!')
        else:
            if start_error:
                test.fail('VM started unexpectedly')

    finally:
        # Recover VM.
        logging.info("Restoring vm...")
        if test_managedsave:
            virsh.managedsave_remove(vm_name)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if os.path.exists(hook_file):
            os.remove(hook_file)
        if os.path.exists(hook_log):
            os.remove(hook_log)
        libvirtd.restart()
        vmxml_backup.sync()