Example #1
0
 def kill_vm_by_signal_15():
     vm_pid = vm.get_pid()
     logging.info("VM: %s, PID: %s" % (vm.name, vm_pid))
     thread_pid = os.getpid()
     logging.info("Main Process ID is %s" % thread_pid)
     utils_misc.kill_process_tree(vm_pid, 15)
     return thread_pid
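All the examples below revolve around utils_misc.kill_process_tree from avocado-vt. As a rough reference for what that helper is expected to do, here is a minimal standalone sketch based on psutil (illustrative only; the function name and the psutil dependency are assumptions, not the avocado-vt implementation):

import signal

import psutil


def kill_process_tree_sketch(pid, sig=signal.SIGTERM):
    """Send `sig` to process `pid` and to all of its children (illustrative)."""
    try:
        parent = psutil.Process(pid)
    except psutil.NoSuchProcess:
        return
    # Signal the children first so none are missed if the parent exits
    # and they get reparented.
    for child in parent.children(recursive=True):
        try:
            child.send_signal(sig)
        except psutil.NoSuchProcess:
            pass
    parent.send_signal(sig)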
Example #3
0
 def attach_hook():
     """
     Check attach hooks.
     """
     # Start a domain with qemu command.
     disk_src = vm.get_first_disk_devices()['source']
     vm_test = "foo"
     prepare_hook_file(hook_script %
                       (vm_test, hook_log))
     qemu_bin = params.get("qemu_bin", "/usr/libexec/qemu-kvm")
     if "ppc" in platform.machine():
         qemu_bin = "%s -machine pseries" % qemu_bin
     qemu_cmd = ("%s -drive file=%s,if=none,bus=0,unit=1"
                 " -monitor unix:/tmp/demo,"
                 "server,nowait -name %s" %
                 (qemu_bin, disk_src, vm_test))
     ret = utils.run("%s &" % qemu_cmd)
     pid = utils.run("ps -ef | grep '%s' | grep -v grep | awk"
                     " '{print $2}'" % qemu_cmd).stdout.strip()
     if not pid:
         raise error.TestFail("Cannot get pid of qemu command")
     ret = virsh.qemu_attach(pid, **virsh_dargs)
     if ret.exit_status:
         utils_misc.kill_process_tree(pid)
         raise error.TestFail("Cannot attach qemu process")
     else:
         virsh.destroy(vm_test)
     hook_str = hook_file + " " + vm_test + " attach begin -"
     if not check_hooks(hook_str):
         raise error.TestFail("Failed to check"
                              " attach hooks")
Example #4
0
 def attach_hook():
     """
     Check attach hooks.
     """
     # Start a domain with qemu command.
     disk_src = vm.get_first_disk_devices()['source']
     vm_test = "foo"
     prepare_hook_file(hook_script %
                       (vm_test, hook_log))
     qemu_bin = params.get("qemu_bin", "/usr/libexec/qemu-kvm")
     if "ppc" in platform.machine():
         qemu_bin = "%s -machine pseries" % qemu_bin
     qemu_cmd = ("%s -drive file=%s,if=none,bus=0,unit=1"
                 " -monitor unix:/tmp/demo,"
                 "server,nowait -name %s" %
                 (qemu_bin, disk_src, vm_test))
     ret = process.run("%s &" % qemu_cmd, shell=True)
     pid = process.run("ps -ef | grep '%s' | grep -v grep | awk"
                       " '{print $2}'" % qemu_cmd, shell=True).stdout.strip()
     if not pid:
         test.fail("Cannot get pid of qemu command")
     ret = virsh.qemu_attach(pid, **virsh_dargs)
     if ret.exit_status:
         utils_misc.kill_process_tree(pid)
         test.fail("Cannot attach qemu process")
     else:
         virsh.destroy(vm_test)
     hook_str = hook_file + " " + vm_test + " attach begin -"
     if not check_hooks(hook_str):
         test.fail("Failed to check"
                   " attach hooks")
Example #5
0
 def attach_hook():
     """
     Check attach hooks.
     """
     # Start a domain with qemu command.
     disk_src = vm.get_first_disk_devices()['source']
     vm_test = "foo"
     prepare_hook_file(hook_script % (vm_test, hook_log))
     qemu_bin = params.get("qemu_bin", "/usr/libexec/qemu-kvm")
     qemu_cmd = ("%s -cdrom %s -monitor unix:/tmp/demo,"
                 "server,nowait -name %s" % (qemu_bin, disk_src, vm_test))
     ret = utils.run("%s &" % qemu_cmd)
     pid = utils.run("ps -ef | grep '%s' | grep -v grep | awk"
                     " '{print $2}'" % qemu_cmd).stdout.strip()
     if not pid:
         raise error.TestFail("Cannot get pid of qemu command")
     ret = virsh.qemu_attach(pid, **virsh_dargs)
     if ret.exit_status:
         utils_misc.kill_process_tree(pid)
         raise error.TestFail("Cannot attach qemu process")
     else:
         virsh.destroy(vm_test)
     hook_str = hook_file + " " + vm_test + " attach begin -"
     if not check_hooks(hook_str):
         raise error.TestFail("Failed to check" " attach hooks")
Example #6
0
 def attach_hook():
     """
     Check attach hooks.
     """
     # Start a domain with qemu command.
     disk_src = vm.get_first_disk_devices()['source']
     vm_test = "foo"
     prepare_hook_file(hook_script %
                       (vm_test, hook_log))
     qemu_bin = params.get("qemu_bin", "/usr/libexec/qemu-kvm")
     if "ppc" in platform.machine():
         qemu_cmd = ("%s -machine pseries"
                     " -drive file=%s,if=none,bus=0,unit=1"
                     " -monitor unix:/tmp/demo,"
                     "server,nowait -name %s" %
                     (qemu_bin, disk_src, vm_test))
     else:
         qemu_cmd = ("%s -drive file=%s,if=none,bus=0,unit=1"
                     " -monitor unix:/tmp/demo,"
                     "server,nowait -name %s" %
                     (qemu_bin, disk_src, vm_test))
     # After changing the above command, qemu-attach failed
     os.system('%s &' % qemu_cmd)
     sta, pid = process.getstatusoutput("pgrep qemu-kvm")
     if not pid:
         test.fail("Cannot get pid of qemu command")
     ret = virsh.qemu_attach(pid, **virsh_dargs)
     if ret.exit_status:
         utils_misc.kill_process_tree(pid)
         test.fail("Cannot attach qemu process")
     else:
         virsh.destroy(vm_test)
     hook_str = hook_file + " " + vm_test + " attach begin -"
     if not check_hooks(hook_str):
         test.fail("Failed to check attach hooks")
def run(test, params, env):
    """
    Test steps:

    1) Get the test parameters from params.
    2) Run unixbench on guest.
    3) Dump each VM and check result.
    4) Clean up.
    """
    vms = env.get_all_vms()
    unixbench_control_file = params.get("unixbench_controle_file",
                                        "unixbench5.control")
    # Run unixbench on guest.
    guest_unixbench_pids = []
    params["test_control_file"] = unixbench_control_file
    # Fork a new process to run unixbench on each guest.
    for vm in vms:
        params["main_vm"] = vm.name
        control_path = os.path.join(test.virtdir, "control",
                                    unixbench_control_file)

        session = vm.wait_for_login()
        command = utils_test.run_autotest(vm, session, control_path,
                                          None, None,
                                          params, copy_only=True)
        session.cmd("%s &" % command)

    for vm in vms:
        session = vm.wait_for_login()

        def _is_unixbench_running():
            return (not session.cmd_status("ps -ef|grep perl|grep Run"))
        if not utils_misc.wait_for(_is_unixbench_running, timeout=120):
            raise error.TestNAError("Failed to run unixbench in guest.\n"
                                    "Since we need to run an autotest of unixbench "
                                    "in the guest, please make sure the necessary "
                                    "packages (such as gcc, tar, bzip2) are installed.")

    logging.debug("Unixbench is already running in VMs.")

    try:
        dump_path = os.path.join(test.tmpdir, "dump_file")
        for vm in vms:
            vm.dump(dump_path)
            # Check the status after vm.dump()
            if not vm.is_alive():
                raise error.TestFail("VM is shutoff after dump.")
            if vm.wait_for_shutdown():
                raise error.TestFail("VM is going to shutdown after dump.")
            # Check VM is running normally.
            vm.wait_for_login()
    finally:
        for pid in guest_unixbench_pids:
            utils_misc.kill_process_tree(pid)
        # Destroy VM.
        for vm in vms:
            vm.destroy()
def run_libvirt_bench_dump_with_unixbench(test, params, env):
    """
    Test steps:

    1) Get the test parameters from params.
    2) Run unixbench on guest.
    3) Dump each VM and check result.
    4) Clean up.
    """
    vms = env.get_all_vms()
    unixbench_control_file = params.get("unixbench_controle_file", "unixbench5.control")
    # Run unixbench on guest.
    guest_unixbench_pids = []
    params["test_control_file"] = unixbench_control_file
    # Fork a new process to run unixbench on each guest.
    for vm in vms:
        params["main_vm"] = vm.name
        control_path = os.path.join(test.virtdir, "control", unixbench_control_file)

        session = vm.wait_for_login()
        command = utils_test.run_autotest(vm, session, control_path, None, None, params, copy_only=True)
        session.cmd("%s &" % command)

    for vm in vms:
        session = vm.wait_for_login()

        def _is_unixbench_running():
            return not session.cmd_status("ps -ef|grep perl|grep Run")

        if not utils_misc.wait_for(_is_unixbench_running, timeout=120):
            raise error.TestNAError(
                "Failed to run unixbench in guest.\n"
                "Since we need to run an autotest of unixbench "
                "in the guest, please make sure the necessary "
                "packages (such as gcc, tar, bzip2) are installed."
            )

    logging.debug("Unixbench is already running in VMs.")

    try:
        dump_path = os.path.join(test.tmpdir, "dump_file")
        for vm in vms:
            vm.dump(dump_path)
            # Check the status after vm.dump()
            if not vm.is_alive():
                raise error.TestFail("VM is shutoff after dump.")
            if vm.wait_for_shutdown():
                raise error.TestFail("VM is going to shutdown after dump.")
            # Check VM is running normally.
            vm.wait_for_login()
    finally:
        for pid in guest_unixbench_pids:
            utils_misc.kill_process_tree(pid)
        # Destroy VM.
        for vm in vms:
            vm.destroy()
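utils_misc.wait_for, used above to poll for the unixbench process, calls a function repeatedly until it returns a truthy value or the timeout expires. A minimal standalone equivalent for reference (illustrative; the real helper also takes `first`, `step` and `text` arguments):

import time


def wait_for_sketch(func, timeout, first=0.0, step=1.0):
    """Poll func() until it returns a truthy value or `timeout` seconds pass."""
    end_time = time.monotonic() + timeout
    time.sleep(first)
    while time.monotonic() < end_time:
        result = func()
        if result:
            return result
        time.sleep(step)
    return None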
Example #9
0
def raw_ping(command, timeout, session, output_func):
    """
    Low-level ping command execution.

    :param command: Ping command.
    :param timeout: Timeout of the ping command.
    :param session: Local execution hint or session to execute the ping command.
    :param output_func: Function used to log the output of the ping command.
    """
    if session is None:
        process = aexpect.run_bg(command,
                                 output_func=output_func,
                                 timeout=timeout)

        # Send SIGINT to stop the ping process on timeout. Ping catches
        # SIGINT, so we can still get the packet loss ratio even after
        # the timeout.
        if process.is_alive():
            utils_misc.kill_process_tree(process.get_pid(), signal.SIGINT)

        status = process.get_status()
        output = process.get_output()

        process.close()
        return status, output
    else:
        output = ""
        try:
            output = session.cmd_output(command,
                                        timeout=timeout,
                                        print_func=output_func)
        except aexpect.ShellTimeoutError:
            # Send ctrl+c (SIGINT) through ssh session
            session.send("\003")
            try:
                output2 = session.read_up_to_prompt(print_func=output_func)
                output += output2
            except aexpect.ExpectTimeoutError as e:
                output += e.output
                # We also need to use this session to query the return value
                session.send("\003")

        session.sendline(session.status_test_command)
        try:
            o2 = session.read_up_to_prompt()
        except aexpect.ExpectError:
            status = -1
        else:
            try:
                status = int(re.findall("\d+", o2)[0])
            except Exception:
                status = -1

        return status, output
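A possible local invocation of raw_ping as defined above, assuming the module-level imports it needs (aexpect, utils_misc, signal, re) are present; the concrete command and timeout are made up for illustration:

import logging

# Hypothetical usage: ping localhost ten times without a guest session and
# log the output line by line; status/output come back as in the tests above.
status, output = raw_ping("ping -c 10 127.0.0.1", 15, None, logging.debug)
logging.info("ping finished with status %s", status)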
Example #10
0
    def kill_vm_process(vm):
        """kill vm process

        :param vm: vm object
        """
        pid = vm.process.get_pid()
        logging.debug("Ending VM %s process (killing PID %s)", vm.name, pid)
        try:
            utils_misc.kill_process_tree(pid, 9, timeout=60)
            logging.debug("VM %s down (process killed)", vm.name)
        except RuntimeError:
            test.error("VM %s (PID %s) is a zombie!" %
                       (vm.name, vm.process.get_pid()))
Example #11
0
def raw_ping(command, timeout, session, output_func):
    """
    Low-level ping command execution.

    :param command: Ping command.
    :param timeout: Timeout of the ping command.
    :param session: Local execution hint or session to execute the ping command.
    :param output_func: Function used to log the output of the ping command.
    """
    if session is None:
        process = aexpect.run_bg(command, output_func=output_func,
                                 timeout=timeout)

        # Send SIGINT to stop the ping process on timeout. Ping catches
        # SIGINT, so we can still get the packet loss ratio even after
        # the timeout.
        if process.is_alive():
            utils_misc.kill_process_tree(process.get_pid(), signal.SIGINT)

        status = process.get_status()
        output = process.get_output()

        process.close()
        return status, output
    else:
        output = ""
        try:
            output = session.cmd_output(command, timeout=timeout,
                                        print_func=output_func)
        except aexpect.ShellTimeoutError:
            # Send ctrl+c (SIGINT) through ssh session
            session.send("\003")
            try:
                output2 = session.read_up_to_prompt(print_func=output_func)
                output += output2
            except aexpect.ExpectTimeoutError as e:
                output += e.output
                # We also need to use this session to query the return value
                session.send("\003")

        session.sendline(session.status_test_command)
        try:
            o2 = session.read_up_to_prompt()
        except aexpect.ExpectError:
            status = -1
        else:
            try:
                status = int(re.findall("\d+", o2)[0])
            except Exception:
                status = -1

        return status, output
Example #12
0
def run(test, params, env):
    """
    Emulate the poweroff under an IO workload (dd so far) with signal SIGKILL.

    1) Boot a VM
    2) Add IO workload for guest OS
    3) Sleep for a random time
    4) Kill the VM
    5) Check the image and verify that no errors other than cluster leaks are found

    :param test: Kvm test object
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    login_timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=login_timeout)
    session2 = vm.wait_for_login(timeout=login_timeout)

    bg_cmd = params.get("background_cmd")
    error_context.context("Add IO workload for guest OS.", test.log.info)
    session.cmd_output(bg_cmd, timeout=60)

    error_context.context("Verify the background process is running")
    check_cmd = params.get("check_cmd")
    session2.cmd(check_cmd, timeout=360)

    error_context.context("Sleep for a random time", test.log.info)
    time.sleep(random.randrange(30, 100))
    session2.cmd(check_cmd, timeout=360)

    error_context.context("Kill the VM", test.log.info)
    utils_misc.kill_process_tree(vm.process.get_pid(), timeout=60)
    error_context.context("Check img after kill VM", test.log.info)
    base_dir = data_dir.get_data_dir()
    image_name = params.get("image_name")
    image = qemu_storage.QemuImg(params, base_dir, image_name)
    try:
        image.check_image(params, base_dir)
    except Exception as exc:
        if "Leaked clusters" not in six.text_type(exc):
            raise
        error_context.context("Detected cluster leaks, try to repair it",
                              test.log.info)
        restore_cmd = params.get("image_restore_cmd") % image.image_filename
        cmd_status = process.system(restore_cmd, shell=True)
        if cmd_status:
            test.fail("Failed to repair cluster leaks on the image")
Example #13
0
def run(test, params, env):
    """
    Emulate the poweroff under an IO workload (dd so far) with signal SIGKILL.

    1) Boot a VM
    2) Add IO workload for guest OS
    3) Sleep for a random time
    4) Kill the VM
    5) Check the image and verify that no errors other than cluster leaks are found

    :param test: Kvm test object
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    login_timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=login_timeout)
    session2 = vm.wait_for_login(timeout=login_timeout)

    bg_cmd = params.get("background_cmd")
    error_context.context("Add IO workload for guest OS.", logging.info)
    session.cmd_output(bg_cmd, timeout=60)

    error_context.context("Verify the background process is running")
    check_cmd = params.get("check_cmd")
    session2.cmd(check_cmd, timeout=360)

    error_context.context("Sleep for a random time", logging.info)
    time.sleep(random.randrange(30, 100))
    session2.cmd(check_cmd, timeout=360)

    error_context.context("Kill the VM", logging.info)
    utils_misc.kill_process_tree(vm.process.get_pid(), timeout=60)
    error_context.context("Check img after kill VM", logging.info)
    base_dir = data_dir.get_data_dir()
    image_name = params.get("image_name")
    image = qemu_storage.QemuImg(params, base_dir, image_name)
    try:
        image.check_image(params, base_dir)
    except Exception as exc:
        if "Leaked clusters" not in six.text_type(exc):
            raise
        error_context.context("Detected cluster leaks, try to repair it",
                              logging.info)
        restore_cmd = params.get("image_restore_cmd") % image.image_filename
        cmd_status = process.system(restore_cmd, shell=True)
        if cmd_status:
            test.fail("Failed to repair cluster leaks on the image")
    def expose_nonexist_bitmap(self):
        def _nbd_expose_cmd(qemu_nbd, filename, local_image, params):
            cmd_dict = {
                "export_format": "",
                "persistent": "-t",
                "port": "",
                "filename": "",
                "fork": "--fork",
                "pid_file": "",
                "bitmap": "",
                }
            export_cmd = ('{export_format} {persistent} {port} {bitmap} '
                          '{fork} {pid_file} {filename}')
            pid_file = utils_misc.generate_tmp_file_name('%s_nbd_server'
                                                         % local_image, 'pid')
            cmd_dict['pid_file'] = '--pid-file %s' % pid_file
            cmd_dict['filename'] = filename
            if params.get('nbd_export_format'):
                cmd_dict['export_format'] = '-f %s' % params['nbd_export_format']
            else:
                if params.get('nbd_port'):
                    cmd_dict['port'] = '-p %s' % params['nbd_port']
            if params.get('nbd_export_bitmaps'):
                cmd_dict['bitmap'] = "".join(
                    [" -B %s" % _ for _ in params['nbd_export_bitmaps'].split()])
            cmdline = qemu_nbd + ' ' + string.Formatter().format(export_cmd,
                                                                 **cmd_dict)
            return pid_file, cmdline

        logging.info("Export inconsistent bitmap with qemu-nbd")
        pid_file, cmd = _nbd_expose_cmd(self.nbd_exports[0]._qemu_nbd,
                                        self.nbd_exports[0]._local_filename,
                                        self.nbd_exports[0]._tag,
                                        self.nbd_exports[0]._image_params)
        result = process.run(cmd, ignore_status=True, shell=True,
                             ignore_bg_processes=True)
        if result.exit_status == 0:
            with open(pid_file, "r") as pid_file_fd:
                qemu_nbd_pid = int(pid_file_fd.read().strip())
            os.unlink(pid_file)
            utils_misc.kill_process_tree(qemu_nbd_pid, 9, timeout=60)
            self.test.fail("Can expose image with a non-exist bitmap")

        error_msg = self.params.get("error_msg") % self.bitmaps[0]
        if error_msg not in result.stderr.decode():
            self.test.fail(result.stderr.decode())
Example #15
0
    def expose_inconsistent_bitmap(self):
        logging.info("Export inconsistent bitmap with qemu-nbd")
        img_path = data_dir.get_data_dir()
        qemu_nbd_cmd = utils_misc.get_qemu_nbd_binary(self.params)
        cmd = self.params.get("export_cmd") % (qemu_nbd_cmd, self.bitmaps[0],
                                               img_path)
        result = process.run(cmd,
                             ignore_status=True,
                             shell=True,
                             ignore_bg_processes=True)
        if result.exit_status == 0:
            ck_qemunbd_pid = self.params.get("ck_qemunbd_pid")
            qemu_nbd_ck = process.run(ck_qemunbd_pid,
                                      ignore_status=True,
                                      shell=True,
                                      ignore_bg_processes=True)
            qemu_nbd_pid = qemu_nbd_ck.stdout_text.strip()
            utils_misc.kill_process_tree(qemu_nbd_pid, 9, timeout=60)
            self.test.fail("Can expose image with a non-exist bitmap")

        error_msg = self.params.get("error_msg") % self.bitmaps[0]
        if error_msg not in result.stderr.decode():
            self.test.fail(result.stderr.decode())
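The `ck_qemunbd_pid` parameter used above must print the PID of the background qemu-nbd export. A plausible host-side lookup is sketched below (hypothetical; the real value comes from the test configuration):

from avocado.utils import process

# Hypothetical: resolve the oldest matching qemu-nbd process started above.
status, qemu_nbd_pid = process.getstatusoutput("pgrep -o qemu-nbd")
if status != 0 or not qemu_nbd_pid:
    raise RuntimeError("qemu-nbd process not found")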
Example #16
0
def run(test, params, env):
    """
    Test command: virsh domstate.

    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh domstate operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    libvirtd_state = params.get("libvirtd", "on")
    vm_ref = params.get("domstate_vm_ref")
    status_error = (params.get("status_error", "no") == "yes")
    extra = params.get("domstate_extra", "")
    vm_action = params.get("domstate_vm_action", "")
    vm_oncrash_action = params.get("domstate_vm_oncrash")
    reset_action = "yes" == params.get("reset_action", "no")
    dump_option = params.get("dump_option", "")
    start_action = params.get("start_action", "normal")
    kill_action = params.get("kill_action", "normal")
    check_libvirtd_log = params.get("check_libvirtd_log", "no")
    err_msg = params.get("err_msg", "")
    remote_uri = params.get("remote_uri")

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # Back up xml file.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Back up qemu.conf
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    # Config libvirtd log
    if check_libvirtd_log == "yes":
        libvirtd_conf = utils_config.LibvirtdConfig()
        libvirtd_log_file = os.path.join(data_dir.get_tmp_dir(),
                                         "libvirtd.log")
        libvirtd_conf["log_level"] = '1'
        libvirtd_conf["log_filters"] = ('"1:json 1:libvirt 1:qemu 1:monitor '
                                        '3:remote 4:event"')
        libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_file
        logging.debug("the libvirtd config file content is:\n %s" %
                      libvirtd_conf)
        libvirtd.restart()

    # Get image file
    image_source = vm.get_first_disk_devices()['source']
    logging.debug("image source: %s" % image_source)
    new_image_source = image_source + '.rename'

    dump_path = os.path.join(data_dir.get_tmp_dir(), "dump/")
    logging.debug("dump_path: %s", dump_path)
    try:
        os.mkdir(dump_path)
    except OSError:
        # If the path already exists then pass
        pass
    dump_file = ""
    try:
        # Use less guest memory so that dumping core takes less time
        # and does not time out the test case
        if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
            memory_value = int(params.get("memory_value", "2097152"))
            memory_unit = params.get("memory_unit", "KiB")
            vmxml.set_memory(memory_value)
            vmxml.set_memory_unit(memory_unit)
            logging.debug(vmxml)
            vmxml.sync()

        if vm_action == "crash":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml.on_crash = vm_oncrash_action
            if not vmxml.xmltreefile.find('devices').findall('panic'):
                # Add <panic> device to domain
                panic_dev = Panic()
                if "ppc" not in platform.machine():
                    panic_dev.addr_type = "isa"
                    panic_dev.addr_iobase = "0x505"
                vmxml.add_device(panic_dev)
            vmxml.sync()
            # Config auto_dump_path in qemu.conf
            qemu_conf.auto_dump_path = dump_path
            libvirtd.restart()
            if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
                dump_file = dump_path + "*" + vm_name[:20] + "-*"
            # Start VM and check the panic device
            virsh.start(vm_name, ignore_status=False)
            vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
            # Skip this test if no panic device is found
            if not vmxml_new.xmltreefile.find('devices').findall('panic'):
                test.cancel("No 'panic' device in the guest. Maybe your "
                            "libvirt version doesn't support it.")
        try:
            if vm_action == "suspend":
                virsh.suspend(vm_name, ignore_status=False)
            elif vm_action == "resume":
                virsh.suspend(vm_name, ignore_status=False)
                virsh.resume(vm_name, ignore_status=False)
            elif vm_action == "destroy":
                virsh.destroy(vm_name, ignore_status=False)
            elif vm_action == "start":
                virsh.destroy(vm_name, ignore_status=False)
                if start_action == "rename":
                    # rename the guest image file to make guest fail to start
                    os.rename(image_source, new_image_source)
                    virsh.start(vm_name, ignore_status=True)
                else:
                    virsh.start(vm_name, ignore_status=False)
                    if start_action == "restart_libvirtd":
                        libvirtd.restart()
            elif vm_action == "kill":
                if kill_action == "stop_libvirtd":
                    libvirtd.stop()
                    utils_misc.kill_process_by_pattern(vm_name)
                    libvirtd.restart()
                elif kill_action == "reboot_vm":
                    virsh.reboot(vm_name, ignore_status=False)
                    utils_misc.kill_process_tree(vm.get_pid(), signal.SIGKILL)
                else:
                    utils_misc.kill_process_tree(vm.get_pid(), signal.SIGKILL)
            elif vm_action == "crash":
                session = vm.wait_for_login()
                session.cmd("service kdump stop", ignore_all_errors=True)
                # Enable sysRq
                session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                # Send ALT-SysRq-c to crash the VM. The command will not
                # return because the VM crashes, so use a short timeout for
                # the 'destroy' and 'preserve' actions. The 'restart',
                # 'coredump-restart' and 'coredump-destroy' actions need
                # more time to dump the core file or restart the OS, so
                # keep the default session command timeout (60s).
                try:
                    if vm_oncrash_action in ['destroy', 'preserve']:
                        timeout = 3
                    else:
                        timeout = 60
                    session.cmd("echo c > /proc/sysrq-trigger",
                                timeout=timeout)
                except (ShellTimeoutError, ShellProcessTerminatedError):
                    pass
                session.close()
            elif vm_action == "dump":
                dump_file = dump_path + "*" + vm_name + "-*"
                virsh.dump(vm_name,
                           dump_file,
                           dump_option,
                           ignore_status=False)
        except process.CmdError as detail:
            test.error("Guest prepare action error: %s" % detail)

        if libvirtd_state == "off":
            libvirtd.stop()

        # A timing issue can cause the test to check domstate before the
        # prior kill action has completed
        if vm_action == "kill":
            utils_misc.wait_for(vm.is_dead, timeout=20)

        if remote_uri:
            remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
            remote_pwd = params.get("remote_pwd", None)
            remote_user = params.get("remote_user", "root")
            if remote_ip.count("EXAMPLE.COM"):
                test.cancel("Test 'remote' parameters not setup")
            ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)

        result = virsh.domstate(vm_ref,
                                extra,
                                ignore_status=True,
                                debug=True,
                                uri=remote_uri)
        status = result.exit_status
        output = result.stdout.strip()

        # check status_error
        if status_error:
            if not status:
                if libvirtd_state == "off" and libvirt_version.version_compare(
                        5, 6, 0):
                    logging.info(
                        "From libvirt version 5.6.0 libvirtd is restarted "
                        "and command should succeed.")
                else:
                    test.fail("Run successfully with wrong command!")
        else:
            if status or not output:
                test.fail("Run failed with right command")
            if extra.count("reason"):
                if vm_action == "suspend":
                    # If not, will cost long time to destroy vm
                    virsh.destroy(vm_name)
                    if not output.count("user"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "resume":
                    if not output.count("unpaused"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "destroy":
                    if not output.count("destroyed"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "start":
                    if start_action == "rename":
                        if not output.count("shut off (failed)"):
                            test.fail(err_msg % vm_action)
                    else:
                        if not output.count("booted"):
                            test.fail(err_msg % vm_action)
                elif vm_action == "kill":
                    if not output.count("crashed"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "crash":
                    if not check_crash_state(output, vm_oncrash_action,
                                             vm_name, dump_file):
                        test.fail(err_msg % vm_action)
                    # VM will be in preserved state, perform virsh reset
                    # and check VM reboots and domstate reflects running
                    # state from crashed state as bug is observed here
                    if vm_oncrash_action == "preserve" and reset_action:
                        virsh_dargs = {'debug': True, 'ignore_status': True}
                        ret = virsh.reset(vm_name, **virsh_dargs)
                        libvirt.check_exit_status(ret)
                        ret = virsh.domstate(vm_name, extra,
                                             **virsh_dargs).stdout.strip()
                        if "paused (crashed)" not in ret:
                            test.fail("vm fails to change state from crashed"
                                      " to paused after virsh reset")
                        # it will be in paused (crashed) state after reset
                        # and resume is required for the vm to reboot
                        ret = virsh.resume(vm_name, **virsh_dargs)
                        libvirt.check_exit_status(ret)
                        vm.wait_for_login()
                        cmd_output = virsh.domstate(vm_name,
                                                    '--reason').stdout.strip()
                        if "running" not in cmd_output:
                            test.fail("guest state failed to get updated")
                    if vm_oncrash_action in [
                            'coredump-destroy', 'coredump-restart'
                    ]:
                        if not find_dump_file:
                            test.fail("Core dump file is not created in dump "
                                      "path: %s" % dump_path)
                    # To cover bug 1178652
                    if (vm_oncrash_action == "rename-restart"
                            and check_libvirtd_log == "yes"):
                        libvirtd.restart()
                        if not os.path.exists(libvirtd_log_file):
                            test.fail("Expected VM log file: %s not exists" %
                                      libvirtd_log_file)
                        cmd = ("grep -nr '%s' %s" %
                               (err_msg, libvirtd_log_file))
                        if not process.run(cmd, ignore_status=True,
                                           shell=True).exit_status:
                            test.fail(
                                "Find error message %s from log file: %s." %
                                (err_msg, libvirtd_log_file))
                elif vm_action == "dump":
                    if dump_option == "--live":
                        if not output.count("running (unpaused)"):
                            test.fail(err_msg % vm_action)
                    elif dump_option == "--crash":
                        if not output.count("shut off (crashed)"):
                            test.fail(err_msg % vm_action)
            if vm_ref == "remote":
                if not (re.search("running", output) or re.search(
                        "blocked", output) or re.search("idle", output)):
                    test.fail("Run failed with right command")
    finally:
        qemu_conf.restore()
        if check_libvirtd_log == "yes":
            libvirtd_conf.restore()
            if os.path.exists(libvirtd_log_file):
                os.remove(libvirtd_log_file)
        libvirtd.restart()
        if vm_action == "start" and start_action == "rename":
            os.rename(new_image_source, image_source)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if os.path.exists(dump_path):
            shutil.rmtree(dump_path)
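For the crash cases above, domstate is sampled once right after the action. A hedged alternative is to poll until the crashed state is reported, reusing the virsh wrapper and utils_misc.wait_for already imported by the test (illustrative, not part of the original test):

# Illustrative: wait until 'virsh domstate --reason' reports the crash instead
# of sampling the state a single time.
def _vm_crashed():
    state = virsh.domstate(vm_name, "--reason", ignore_status=True).stdout.strip()
    return "crashed" in state


utils_misc.wait_for(_vm_crashed, timeout=60)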
Example #17
0
                    logging.info("Autotest job did not end, start a round of "
                                 "migration")
                    vm.migrate(timeout=mig_timeout, protocol=mig_protocol)
            else:
                session.cmd_output("./autotest-local --args=\"%s\" --verbose"
                                   " control" % (control_args),
                                   timeout=timeout,
                                   print_func=logging.info)
        finally:
            logging.info("------------- End of test output ------------")
            if migrate_background and bg:
                bg.join()
            # Do some cleanup work on host if test need a server.
            if server_process:
                if server_process.is_alive():
                    utils_misc.kill_process_tree(server_process.get_pid(),
                                                 signal.SIGINT)
                server_process.close()

                # Remove the result dir produced by server_process.
                server_result = os.path.join(autotest_path,
                                             "results",
                                             os.path.basename(server_control_path))
                if os.path.isdir(server_result):
                    utils.safe_rmdir(server_result)
                # Remove the control file for server.
                if os.path.exists(server_control_path):
                    os.remove(server_control_path)

    except aexpect.ShellTimeoutError:
        if vm.is_alive():
            get_results(destination_autotest_path)
Example #18
0
def run(test, params, env):
    """
    Qemu reboot test:
    1) Boot up a windows guest.
    2) Run stress tool on host.
    3) After guest starts up, start the ftrace.
    4) Reboot VM inside guest.
    5.1) If the guest reboots successfully, stop trace-cmd and remove the
         trace.dat file.
    5.2) If the guest hangs, stop trace-cmd and generate the readable report
         file.
    6) In case 5.2, check whether trace.txt includes the error log.
    7) Repeat steps 3~6.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    def find_trace_cmd():
        if process.system("ps -a | grep trace-cmd", ignore_status=True,
                          shell=True):
            return False
        else:
            return True

    if os.system("which trace-cmd"):
        test.cancel("Please install trace-cmd.")

    timeout = float(params.get("login_timeout", 240))
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=timeout)

    reboot_method = params["reboot_method"]
    stress_cmd = params.get("stress_cmd", "stress --vm 4 --vm-bytes 1000M")

    trace_o = os.path.join(test.debugdir, "trace.dat")
    trace_cmd = "trace-cmd record -b 20000 -e kvm -o %s" % trace_o
    trace_cmd = params.get("trace_cmd", trace_cmd)
    re_trace = params.get("re_trace", "kvm_inj_exception:    #GP")

    report_file = os.path.join(test.debugdir, "trace.txt")
    trace_report_cmd = "trace-cmd report -i %s > %s " % (trace_o, report_file)
    try:
        error_context.context("Run stress tool on host.", logging.info)
        stress_job = utils_misc.BgJob(stress_cmd)
        # Reboot the VM
        for num in range(int(params.get("reboot_count", 1))):
            error_context.context("Reboot guest '%s'. Repeat %d" %
                                  (vm.name, num + 1), logging.info)
            trace_job = utils_misc.BgJob(trace_cmd)
            try:
                session = vm.reboot(session,
                                    reboot_method,
                                    0,
                                    timeout)
            except Exception:
                txt = "stop the trace-cmd and generate the readable report."
                error_context.context(txt, logging.info)
                os.kill(trace_job.sp.pid, signal.SIGINT)
                if not utils_misc.wait_for(lambda: not find_trace_cmd(),
                                           120, 60, 3):
                    logging.warn("trace-cmd could not finish after 120s.")
                trace_job = None
                process.system(trace_report_cmd, shell=True)
                with open(report_file) as report_f:
                    report_txt = report_f.read()
                txt = "Check whether the trace.txt includes the error log."
                error_context.context(txt, logging.info)
                if re.findall(re_trace, report_txt, re.S):
                    msg = "Found %s in trace log %s" % (re_trace, report_file)
                    test.fail(msg)
            else:
                txt = "stop the trace-cmd and remove the trace.dat file."
                error_context.context(txt, logging.info)
                os.kill(trace_job.sp.pid, signal.SIGINT)
                if not utils_misc.wait_for(lambda: not find_trace_cmd(),
                                           120, 60, 3):
                    logging.warn("trace-cmd could not finish after 120s.")
                trace_job = None
                process.system("rm -rf %s" % trace_o, timeout=60)
    finally:
        if session:
            session.close()
        if stress_job and stress_job.sp.poll() is None:
            utils_misc.kill_process_tree(stress_job.sp.pid, 9)
        if trace_job:
            if trace_job.sp.poll() is None:
                os.kill(trace_job.sp.pid, signal.SIGINT)
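The interrupt-then-wait pattern around trace-cmd appears twice above. A small helper sketch that wraps it, reusing find_trace_cmd and utils_misc.wait_for from this test (illustrative only, not part of the original test):

import os
import signal


def stop_trace_cmd(job, timeout=120):
    """Interrupt a trace-cmd BgJob and wait for it to flush and exit (illustrative)."""
    os.kill(job.sp.pid, signal.SIGINT)
    return utils_misc.wait_for(lambda: not find_trace_cmd(), timeout, 60, 3)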
Example #19
0
def run(test, params, env):
    """
    Test steps:

    1) Get the test parameters from params.
    2) Run unixbench on guest.
    3) Run domstate_switch test for each VM.
    4) Clean up.
    """
    vms = env.get_all_vms()
    unixbench_control_file = params.get("unixbench_controle_file",
                                        "unixbench5.control")
    timeout = int(params.get("LB_domstate_with_unixbench_loop_time", "600"))
    # Run unixbench on guest.
    params["test_control_file"] = unixbench_control_file
    # Fork a new process to run unixbench on each guest.
    for vm in vms:
        params["main_vm"] = vm.name
        control_path = os.path.join(test.virtdir, "control",
                                    unixbench_control_file)

        session = vm.wait_for_login()
        command = utils_test.run_autotest(vm,
                                          session,
                                          control_path,
                                          None,
                                          None,
                                          params,
                                          copy_only=True)
        session.cmd("%s &" % command)

    for vm in vms:
        session = vm.wait_for_login()

        def _is_unixbench_running():
            return (not session.cmd_status("ps -ef|grep perl|grep Run"))

        if not utils_misc.wait_for(_is_unixbench_running, timeout=120):
            test.cancel("Failed to run unixbench in guest.\n"
                        "Since we need to run a autotest of unixbench "
                        "in guest, so please make sure there are some "
                        "necessary packages in guest, such as gcc, tar, bzip2")
    logging.debug("Unixbench is already running in VMs.")

    # Run unixbench on host.
    from autotest.client import common
    autotest_client_dir = os.path.dirname(common.__file__)
    autotest_local_path = os.path.join(autotest_client_dir, "autotest-local")
    unixbench_control_path = os.path.join(data_dir.get_root_dir(), "shared",
                                          "control", unixbench_control_file)
    args = [
        autotest_local_path, unixbench_control_path, '--verbose', '-t',
        unixbench_control_file
    ]
    host_unixbench_process = subprocess.Popen(args)

    try:
        # Create a BackgroundTest for each vm to run test domstate_switch.
        backgroud_tests = []
        for vm in vms:
            bt = utils_test.BackgroundTest(func_in_thread, [vm, timeout, test])
            bt.start()
            backgroud_tests.append(bt)

        for bt in backgroud_tests:
            bt.join()
    finally:
        # Kill process on host running unixbench.
        utils_misc.kill_process_tree(host_unixbench_process.pid)
        # Remove the result dir produced by subprocess host_unixbench_process.
        unixbench_control_result = os.path.join(autotest_client_dir, "results",
                                                unixbench_control_file)
        if os.path.isdir(unixbench_control_result):
            shutil.rmtree(unixbench_control_result)
    def kill_qemu_and_start_vm(self):
        """Forcely killing qemu-kvm can make bitmap inconsistent"""

        kill_process_tree(self.main_vm.get_pid(), signal.SIGKILL, timeout=20)
        self.main_vm.create()
        self.main_vm.verify_alive()
def run(test, params, env):
    """
    Test steps:

    1) Get the test parameters from params.
    2) Run unixbench on guest.
    3) Run domstate_switch test for each VM.
    4) Clean up.
    """
    vms = env.get_all_vms()
    unixbench_control_file = params.get("unixbench_controle_file",
                                        "unixbench5.control")
    timeout = int(params.get("LB_domstate_with_unixbench_loop_time", "600"))
    # Run unixbench on guest.
    guest_unixbench_pids = []
    params["test_control_file"] = unixbench_control_file
    # Fork a new process to run unixbench on each guest.
    for vm in vms:
        params["main_vm"] = vm.name
        control_path = os.path.join(test.virtdir, "control",
                                    unixbench_control_file)

        session = vm.wait_for_login()
        command = utils_test.run_autotest(vm, session, control_path,
                                          None, None,
                                          params, copy_only=True)
        session.cmd("%s &" % command)

    for vm in vms:
        session = vm.wait_for_login()

        def _is_unixbench_running():
            return (not session.cmd_status("ps -ef|grep perl|grep Run"))
        if not utils_misc.wait_for(_is_unixbench_running, timeout=120):
            raise error.TestNAError("Failed to run unixbench in guest.\n"
                                    "Since we need to run a autotest of unixbench "
                                    "in guest, so please make sure there are some "
                                    "necessary packages in guest, such as gcc, tar, bzip2")
    logging.debug("Unixbench is already running in VMs.")

    # Run unixbench on host.
    from autotest.client import common
    autotest_client_dir = os.path.dirname(common.__file__)
    autotest_local_path = os.path.join(autotest_client_dir, "autotest-local")
    unixbench_control_path = os.path.join(data_dir.get_root_dir(),
                                          "shared", "control",
                                          unixbench_control_file)
    args = [autotest_local_path, unixbench_control_path, '--verbose',
            '-t', unixbench_control_file]
    host_unixbench_process = subprocess.Popen(args)

    try:
        # Create a BackgroundTest for each vm to run test domstate_switch.
        backgroud_tests = []
        for vm in vms:
            bt = utils_test.BackgroundTest(func_in_thread, [vm, timeout])
            bt.start()
            backgroud_tests.append(bt)

        for bt in backgroud_tests:
            bt.join()
    finally:
        # Kill process on host running unixbench.
        utils_misc.kill_process_tree(host_unixbench_process.pid)
        # Remove the result dir produced by subprocess host_unixbench_process.
        unixbench_control_result = os.path.join(autotest_client_dir,
                                                "results",
                                                unixbench_control_file)
        if os.path.isdir(unixbench_control_result):
            shutil.rmtree(unixbench_control_result)
Example #22
0
def run(test, params, env):
    """
    Qemu reboot test:
    1) Boot up a windows guest.
    2) Run stress tool on host.
    3) After guest starts up, start the ftrace.
    4) Reboot VM inside guest.
    5.1) If the guest reboots successfully, stop trace-cmd and remove the
         trace.dat file.
    5.2) If the guest hangs, stop trace-cmd and generate the readable report
         file.
    6) In case 5.2, check whether trace.txt includes the error log.
    7) Repeat steps 3~6.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def find_trace_cmd():
        if utils.system("ps -a | grep trace-cmd", ignore_status=True):
            return False
        else:
            return True

    if os.system("which trace-cmd"):
        raise error.TestNAError("Please install trace-cmd.")

    timeout = float(params.get("login_timeout", 240))
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=timeout)

    reboot_method = params["reboot_method"]
    stress_cmd = params.get("stress_cmd", "stress --vm 4 --vm-bytes 1000M")

    trace_o = os.path.join(test.debugdir, "trace.dat")
    trace_cmd = "trace-cmd record -b 20000 -e kvm -o %s" % trace_o
    trace_cmd = params.get("trace_cmd", trace_cmd)
    re_trace = params.get("re_trace", "kvm_inj_exception:    #GP")

    report_file = os.path.join(test.debugdir, "trace.txt")
    trace_report_cmd = "trace-cmd report -i %s > %s " % (trace_o, report_file)
    try:
        error.context("Run stress tool on host.", logging.info)
        stress_job = utils.BgJob(stress_cmd)
        # Reboot the VM
        for num in xrange(int(params.get("reboot_count", 1))):
            error.context("Reboot guest '%s'. Repeat %d" % (vm.name, num + 1),
                          logging.info)
            trace_job = utils.BgJob(trace_cmd)
            try:
                session = vm.reboot(session, reboot_method, 0, timeout)
            except Exception, err:
                txt = "stop the trace-cmd and generate the readable report."
                error.context(txt, logging.info)
                os.kill(trace_job.sp.pid, signal.SIGINT)
                if not utils_misc.wait_for(lambda: not find_trace_cmd(), 180,
                                           60, 3):
                    logging.warn("trace-cmd could not finish after 120s.")
                trace_job = None
                utils.system(trace_report_cmd)
                report_txt = file(report_file).read()
                txt = "Check whether the trace.txt includes the error log."
                error.context(txt, logging.info)
                if re.findall(re_trace, report_txt, re.S):
                    msg = "Found %s in trace log %s" % (re_trace, report_file)
                    logging.info(msg)
                    raise error.TestFail(msg)
            else:
                txt = "stop the trace-cmd and remove the trace.dat file."
                error.context(txt, logging.info)
                os.kill(trace_job.sp.pid, signal.SIGINT)
                if not utils_misc.wait_for(lambda: not find_trace_cmd(), 120,
                                           60, 3):
                    logging.warn("trace-cmd could not finish after 120s.")
                trace_job = None
                utils.system("rm -rf %s" % trace_o, timeout=60)
    finally:
        if session:
            session.close()
        if stress_job and stress_job.sp.poll() is None:
            utils_misc.kill_process_tree(stress_job.sp.pid, 9)
        if trace_job:
            if trace_job.sp.poll() is None:
                os.kill(trace_job.sp.pid, signal.SIGINT)
Example #23
0
def run(test, params, env):
    """
    Test command: virsh domstate.

    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh domstate operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    libvirtd_state = params.get("libvirtd", "on")
    vm_ref = params.get("domstate_vm_ref")
    status_error = (params.get("status_error", "no") == "yes")
    extra = params.get("domstate_extra", "")
    vm_action = params.get("domstate_vm_action", "")
    vm_oncrash_action = params.get("domstate_vm_oncrash")
    reset_action = "yes" == params.get("reset_action", "no")
    dump_option = params.get("dump_option", "")
    start_action = params.get("start_action", "normal")
    kill_action = params.get("kill_action", "normal")
    check_libvirtd_log = params.get("check_libvirtd_log", "no")
    err_msg = params.get("err_msg", "")
    remote_uri = params.get("remote_uri")

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # Back up xml file.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Back up qemu.conf
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    # Config libvirtd log
    if check_libvirtd_log == "yes":
        libvirtd_conf = utils_config.LibvirtdConfig()
        libvirtd_log_file = os.path.join(data_dir.get_tmp_dir(), "libvirtd.log")
        libvirtd_conf["log_level"] = '1'
        libvirtd_conf["log_filters"] = ('"1:json 1:libvirt 1:qemu 1:monitor '
                                        '3:remote 4:event"')
        libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_file
        logging.debug("the libvirtd config file content is:\n %s" %
                      libvirtd_conf)
        libvirtd.restart()

    # Get image file
    image_source = vm.get_first_disk_devices()['source']
    logging.debug("image source: %s" % image_source)
    new_image_source = image_source + '.rename'

    dump_path = os.path.join(data_dir.get_tmp_dir(), "dump/")
    logging.debug("dump_path: %s", dump_path)
    try:
        os.mkdir(dump_path)
    except OSError:
        # If the path already exists then pass
        pass
    dump_file = ""
    try:
        # Use less guest memory so that dumping core takes less time
        # and does not time out the test case
        if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
            memory_value = int(params.get("memory_value", "2097152"))
            memory_unit = params.get("memory_unit", "KiB")
            vmxml.set_memory(memory_value)
            vmxml.set_memory_unit(memory_unit)
            logging.debug(vmxml)
            vmxml.sync()

        if vm_action == "crash":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml.on_crash = vm_oncrash_action
            if not vmxml.xmltreefile.find('devices').findall('panic'):
                # Add <panic> device to domain
                panic_dev = Panic()
                if "ppc" not in platform.machine():
                    panic_dev.addr_type = "isa"
                    panic_dev.addr_iobase = "0x505"
                vmxml.add_device(panic_dev)
            vmxml.sync()
            # Config auto_dump_path in qemu.conf
            qemu_conf.auto_dump_path = dump_path
            libvirtd.restart()
            if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
                dump_file = dump_path + "*" + vm_name[:20] + "-*"
            # Start VM and check the panic device
            virsh.start(vm_name, ignore_status=False)
            vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
            # Skip this test if no panic device is found
            if not vmxml_new.xmltreefile.find('devices').findall('panic'):
                test.cancel("No 'panic' device in the guest. Maybe your "
                            "libvirt version doesn't support it.")
        try:
            if vm_action == "suspend":
                virsh.suspend(vm_name, ignore_status=False)
            elif vm_action == "resume":
                virsh.suspend(vm_name, ignore_status=False)
                virsh.resume(vm_name, ignore_status=False)
            elif vm_action == "destroy":
                virsh.destroy(vm_name, ignore_status=False)
            elif vm_action == "start":
                virsh.destroy(vm_name, ignore_status=False)
                if start_action == "rename":
                    # rename the guest image file to make guest fail to start
                    os.rename(image_source, new_image_source)
                    virsh.start(vm_name, ignore_status=True)
                else:
                    virsh.start(vm_name, ignore_status=False)
                    if start_action == "restart_libvirtd":
                        libvirtd.restart()
            elif vm_action == "kill":
                if kill_action == "stop_libvirtd":
                    libvirtd.stop()
                    utils_misc.kill_process_by_pattern(vm_name)
                    libvirtd.restart()
                elif kill_action == "reboot_vm":
                    virsh.reboot(vm_name, ignore_status=False)
                    utils_misc.kill_process_tree(vm.get_pid(), signal.SIGKILL)
                else:
                    utils_misc.kill_process_tree(vm.get_pid(), signal.SIGKILL)
            elif vm_action == "crash":
                session = vm.wait_for_login()
                session.cmd("service kdump stop", ignore_all_errors=True)
                # Enable SysRq
                session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                # Writing 'c' to /proc/sysrq-trigger (equivalent to the
                # ALT-SysRq-c key sequence) crashes the VM, so the command
                # does not return. Fail early for the 'destroy' and
                # 'preserve' actions; the 'restart', 'coredump-restart'
                # and 'coredump-destroy' actions need more time to dump
                # the core file or restart the OS, so keep the default
                # session command timeout (60s)
                try:
                    if vm_oncrash_action in ['destroy', 'preserve']:
                        timeout = 3
                    else:
                        timeout = 60
                    session.cmd("echo c > /proc/sysrq-trigger",
                                timeout=timeout)
                except (ShellTimeoutError, ShellProcessTerminatedError):
                    pass
                session.close()
            elif vm_action == "dump":
                dump_file = dump_path + "*" + vm_name + "-*"
                virsh.dump(vm_name, dump_file, dump_option, ignore_status=False)
        except process.CmdError as detail:
            test.error("Guest prepare action error: %s" % detail)

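        # Optionally query domstate while libvirtd is stopped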
        if libvirtd_state == "off":
            libvirtd.stop()

        # A timing issue can cause the test to check domstate before the
        # preceding kill action has completed, so wait briefly
        if vm_action == "kill":
            time.sleep(2)

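        # For a remote uri, set up passwordless ssh access first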
        if remote_uri:
            remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
            remote_pwd = params.get("remote_pwd", None)
            remote_user = params.get("remote_user", "root")
            if remote_ip.count("EXAMPLE.COM"):
                test.cancel("Test 'remote' parameters not setup")
            ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)

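        # Run the domstate command under test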
        result = virsh.domstate(vm_ref, extra, ignore_status=True,
                                debug=True, uri=remote_uri)
        status = result.exit_status
        output = result.stdout.strip()

        # check status_error
        if status_error:
            if not status:
                test.fail("Run successfully with wrong command!")
        else:
            if status or not output:
                test.fail("Run failed with right command")
            if extra.count("reason"):
                if vm_action == "suspend":
                    # Destroy the suspended vm now; otherwise it takes a
                    # long time to destroy later
                    virsh.destroy(vm_name)
                    if not output.count("user"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "resume":
                    if not output.count("unpaused"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "destroy":
                    if not output.count("destroyed"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "start":
                    if start_action == "rename":
                        if not output.count("shut off (failed)"):
                            test.fail(err_msg % vm_action)
                    else:
                        if not output.count("booted"):
                            test.fail(err_msg % vm_action)
                elif vm_action == "kill":
                    if not output.count("crashed"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "crash":
                    if not check_crash_state(output, vm_oncrash_action,
                                             vm_name, dump_file):
                        test.fail(err_msg % vm_action)
                    # The VM is left in the preserved (crashed) state.
                    # Perform virsh reset and check that the VM reboots and
                    # that domstate moves from the crashed state back to
                    # running, since a bug was observed here
                    if vm_oncrash_action == "preserve" and reset_action:
                        virsh_dargs = {'debug': True, 'ignore_status': True}
                        ret = virsh.reset(vm_name, **virsh_dargs)
                        libvirt.check_exit_status(ret)
                        ret = virsh.domstate(vm_name, extra,
                                             **virsh_dargs).stdout.strip()
                        if "paused (crashed)" not in ret:
                            test.fail("vm fails to change state from crashed"
                                      " to paused after virsh reset")
                        # After the reset the vm stays in the paused
                        # (crashed) state; a resume is required for it
                        # to reboot
                        ret = virsh.resume(vm_name, **virsh_dargs)
                        libvirt.check_exit_status(ret)
                        vm.wait_for_login()
                        cmd_output = virsh.domstate(vm_name,
                                                    '--reason').stdout.strip()
                        if "running" not in cmd_output:
                            test.fail("guest state failed to get updated")
                    if vm_oncrash_action in ['coredump-destroy',
                                             'coredump-restart']:
                        if not find_dump_file:
                            test.fail("Core dump file is not created in dump "
                                      "path: %s" % dump_path)
                    # Coverage for bug 1178652
                    if (vm_oncrash_action == "rename-restart" and
                            check_libvirtd_log == "yes"):
                        libvirtd.restart()
                        if not os.path.exists(libvirtd_log_file):
                            test.fail("Expected VM log file: %s not exists"
                                      % libvirtd_log_file)
                        cmd = ("grep -nr '%s' %s" % (err_msg, libvirtd_log_file))
                        if not process.run(cmd, ignore_status=True, shell=True).exit_status:
                            test.fail("Find error message %s from log file: %s."
                                      % (err_msg, libvirtd_log_file))
                elif vm_action == "dump":
                    if dump_option == "--live":
                        if not output.count("running (unpaused)"):
                            test.fail(err_msg % vm_action)
                    elif dump_option == "--crash":
                        if not output.count("shut off (crashed)"):
                            test.fail(err_msg % vm_action)
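            # For the remote case, accept any of the running/blocked/idle
            # states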
            if vm_ref == "remote":
                if not (re.search("running", output) or
                        re.search("blocked", output) or
                        re.search("idle", output)):
                    test.fail("Run failed with right command")
    finally:
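        # Clean up: restore configuration files, the original image name
        # and the guest definition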
        qemu_conf.restore()
        if check_libvirtd_log == "yes":
            libvirtd_conf.restore()
            if os.path.exists(libvirtd_log_file):
                os.remove(libvirtd_log_file)
        libvirtd.restart()
        if vm_action == "start" and start_action == "rename":
            os.rename(new_image_source, image_source)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if os.path.exists(dump_path):
            shutil.rmtree(dump_path)