Code example #1
def run(test, params, env):
    """
    Run an autotest test inside a guest.

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    # Collect test parameters
    timeout = int(params.get("test_timeout", 300))
    control_args = params.get("control_args")
    control_path = os.path.join(test.virtdir, "control",
                                params.get("test_control_file"))
    ignore_sess_terminated = params.get("ignore_session_terminated") == "yes"
    outputdir = test.outputdir

    utils_test.run_autotest(vm,
                            session,
                            control_path,
                            timeout,
                            outputdir,
                            params,
                            control_args=control_args,
                            ignore_session_terminated=ignore_sess_terminated)
Code example #2
File: autotest_control.py  Project: CongLi/tp-qemu
def run(test, params, env):
    """
    Run an autotest test inside a guest.

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    # Collect test parameters
    timeout = int(params.get("test_timeout", 300))
    control_args = params.get("control_args")
    control_path = os.path.join(test.virtdir, "control", params.get("test_control_file"))
    ignore_sess_terminated = params.get("ignore_session_terminated") == "yes"
    outputdir = test.outputdir

    utils_test.run_autotest(
        vm,
        session,
        control_path,
        timeout,
        outputdir,
        params,
        control_args=control_args,
        ignore_session_terminated=ignore_sess_terminated,
    )
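Note: the test_control_file parameter used by these examples names an autotest control script under the test's control directory. As a hypothetical minimal sketch (assuming the stock autotest sleeptest client test and its seconds argument), such a control file is just a short Python snippet executed by the autotest client inside the guest:

# sleeptest.control (hypothetical sketch, not taken from the examples above):
# an autotest control file is plain Python run by the autotest client job
# object inside the guest; this one runs the bundled sleeptest for 60 seconds.
job.run_test('sleeptest', seconds=60)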
Code example #3
def run(test, params, env):
    """
    Run an autotest test inside a guest.

    :param test: kvm test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    mount_dir = params.get("9p_mount_dir")

    if mount_dir is None:
        logging.info("User Variable for mount dir is not set")
    else:
        session.cmd("mkdir -p %s" % mount_dir)

        mount_option = " trans=virtio"

        p9_proto_version = params.get("9p_proto_version", "9p2000.L")
        mount_option += ",version=" + p9_proto_version

        guest_cache = params.get("9p_guest_cache")
        if guest_cache == "yes":
            mount_option += ",cache=loose"

        posix_acl = params.get("9p_posix_acl")
        if posix_acl == "yes":
            mount_option += ",posixacl"

        logging.info("Mounting 9p mount point with options %s" % mount_option)
        cmd = "mount -t 9p -o %s autotest_tag %s" % (mount_option, mount_dir)
        mount_status = session.get_command_status(cmd)

        if (mount_status != 0):
            logging.error("mount failed")
            raise error.TestFail('mount failed.')

        # Collect test parameters
        timeout = int(params.get("test_timeout", 14400))
        control_path = os.path.join(test.virtdir, "autotest_control",
                                    params.get("test_control_file"))

        outputdir = test.outputdir

        utils_test.run_autotest(vm, session, control_path,
                                timeout, outputdir, params)
Code example #4
def run(test, params, env):
    """
    Test steps:

    1) Get the params from params.
    2) Run unixbench on guest.
    3) Dump each VM and check result.
    4) Clean up.
    """
    vms = env.get_all_vms()
    unixbench_control_file = params.get("unixbench_controle_file",
                                        "unixbench5.control")
    # Run unixbench on guest.
    params["test_control_file"] = unixbench_control_file
    # Fork a new process to run unixbench on each guest.
    for vm in vms:
        params["main_vm"] = vm.name
        control_path = os.path.join(test.virtdir, "control",
                                    unixbench_control_file)

        session = vm.wait_for_login()
        command = utils_test.run_autotest(vm,
                                          session,
                                          control_path,
                                          None,
                                          None,
                                          params,
                                          copy_only=True)
        session.cmd("%s &" % command)

    for vm in vms:
        session = vm.wait_for_login()

        def _is_unixbench_running():
            return (not session.cmd_status("ps -ef|grep perl|grep Run"))

        if not utils_misc.wait_for(_is_unixbench_running, timeout=120):
            test.cancel("Failed to run unixbench in guest.\n"
                        "Since we need to run a autotest of unixbench "
                        "in guest, so please make sure there are some "
                        "necessary packages in guest, such as gcc, tar, bzip2")

    logging.debug("Unixbench is already running in VMs.")

    try:
        dump_path = os.path.join(data_dir.get_tmp_dir(), "dump_file")
        for vm in vms:
            vm.dump(dump_path)
            # Check the status after vm.dump()
            if not vm.is_alive():
                test.fail("VM is shutoff after dump.")
            if vm.wait_for_shutdown():
                test.fail("VM is going to shutdown after dump.")
            # Check VM is running normally.
            vm.wait_for_login()
    finally:
        # Destroy VM.
        for vm in vms:
            vm.destroy()
Code example #5
def run(test, params, env):
    """
    Test steps:

    1) Get the params from params.
    2) Run iozone on guest.
    3) Run domstate_switch test for each VM.
    4) Clean up.
    """
    vms = env.get_all_vms()
    iozone_control_file = params.get("iozone_control_file",
                                     "iozone.control")
    timeout = int(params.get("LB_domstate_with_iozone_loop_time", "600"))
    # Run iozone on guest.
    params["test_control_file"] = iozone_control_file
    # Fork a new process to run iozone on each guest.
    for vm in vms:
        params["main_vm"] = vm.name
        control_path = os.path.join(test.virtdir, "control",
                                    iozone_control_file)

        session = vm.wait_for_login()
        command = utils_test.run_autotest(vm, session, control_path,
                                          None, None,
                                          params, copy_only=True)
        session.cmd("%s &" % command)

    for vm in vms:
        session = vm.wait_for_login()

        def _is_iozone_running():
            return (not session.cmd_status("ps -ef|grep iozone|grep -v grep"))
        if not utils_misc.wait_for(_is_iozone_running, timeout=120):
            raise error.TestNAError("Failed to run iozone in guest.\n"
                                    "Since we need to run a autotest of iozone "
                                    "in guest, so please make sure there are "
                                    "some necessary packages in guest,"
                                    "such as gcc, tar, bzip2")
    logging.debug("Iozone is already running in VMs.")

    try:
        # Create a BackgroundTest for each vm to run test domstate_switch.
        backgroud_tests = []
        for vm in vms:
            bt = utils_test.BackgroundTest(func_in_thread, [vm, timeout])
            bt.start()
            backgroud_tests.append(bt)

        for bt in backgroud_tests:
            bt.join()

        # Reboot vms after func_in_thread to check vm is running normally.
        for vm in vms:
            vm.reboot()
    finally:
        # Clean up.
        logging.debug("No cleaning operation for this test.")
Code example #6
def run_libvirt_bench_dump_with_unixbench(test, params, env):
    """
    Test steps:

    1) Get the params from params.
    2) Run unixbench on guest.
    3) Dump each VM and check result.
    4) Clean up.
    """
    vms = env.get_all_vms()
    unixbench_control_file = params.get("unixbench_controle_file", "unixbench5.control")
    # Run unixbench on guest.
    guest_unixbench_pids = []
    params["test_control_file"] = unixbench_control_file
    # Fork a new process to run unixbench on each guest.
    for vm in vms:
        params["main_vm"] = vm.name
        control_path = os.path.join(test.virtdir, "control", unixbench_control_file)

        session = vm.wait_for_login()
        command = utils_test.run_autotest(vm, session, control_path, None, None, params, copy_only=True)
        session.cmd("%s &" % command)

    for vm in vms:
        session = vm.wait_for_login()

        def _is_unixbench_running():
            return not session.cmd_status("ps -ef|grep perl|grep Run")

        if not utils_misc.wait_for(_is_unixbench_running, timeout=120):
            raise error.TestNAError(
                "Failed to run unixbench in guest.\n"
                "Since we need to run a autotest of unixbench "
                "in guest, so please make sure there are some "
                "necessary packages in guest, such as gcc, tar, bzip2"
            )

    logging.debug("Unixbench is already running in VMs.")

    try:
        dump_path = os.path.join(test.tmpdir, "dump_file")
        for vm in vms:
            vm.dump(dump_path)
            # Check the status after vm.dump()
            if not vm.is_alive():
                raise error.TestFail("VM is shutoff after dump.")
            if vm.wait_for_shutdown():
                raise error.TestFail("VM is going to shutdown after dump.")
            # Check VM is running normally.
            vm.wait_for_login()
    finally:
        for pid in guest_unixbench_pids:
            utils_misc.kill_process_tree(pid)
        # Destroy VM.
        for vm in vms:
            vm.destroy()
Code example #7
def run_autotest_distro_detect(test, params, env):
    """
    Run a distro detection check on the guest

    :param test: kvm test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    # Collect test parameters
    timeout = int(params.get("test_timeout", 90))
    control_path = generate_control_file(params)
    outputdir = test.outputdir

    utils_test.run_autotest(vm, session, control_path, timeout, outputdir, params)
Code example #8
def run(test, params, env):
    """
    Run a distro detection check on the guest

    :param test: kvm test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    # Collect test parameters
    timeout = int(params.get("test_timeout", 90))
    control_path = generate_control_file(params)
    outputdir = test.outputdir

    utils_test.run_autotest(vm, session, control_path, timeout, outputdir,
                            params)
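Note: examples #7 and #8 call a generate_control_file(params) helper that is not shown here. A hypothetical sketch of such a helper (assuming it simply writes a throwaway autotest control script to a temporary directory and returns its path; the test_name parameter below is made up for illustration) could be:

import os
import tempfile


def generate_control_file(params):
    # Hypothetical sketch: write a one-line autotest control script that
    # runs the guest test named by a (made-up) test_name parameter.
    test_name = params.get("test_name", "sleeptest")
    control_path = os.path.join(tempfile.mkdtemp(), "%s.control" % test_name)
    with open(control_path, "w") as control:
        control.write("job.run_test('%s')\n" % test_name)
    return control_path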
Code example #9
def run_autotest_control(test, params, env):
    """
    Run an autotest test inside a guest.

    @param test: kvm test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    # Collect test parameters
    timeout = int(params.get("test_timeout", 300))
    control_path = os.path.join(test.virtdir, "control",
                                params.get("test_control_file"))
    outputdir = test.outputdir

    utils_test.run_autotest(vm, session, control_path, timeout, outputdir,
                            params)
Code example #10
File: cpu_hotplug.py  Project: zixi-chen/tp-qemu
def run(test, params, env):
    """
    Runs CPU hotplug test:

    1) Boot the vm with -smp X,maxcpus=Y
    2) After logging into the vm, check the number of CPUs
    3) Send the monitor command cpu_set [cpu id] for each cpu we wish to have
    4) Verify if the guest has the additional CPUs showing up
    5) Reboot the vm
    6) Recheck that the guest sees the hot-plugged CPUs
    7) Try to bring them online by writing 1 to the 'online' file inside
       that dir (Linux guest only)
    8) Run the CPU Hotplug test suite shipped with autotest inside guest
       (Linux guest only)

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    error_context.context(
        "boot the vm with the '-smp X,maxcpus=Y' option, "
        "thus allowing vcpu hotplug", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    n_cpus_add = int(params.get("n_cpus_add", 1))
    maxcpus = int(params.get("maxcpus", 160))
    current_cpus = int(params.get("smp", 1))
    onoff_iterations = int(params.get("onoff_iterations", 20))
    cpu_hotplug_cmd = params.get("cpu_hotplug_cmd", "")

    if n_cpus_add + current_cpus > maxcpus:
        logging.warn("CPU quantity more than maxcpus, set it to %s", maxcpus)
        total_cpus = maxcpus
    else:
        total_cpus = current_cpus + n_cpus_add

    error_context.context(
        "check if CPUs in guest matches qemu cmd "
        "before hot-plug", logging.info)
    if not cpu.check_if_vm_vcpu_match(current_cpus, vm):
        test.error("CPU quantity mismatch cmd before hotplug !")

    for cpuid in range(current_cpus, total_cpus):
        error_context.context("hot-pluging vCPU %s" % cpuid, logging.info)
        vm.hotplug_vcpu(cpu_id=cpuid, plug_command=cpu_hotplug_cmd)

    output = vm.monitor.send_args_cmd("info cpus")
    logging.debug("Output of info CPUs:\n%s", output)

    cpu_regexp = re.compile(r"CPU #(\d+)")
    total_cpus_monitor = len(cpu_regexp.findall(output))
    if total_cpus_monitor != total_cpus:
        test.fail("Monitor reports %s CPUs, when VM should have"
                  " %s" % (total_cpus_monitor, total_cpus))
    # Windows is a bit slow and needs a few more seconds to recognize the new CPUs.
    error_context.context(
        "hotplugging finished, let's wait a few sec and"
        " check CPUs quantity in guest.", logging.info)
    if not utils_misc.wait_for(
            lambda: cpu.check_if_vm_vcpu_match(total_cpus, vm),
            60 + total_cpus,
            first=10,
            step=5.0,
            text="retry later"):
        test.fail("CPU quantity mismatch cmd after hotplug !")
    error_context.context("rebooting the vm and check CPU quantity !",
                          logging.info)
    session = vm.reboot()
    if not cpu.check_if_vm_vcpu_match(total_cpus, vm):
        test.fail("CPU quantity mismatch cmd after hotplug and reboot !")

    # Windows guests don't support the online/offline test
    if params['os_type'] == "windows":
        return

    error_context.context("locating online files for guest's new CPUs")
    r_cmd = 'find /sys/devices/system/cpu/cpu*/online -maxdepth 0 -type f'
    online_files = session.cmd(r_cmd)
    # Sometimes the return value includes the command line itself
    if "find" in online_files:
        online_files = " ".join(online_files.strip().split("\n")[1:])
    logging.debug("CPU online files detected: %s", online_files)
    online_files = online_files.split()
    online_files.sort()

    if not online_files:
        test.fail("Could not find CPUs that can be enabled/disabled on guest")

    control_path = os.path.join(test.virtdir, "control", "cpu_hotplug.control")

    timeout = int(params.get("cpu_hotplug_timeout", 300))
    error_context.context("running cpu_hotplug autotest after cpu addition")
    utils_test.run_autotest(vm, session, control_path, timeout, test.outputdir,
                            params)

    # Last, but not least, let's offline/online the CPUs in the guest
    # several times
    irq = 15
    irq_mask = "f0"
    for i in range(onoff_iterations):
        session.cmd("echo %s > /proc/irq/%s/smp_affinity" % (irq_mask, irq))
        for online_file in online_files:
            session.cmd("echo 0 > %s" % online_file)
        for online_file in online_files:
            session.cmd("echo 1 > %s" % online_file)
Code example #11
def run(test, params, env):
    """
    Test steps:

    1) Get the params from params.
    2) Run unixbench on guest.
    3) Run domstate_switch test for each VM.
    4) Clean up.
    """
    vms = env.get_all_vms()
    unixbench_control_file = params.get("unixbench_controle_file",
                                        "unixbench5.control")
    timeout = int(params.get("LB_domstate_with_unixbench_loop_time", "600"))
    # Run unixbench on guest.
    params["test_control_file"] = unixbench_control_file
    # Fork a new process to run unixbench on each guest.
    for vm in vms:
        params["main_vm"] = vm.name
        control_path = os.path.join(test.virtdir, "control",
                                    unixbench_control_file)

        session = vm.wait_for_login()
        command = utils_test.run_autotest(vm,
                                          session,
                                          control_path,
                                          None,
                                          None,
                                          params,
                                          copy_only=True)
        session.cmd("%s &" % command)

    for vm in vms:
        session = vm.wait_for_login()

        def _is_unixbench_running():
            return (not session.cmd_status("ps -ef|grep perl|grep Run"))

        if not utils_misc.wait_for(_is_unixbench_running, timeout=120):
            test.cancel("Failed to run unixbench in guest.\n"
                        "Since we need to run a autotest of unixbench "
                        "in guest, so please make sure there are some "
                        "necessary packages in guest, such as gcc, tar, bzip2")
    logging.debug("Unixbench is already running in VMs.")

    # Run unixbench on host.
    from autotest.client import common
    autotest_client_dir = os.path.dirname(common.__file__)
    autotest_local_path = os.path.join(autotest_client_dir, "autotest-local")
    unixbench_control_path = os.path.join(data_dir.get_root_dir(), "shared",
                                          "control", unixbench_control_file)
    args = [
        autotest_local_path, unixbench_control_path, '--verbose', '-t',
        unixbench_control_file
    ]
    host_unixbench_process = subprocess.Popen(args)

    try:
        # Create a BackgroundTest for each vm to run test domstate_switch.
        backgroud_tests = []
        for vm in vms:
            bt = utils_test.BackgroundTest(func_in_thread, [vm, timeout, test])
            bt.start()
            backgroud_tests.append(bt)

        for bt in backgroud_tests:
            bt.join()
    finally:
        # Kill process on host running unixbench.
        utils_misc.kill_process_tree(host_unixbench_process.pid)
        # Remove the result dir produced by subprocess host_unixbench_process.
        unixbench_control_result = os.path.join(autotest_client_dir, "results",
                                                unixbench_control_file)
        if os.path.isdir(unixbench_control_result):
            shutil.rmtree(unixbench_control_result)
Code example #12
File: cpu_hotplug.py  Project: Chenditang/tp-qemu
def run(test, params, env):
    """
    Runs CPU hotplug test:

    1) Boot the vm with -smp X,maxcpus=Y
    2) After logging into the vm, check the number of CPUs
    3) Send the monitor command cpu_set [cpu id] for each cpu we wish to have
    4) Verify if the guest has the additional CPUs showing up
    5) Reboot the vm
    6) Recheck that the guest sees the hot-plugged CPUs
    7) Try to bring them online by writing 1 to the 'online' file inside
       that dir (Linux guest only)
    8) Run the CPU Hotplug test suite shipped with autotest inside guest
       (Linux guest only)

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    error.context("boot the vm, with '-smp X,maxcpus=Y' option,"
                  "thus allow hotplug vcpu", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    n_cpus_add = int(params.get("n_cpus_add", 1))
    maxcpus = int(params.get("maxcpus", 160))
    current_cpus = int(params.get("smp", 1))
    onoff_iterations = int(params.get("onoff_iterations", 20))
    cpu_hotplug_cmd = params.get("cpu_hotplug_cmd", "")

    if n_cpus_add + current_cpus > maxcpus:
        logging.warn("CPU quantity more than maxcpus, set it to %s", maxcpus)
        total_cpus = maxcpus
    else:
        total_cpus = current_cpus + n_cpus_add

    error.context("check if CPUs in guest matches qemu cmd "
                  "before hot-plug", logging.info)
    if not utils_misc.check_if_vm_vcpu_match(current_cpus, vm):
        raise error.TestError("CPU quantity mismatch cmd before hotplug !")

    for cpu in range(current_cpus, total_cpus):
        error.context("hot-pluging vCPU %s" % cpu, logging.info)
        vm.hotplug_vcpu(cpu_id=cpu, plug_command=cpu_hotplug_cmd)

    output = vm.monitor.send_args_cmd("info cpus")
    logging.debug("Output of info CPUs:\n%s", output)

    cpu_regexp = re.compile("CPU #(\d+)")
    total_cpus_monitor = len(cpu_regexp.findall(output))
    if total_cpus_monitor != total_cpus:
        raise error.TestFail("Monitor reports %s CPUs, when VM should have"
                             " %s" % (total_cpus_monitor, total_cpus))
    # Windows is a bit slow and needs a few more seconds to recognize the new CPUs.
    error.context("hotplugging finished, let's wait a few sec and"
                  " check CPUs quantity in guest.", logging.info)
    if not utils_misc.wait_for(lambda: utils_misc.check_if_vm_vcpu_match(
                               total_cpus, vm),
                               60 + total_cpus, first=10,
                               step=5.0, text="retry later"):
        raise error.TestFail("CPU quantity mismatch cmd after hotplug !")
    error.context("rebooting the vm and check CPU quantity !", logging.info)
    session = vm.reboot()
    if not utils_misc.check_if_vm_vcpu_match(total_cpus, vm):
        raise error.TestFail("CPU quantity mismatch cmd after hotplug "
                             "and reboot !")

    # Windows guests don't support the online/offline test
    if params['os_type'] == "windows":
        return

    error.context("locating online files for guest's new CPUs")
    r_cmd = 'find /sys/devices/system/cpu/cpu*/online -maxdepth 0 -type f'
    online_files = session.cmd(r_cmd)
    # Sometimes the return value includes the command line itself
    if "find" in online_files:
        online_files = " ".join(online_files.strip().split("\n")[1:])
    logging.debug("CPU online files detected: %s", online_files)
    online_files = online_files.split()
    online_files.sort()

    if not online_files:
        raise error.TestFail("Could not find CPUs that can be "
                             "enabled/disabled on guest")

    control_path = os.path.join(test.virtdir, "control",
                                "cpu_hotplug.control")

    timeout = int(params.get("cpu_hotplug_timeout", 300))
    error.context("running cpu_hotplug autotest after cpu addition")
    utils_test.run_autotest(vm, session, control_path, timeout,
                            test.outputdir, params)

    # Last, but not least, let's offline/online the CPUs in the guest
    # several times
    irq = 15
    irq_mask = "f0"
    for i in xrange(onoff_iterations):
        session.cmd("echo %s > /proc/irq/%s/smp_affinity" % (irq_mask, irq))
        for online_file in online_files:
            session.cmd("echo 0 > %s" % online_file)
        for online_file in online_files:
            session.cmd("echo 1 > %s" % online_file)
Code example #13
def run(test, params, env):
    """
    Test steps:

    1) Get the params from params.
    2) Run unixbench on guest.
    3) Run domstate_switch test for each VM.
    4) Clean up.
    """
    vms = env.get_all_vms()
    unixbench_control_file = params.get("unixbench_controle_file",
                                        "unixbench5.control")
    timeout = int(params.get("LB_domstate_with_unixbench_loop_time", "600"))
    # Run unixbench on guest.
    guest_unixbench_pids = []
    params["test_control_file"] = unixbench_control_file
    # Fork a new process to run unixbench on each guest.
    for vm in vms:
        params["main_vm"] = vm.name
        control_path = os.path.join(test.virtdir, "control",
                                    unixbench_control_file)

        session = vm.wait_for_login()
        command = utils_test.run_autotest(vm, session, control_path,
                                          None, None,
                                          params, copy_only=True)
        session.cmd("%s &" % command)

    for vm in vms:
        session = vm.wait_for_login()

        def _is_unixbench_running():
            return (not session.cmd_status("ps -ef|grep perl|grep Run"))
        if not utils_misc.wait_for(_is_unixbench_running, timeout=120):
            raise error.TestNAError("Failed to run unixbench in guest.\n"
                                    "Since we need to run a autotest of unixbench "
                                    "in guest, so please make sure there are some "
                                    "necessary packages in guest, such as gcc, tar, bzip2")
    logging.debug("Unixbench is already running in VMs.")

    # Run unixbench on host.
    from autotest.client import common
    autotest_client_dir = os.path.dirname(common.__file__)
    autotest_local_path = os.path.join(autotest_client_dir, "autotest-local")
    unixbench_control_path = os.path.join(data_dir.get_root_dir(),
                                          "shared", "control",
                                          unixbench_control_file)
    args = [autotest_local_path, unixbench_control_path, '--verbose',
            '-t', unixbench_control_file]
    host_unixbench_process = subprocess.Popen(args)

    try:
        # Create a BackgroundTest for each vm to run test domstate_switch.
        backgroud_tests = []
        for vm in vms:
            bt = utils_test.BackgroundTest(func_in_thread, [vm, timeout])
            bt.start()
            backgroud_tests.append(bt)

        for bt in backgroud_tests:
            bt.join()
    finally:
        # Kill process on host running unixbench.
        utils_misc.kill_process_tree(host_unixbench_process.pid)
        # Remove the result dir produced by subprocess host_unixbench_process.
        unixbench_control_result = os.path.join(autotest_client_dir,
                                                "results",
                                                unixbench_control_file)
        if os.path.isdir(unixbench_control_result):
            shutil.rmtree(unixbench_control_result)
Code example #14
def run(test, params, env):
    """
    Test steps:

    1) Get the params from params.
    2) Check the environment.
    3) Start the VM and check whether the VM has been started successfully.
    4) Compare the hugepage memory size to the guest memory that was set.
    5) Check the hugepage memory usage.
    6) Clean up
    """
    test_type = params.get("test_type", 'normal')
    tlbfs_enable = 'yes' == params.get("hugetlbfs_enable", 'no')
    shp_num = int(params.get("static_hugepage_num", 1024))
    thp_enable = 'yes' == params.get("trans_hugepage_enable", 'no')
    mb_enable = 'yes' == params.get("mb_enable", 'yes')
    delay = int(params.get("delay_time", 10))

    # Skip cases early
    vm_names = []
    if test_type == "contrast":
        vm_names = params.get("vms").split()[:2]
        if len(vm_names) < 2:
            test.cancel("This test requires two VMs")
        # confirm no VM running
        allvms = virsh.dom_list('--name').stdout.strip()
        if allvms != '':
            test.cancel("one or more VMs are alive")
        err_range = float(params.get("mem_error_range", 1.25))
    else:
        vm_names.append(params.get("main_vm"))
        if test_type == "stress":
            target_path = params.get("target_path", "/tmp/test.out")
        elif test_type == "unixbench":
            unixbench_control_file = params.get("unixbench_controle_file",
                                                "unixbench5.control")

    # back up the original settings
    shp_orig_num = utils_memory.get_num_huge_pages()
    thp_orig_status = utils_memory.get_transparent_hugepage()
    page_size = utils_memory.get_huge_page_size()

    # mount/umount hugetlbfs
    tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages",
                                         "hugetlbfs")
    if tlbfs_enable is True:
        if tlbfs_status is not True:
            utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
    else:
        if tlbfs_status is True:
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")

    # set static hugepage
    utils_memory.set_num_huge_pages(shp_num)

    # enable/disable transparent hugepage
    if thp_enable:
        utils_memory.set_transparent_hugepage('always')
    else:
        utils_memory.set_transparent_hugepage('never')

    # set/del memoryBacking tag
    for vm_name in vm_names:
        if mb_enable:
            vm_xml.VMXML.set_memoryBacking_tag(vm_name)
        else:
            vm_xml.VMXML.del_memoryBacking_tag(vm_name)

    utils_libvirtd.libvirtd_restart()
    non_started_free = utils_memory.get_num_huge_pages_free()

    vms = []
    sessions = []
    try:
        for vm_name in vm_names:
            # try to start vm and login
            try:
                vm = env.get_vm(vm_name)
                vm.start()
            except VMError as e:
                if mb_enable and not tlbfs_enable:
                    # if hugetlbfs is not mounted, starting a VM with the
                    # memoryBacking tag will fail
                    logging.debug(e)
                else:
                    error_msg = "Test failed in positive case. error: %s\n" % e
                    test.fail(error_msg)
            if vm.is_alive() is not True:
                break
            vms.append(vm)

            # try to login and run some program
            try:
                session = vm.wait_for_login()
            except (LoginError, ShellError) as e:
                error_msg = "Test failed in positive case.\n error: %s\n" % e
                test.fail(error_msg)
            sessions.append(session)

            if test_type == "stress":
                # prepare file for increasing stress
                stress_path = prepare_c_file()
                remote.scp_to_remote(vm.get_address(), 22,
                                     'root', params.get('password'),
                                     stress_path, "/tmp/")
                # Try to install gcc on guest first
                utils_package.package_install(["gcc"], session, 360)
                # increasing workload
                session.cmd("gcc %s -o %s" % (stress_path, target_path))
                session.cmd("%s &" % target_path)

            if test_type == "unixbench":
                params["main_vm"] = vm_name
                params["test_control_file"] = unixbench_control_file

                control_path = os.path.join(test.virtdir, "control",
                                            unixbench_control_file)
                # unixbench test need 'patch' and 'perl' commands installed
                utils_package.package_install(["patch", "perl"], session, 360)
                command = utils_test.run_autotest(vm, session, control_path,
                                                  None, None, params,
                                                  copy_only=True)
                session.cmd("%s &" % command, ignore_all_errors=True)
                # wait for autotest running on vm
                time.sleep(delay)

                def _is_unixbench_running():
                    cmd = "ps -ef | grep perl | grep Run"
                    return not session.cmd_status(cmd)
                if not utils_misc.wait_for(_is_unixbench_running, timeout=240):
                    test.cancel("Failed to run unixbench in guest,"
                                " please make sure some necessary"
                                " packages are installed in guest,"
                                " such as gcc, tar, bzip2")
                logging.debug("Unixbench test is running in VM")

        if test_type == "contrast":
            # wait for vm finish starting completely
            time.sleep(delay)

        if not (mb_enable and not tlbfs_enable):
            logging.debug("starting analyzing the hugepage usage...")
            pid = vms[-1].get_pid()
            started_free = utils_memory.get_num_huge_pages_free()
            # Get the thp usage from /proc/pid/smaps
            started_anon = utils_memory.get_num_anon_huge_pages(pid)
            static_used = non_started_free - started_free
            hugepage_used = static_used * page_size

            if test_type == "contrast":
                # get qemu-kvm memory consumption by top
                cmd = "top -b -n 1|awk '$1 == %s {print $10}'" % pid
                rate = process.run(cmd, ignore_status=False,
                                   verbose=True, shell=True).stdout_text.strip()
                qemu_kvm_used = (utils_memory.memtotal() * float(rate)) / 100
                logging.debug("rate: %s, used-by-qemu-kvm: %f, used-by-vm: %d",
                              rate, qemu_kvm_used, hugepage_used)
                if abs(qemu_kvm_used - hugepage_used) > hugepage_used * (err_range - 1):
                    test.fail("Error for hugepage usage")
            if test_type == "stress":
                if non_started_free <= started_free:
                    logging.debug("hugepage usage:%d -> %d", non_started_free,
                                  started_free)
                    test.fail("Error for hugepage usage with stress")
            if mb_enable is not True:
                if static_used > 0:
                    test.fail("VM use static hugepage without"
                              " memoryBacking element")
                if thp_enable is not True and started_anon > 0:
                    test.fail("VM use transparent hugepage, while"
                              " it's disabled")
            else:
                if tlbfs_enable is not True:
                    if static_used > 0:
                        test.fail("VM use static hugepage without tlbfs"
                                  " mounted")
                    if thp_enable and started_anon <= 0:
                        test.fail("VM doesn't use transparent"
                                  " hugepage")
                else:
                    if shp_num > 0:
                        if static_used <= 0:
                            test.fail("VM doesn't use static"
                                      " hugepage")
                    else:
                        if static_used > 0:
                            test.fail("VM use static hugepage,"
                                      " while it's set to zero")
                    if thp_enable is not True:
                        if started_anon > 0:
                            test.fail("VM use transparent hugepage,"
                                      " while it's disabled")
                    else:
                        if shp_num == 0 and started_anon <= 0:
                            test.fail("VM doesn't use transparent"
                                      " hugepage, while static"
                                      " hugepage is disabled")
    finally:
        # end up session
        for session in sessions:
            session.close()

        for vm in vms:
            if vm.is_alive():
                vm.destroy()

        for vm_name in vm_names:
            if mb_enable:
                vm_xml.VMXML.del_memoryBacking_tag(vm_name)
            else:
                vm_xml.VMXML.set_memoryBacking_tag(vm_name)

        utils_libvirtd.libvirtd_restart()

        if tlbfs_enable is True:
            if tlbfs_status is not True:
                utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
        else:
            if tlbfs_status is True:
                utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
        utils_memory.set_num_huge_pages(shp_orig_num)
        utils_memory.set_transparent_hugepage(thp_orig_status)
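Note: the stress branch of the example above relies on a prepare_c_file() helper that is not shown. A hypothetical sketch (assuming it just writes a small C program that keeps allocating and touching memory, which the test then compiles and runs in the guest to create memory pressure) might be:

import os
import tempfile


def prepare_c_file():
    # Hypothetical sketch: write a tiny C program that endlessly allocates
    # and touches 1 MiB blocks, and return the path of the source file.
    source = (
        "#include <stdlib.h>\n"
        "#include <string.h>\n"
        "int main(void)\n"
        "{\n"
        "    while (1) {\n"
        "        char *p = malloc(1024 * 1024);\n"
        "        if (p)\n"
        "            memset(p, 1, 1024 * 1024);\n"
        "    }\n"
        "    return 0;\n"
        "}\n"
    )
    path = os.path.join(tempfile.mkdtemp(), "test.c")
    with open(path, "w") as c_file:
        c_file.write(source)
    return path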
Code example #15
def run(test, params, env):
    """
    Stress test for the hotplug feature of usb device.
    """
    # get the params from params
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    keyboard = "yes" == params.get("usb_hotplug_keyboard", "no")
    mouse = "yes" == params.get("usb_hotplug_mouse", "no")
    tablet = "yes" == params.get("usb_hotplug_tablet", "no")
    disk = "yes" == params.get("usb_hotplug_disk", "no")

    attach_count = int(params.get("attach_count", "1"))
    attach_type = params.get("attach_type", "attach_device")
    bench_type = params.get("guest_bench", None)
    control_file = params.get("control_file", None)

    status_error = "yes" == params.get("status_error", "no")

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    tmp_dir = os.path.join(data_dir.get_tmp_dir(), "usb_hotplug_files")

    if control_file is not None:
        params["test_control_file"] = control_file
        params["main_vm"] = vm_name
        control_path = os.path.join(test.virtdir, "control", control_file)

        session = vm.wait_for_login()
        command = utils_test.run_autotest(vm, session, control_path, None, None, params, copy_only=True)
        session.cmd("%s &" % command)

        def _is_iozone_running():
            session_tmp = vm.wait_for_login()
            return not session_tmp.cmd_status("ps -ef|grep iozone|grep -v grep")

        def _is_stress_running():
            session_tmp = vm.wait_for_login()
            return not session_tmp.cmd_status("ps -ef|grep stress|grep -v grep")

        if bench_type == "stress":
            if not utils_misc.wait_for(_is_stress_running, timeout=160):
                raise error.TestNAError(
                    "Failed to run stress in guest.\n"
                    "Since we need to run a autotest of iozone "
                    "in guest, so please make sure there are "
                    "some necessary packages in guest,"
                    "such as gcc, tar, bzip2"
                )
        elif bench_type == "iozone":
            if not utils_misc.wait_for(_is_iozone_running, timeout=160):
                raise error.TestNAError(
                    "Failed to run iozone in guest.\n"
                    "Since we need to run a autotest of iozone "
                    "in guest, so please make sure there are "
                    "some necessary packages in guest,"
                    "such as gcc, tar, bzip2"
                )
        logging.debug("bench is already running in guest.")
    try:
        try:
            result = None
            disk_xml = None
            tablet_xml = None
            mouse_xml = None
            if not os.path.isdir(tmp_dir):
                os.mkdir(tmp_dir)
            for i in range(attach_count):
                path = os.path.join(tmp_dir, "%s.img" % i)
                if attach_type == "qemu_monitor":
                    options = "--hmp"
                    if disk:
                        utils_test.libvirt.create_local_disk("file", path, size="1M")
                        attach_cmd = "drive_add"
                        attach_cmd += " 0 id=drive-usb-disk%s,if=none,file=%s" % (i, path)

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if keyboard:
                        attach_cmd = "device_add"
                        attach_cmd += " usb-kdb,bus=usb1.0,id=kdb"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if mouse:
                        attach_cmd = "device_add"
                        attach_cmd += " usb-mouse,bus=usb1.0,id=mouse"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if tablet:
                        attach_cmd = "device_add"
                        attach_cmd += " usb-tablet,bus=usb1.0,id=tablet"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                else:
                    if disk:
                        utils_test.libvirt.create_local_disk("file", path, size="1M")
                        os.chmod(path, 0666)
                        disk_xml = Disk(type_name="file")
                        disk_xml.device = "disk"
                        disk_xml.source = disk_xml.new_disk_source(**{"attrs": {"file": path}})
                        disk_xml.driver = {"name": "qemu", "type": "raw", "cache": "none"}
                        disk_xml.target = {"dev": "sdb", "bus": "usb"}

                        attributes = {"type_name": "usb", "bus": "1", "port": "0"}
                        disk_xml.address = disk_xml.new_disk_address(**{"attrs": attributes})

                        result = virsh.attach_device(vm_name, disk_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if mouse:
                        mouse_xml = Input("mouse")
                        mouse_xml.input_bus = "usb"
                        attributes = {"type_name": "usb", "bus": "1", "port": "0"}
                        mouse_xml.address = mouse_xml.new_input_address(**{"attrs": attributes})

                        result = virsh.attach_device(vm_name, mouse_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if tablet:
                        tablet_xml = Input("tablet")
                        tablet_xml.input_bus = "usb"
                        attributes = {"type_name": "usb", "bus": "1", "port": "0"}
                        tablet_xml.address = tablet_xml.new_input_address(**{"attrs": attributes})

                        result = virsh.attach_device(vm_name, tablet_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if keyboard:
                        kbd_xml = Input("keyboard")
                        kbd_xml.input_bus = "usb"
                        attributes = {"type_name": "usb", "bus": "1", "port": "0"}
                        kbd_xml.address = kbd_xml.new_input_address(**{"attrs": attributes})

                        result = virsh.attach_device(vm_name, kbd_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)

                if attach_type == "qemu_monitor":
                    options = "--hmp"
                    if disk:
                        attach_cmd = "drive_del"
                        attach_cmd += " drive-usb-disk"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if mouse:
                        attach_cmd = "device_del"
                        attach_cmd += " mouse"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if keyboard:
                        attach_cmd = "device_del"
                        attach_cmd += " keyboard"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if tablet:
                        attach_cmd = "device_del"
                        attach_cmd += " tablet"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                else:
                    if disk:
                        result = virsh.detach_device(vm_name, disk_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if mouse:
                        result = virsh.detach_device(vm_name, mouse_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if keyboard:
                        result = virsh.detach_device(vm_name, kbd_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if tablet:
                        result = virsh.detach_device(vm_name, tablet_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
        except process.CmdError, e:
            if not status_error:
                raise error.TestFail("failed to attach device.\n" "Detail: %s." % result)
    finally:
        if os.path.isdir(tmp_dir):
            shutil.rmtree(tmp_dir)
        vm_xml_backup.sync()
Code example #16
            # prepare file for increasing stress
            stress_path = prepare_c_file()
            remote.scp_to_remote(vm.get_address(), 22, 'root',
                                 params.get('password'), stress_path, "/tmp/")
            # increasing workload
            session.cmd("gcc %s -o %s" % (stress_path, target_path))
            session.cmd("%s &" % target_path)

        if test_type == "unixbench":
            params["main_vm"] = vm_name
            params["test_control_file"] = unixbench_control_file

            control_path = os.path.join(test.virtdir, "control",
                                        unixbench_control_file)
            command = utils_test.run_autotest(vm, session, control_path,
                                              None, None,
                                              params, copy_only=True)
            session.cmd("%s &" % command)
            # wait for autotest running on vm
            time.sleep(delay)

            def _is_unixbench_running():
                return (not session.cmd_status("ps -ef | grep perl | grep Run"))
            if not utils_misc.wait_for(_is_unixbench_running, timeout=240):
                raise error.TestNAError("Failed to run unixbench in guest.\n"
                                        "so please make sure there are some necessary"
                                        "packages in guest, such as gcc, tar, bzip2")
            logging.debug("Unixbench is already running in VM(s).")

    if test_type == "contrast":
        # wait for vm finish starting completely
Code example #17
File: cpu_hotplug.py  Project: kongove/virt-test
def run_cpu_hotplug(test, params, env):
    """
    Runs CPU hotplug test:

    1) Pick up a living guest
    2) Send the monitor command cpu_set [cpu id] for each cpu we wish to have
    3) Verify if guest has the additional CPUs showing up under
        /sys/devices/system/cpu
    4) Try to bring them online by writing 1 to the 'online' file inside that dir
    5) Run the CPU Hotplug test suite shipped with autotest inside guest

    @param test: KVM test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    n_cpus_add = int(params.get("n_cpus_add", 1))
    current_cpus = int(params.get("smp", 1))
    onoff_iterations = int(params.get("onoff_iterations", 20))
    total_cpus = current_cpus + n_cpus_add

    error.context("cleaning guest dmesg before addition")
    session.cmd("dmesg -c")

    error.context("Adding %d CPUs to guest" % n_cpus_add)
    for i in range(total_cpus):
        vm.monitor.cmd("cpu_set %s online" % i)

    output = vm.monitor.cmd("info cpus")
    logging.debug("Output of info cpus:\n%s", output)

    cpu_regexp = re.compile("CPU #(\d+)")
    total_cpus_monitor = len(cpu_regexp.findall(output))
    if total_cpus_monitor != total_cpus:
        raise error.TestFail("Monitor reports %s CPUs, when VM should have %s" % (total_cpus_monitor, total_cpus))

    dmesg_after = session.cmd("dmesg")
    logging.debug("Guest dmesg output after CPU add:\n%s" % dmesg_after)

    # Verify whether the new cpus are showing up on /sys
    error.context("verifying if new CPUs are showing on guest's /sys dir")
    n_cmd = "find /sys/devices/system/cpu/cpu[0-99] -maxdepth 0 -type d | wc -l"
    output = session.cmd(n_cmd)
    logging.debug("List of cpus on /sys:\n%s" % output)
    try:
        cpus_after_addition = int(output)
    except ValueError:
        logging.error("Output of '%s': %s", n_cmd, output)
        raise error.TestFail("Unable to get CPU count after CPU addition")

    if cpus_after_addition != total_cpus:
        raise error.TestFail(
            "%s CPUs are showing up under "
            "/sys/devices/system/cpu, was expecting %s" % (cpus_after_addition, total_cpus)
        )

    error.context("locating online files for guest's new CPUs")
    r_cmd = "find /sys/devices/system/cpu/cpu[1-99]/online -maxdepth 0 -type f"
    online_files = session.cmd(r_cmd)
    logging.debug("CPU online files detected: %s", online_files)
    online_files = sorted(online_files.split())

    if not online_files:
        raise error.TestFail("Could not find CPUs that can be " "enabled/disabled on guest")

    for online_file in online_files:
        cpu_regexp = re.compile("cpu(\d+)", re.IGNORECASE)
        cpu_id = cpu_regexp.findall(online_file)[0]
        error.context("changing online status for CPU %s" % cpu_id)
        check_online_status = session.cmd("cat %s" % online_file)
        try:
            check_online_status = int(check_online_status)
        except ValueError:
            raise error.TestFail("Unable to get online status from CPU %s" % cpu_id)
        assert check_online_status in [0, 1]
        if check_online_status == 0:
            error.context("Bringing CPU %s online" % cpu_id)
            session.cmd("echo 1 > %s" % online_file)

    # Now that all CPUs were onlined, let's execute the
    # autotest CPU Hotplug test
    control_path = os.path.join(test.virtdir, "autotest_control", "cpu_hotplug.control")

    timeout = int(params.get("cpu_hotplug_timeout", 300))
    error.context("running cpu_hotplug autotest after cpu addition")
    utils_test.run_autotest(vm, session, control_path, timeout, test.outputdir, params)

    # Last, but not least, let's offline/online the CPUs in the guest
    # several times
    irq = 15
    irq_mask = "f0"
    for i in xrange(onoff_iterations):
        session.cmd("echo %s > /proc/irq/%s/smp_affinity" % (irq_mask, irq))
        for online_file in online_files:
            session.cmd("echo 0 > %s" % online_file)
        for online_file in online_files:
            session.cmd("echo 1 > %s" % online_file)
Code example #18
def run(test, params, env):
    """
    Test steps:

    1) Get the params from params.
    2) Check the environment.
    3) Start the VM and check whether the VM has been started successfully.
    4) Compare the hugepage memory size to the guest memory that was set.
    5) Check the hugepage memory usage.
    6) Clean up
    """
    test_type = params.get("test_type", 'normal')
    tlbfs_enable = 'yes' == params.get("hugetlbfs_enable", 'no')
    shp_num = int(params.get("static_hugepage_num", 1024))
    thp_enable = 'yes' == params.get("trans_hugepage_enable", 'no')
    mb_enable = 'yes' == params.get("mb_enable", 'yes')
    delay = int(params.get("delay_time", 10))

    # Skip cases early
    vm_names = []
    if test_type == "contrast":
        vm_names = params.get("vms").split()[:2]
        if len(vm_names) < 2:
            raise error.TestNAError("This test requires two VMs")
        # confirm no VM running
        allvms = virsh.dom_list('--name').stdout.strip()
        if allvms != '':
            raise error.TestNAError("one or more VMs are alive")
        err_range = float(params.get("mem_error_range", 1.25))
    else:
        vm_names.append(params.get("main_vm"))
        if test_type == "stress":
            target_path = params.get("target_path", "/tmp/test.out")
        elif test_type == "unixbench":
            unixbench_control_file = params.get("unixbench_controle_file",
                                                "unixbench5.control")

    # back up the original settings
    shp_orig_num = utils_memory.get_num_huge_pages()
    thp_orig_status = utils_memory.get_transparent_hugepage()
    page_size = utils_memory.get_huge_page_size()

    # mount/umount hugetlbfs
    tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages",
                                         "hugetlbfs")
    if tlbfs_enable is True:
        if tlbfs_status is not True:
            utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
    else:
        if tlbfs_status is True:
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")

    # set static hugepage
    utils_memory.set_num_huge_pages(shp_num)

    # enable/disable transparent hugepage
    if thp_enable:
        utils_memory.set_transparent_hugepage('always')
    else:
        utils_memory.set_transparent_hugepage('never')

    # set/del memoryBacking tag
    for vm_name in vm_names:
        if mb_enable:
            vm_xml.VMXML.set_memoryBacking_tag(vm_name)
        else:
            vm_xml.VMXML.del_memoryBacking_tag(vm_name)

    utils_libvirtd.libvirtd_restart()
    non_started_free = utils_memory.get_num_huge_pages_free()

    vms = []
    sessions = []
    try:
        for vm_name in vm_names:
            # try to start vm and login
            try:
                vm = env.get_vm(vm_name)
                vm.start()
            except VMError, e:
                if mb_enable and not tlbfs_enable:
                    # if hugetlbfs is not mounted, starting a VM with the
                    # memoryBacking tag will fail
                    logging.debug(e)
                else:
                    error_msg = "Test failed in positive case. error: %s\n" % e
                    raise error.TestFail(error_msg)
            if vm.is_alive() is not True:
                break
            vms.append(vm)

            # try to login and run some program
            try:
                session = vm.wait_for_login()
            except (LoginError, ShellError), e:
                error_msg = "Test failed in positive case.\n error: %s\n" % e
                raise error.TestFail(error_msg)
            sessions.append(session)

            if test_type == "stress":
                # prepare file for increasing stress
                stress_path = prepare_c_file()
                remote.scp_to_remote(vm.get_address(), 22,
                                     'root', params.get('password'),
                                     stress_path, "/tmp/")
                # Try to install gcc on guest first
                utils_package.package_install(["gcc"], session, 360)
                # increasing workload
                session.cmd("gcc %s -o %s" % (stress_path, target_path))
                session.cmd("%s &" % target_path)

            if test_type == "unixbench":
                params["main_vm"] = vm_name
                params["test_control_file"] = unixbench_control_file

                control_path = os.path.join(test.virtdir, "control",
                                            unixbench_control_file)
                # the unixbench test needs the 'patch' and 'perl' commands installed
                utils_package.package_install(["patch", "perl"], session, 360)
                command = utils_test.run_autotest(vm, session, control_path,
                                                  None, None, params,
                                                  copy_only=True)
                session.cmd("%s &" % command, ignore_all_errors=True)
                # wait for autotest to start running in the VM
                time.sleep(delay)

                def _is_unixbench_running():
                    cmd = "ps -ef | grep perl | grep Run"
                    return not session.cmd_status(cmd)
                if not utils_misc.wait_for(_is_unixbench_running, timeout=240):
                    raise error.TestNAError("Failed to run unixbench in guest,"
                                            " please make sure some necessary"
                                            " packages are installed in guest,"
                                            " such as gcc, tar, bzip2")
                logging.debug("Unixbench test is running in VM")
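
A note on the pattern above: `utils_test.run_autotest(..., copy_only=True)` only stages autotest and the control file inside the guest and returns the command line to start it, and `utils_misc.wait_for` then polls a callable until it returns a truthy value or a timeout expires. Below is a rough, hedged sketch of that polling idiom, not the library implementation; the real helper also accepts an initial delay and a log message, and the `wait_for` name is reused here purely for illustration.

import time


def wait_for(func, timeout, step=1.0):
    """Poll func() until it returns a truthy value or `timeout` seconds pass."""
    end_time = time.time() + timeout
    while time.time() < end_time:
        result = func()
        if result:
            return result
        time.sleep(step)
    return None


# Usage mirroring the example above (session is a logged-in guest shell):
# wait_for(lambda: session.cmd_status("ps -ef | grep perl | grep Run") == 0,
#          timeout=240)
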
Code example #19
def run(test, params, env):
    """
    Stress test for the hotplug feature of usb device.
    """
    # get the params from params
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)

    keyboard = "yes" == params.get("usb_hotplug_keyboard", "no")
    mouse = "yes" == params.get("usb_hotplug_mouse", "no")
    tablet = "yes" == params.get("usb_hotplug_tablet", "no")
    disk = "yes" == params.get("usb_hotplug_disk", "no")

    attach_count = int(params.get("attach_count", "1"))
    attach_type = params.get("attach_type", "attach_device")
    bench_type = params.get("guest_bench", None)
    control_file = params.get("control_file", None)

    status_error = ("yes" == params.get("status_error", "no"))

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    tmp_dir = os.path.join(data_dir.get_tmp_dir(), "usb_hotplug_files")

    if control_file is not None:
        params["test_control_file"] = control_file
        params["main_vm"] = vm_name
        control_path = os.path.join(test.virtdir, "control", control_file)

        session = vm.wait_for_login()
        command = utils_test.run_autotest(vm,
                                          session,
                                          control_path,
                                          None,
                                          None,
                                          params,
                                          copy_only=True)
        session.cmd("%s &" % command)

        def _is_iozone_running():
            session_tmp = vm.wait_for_login()
            return (
                not session_tmp.cmd_status("ps -ef|grep iozone|grep -v grep"))

        def _is_stress_running():
            session_tmp = vm.wait_for_login()
            return (
                not session_tmp.cmd_status("ps -ef|grep stress|grep -v grep"))

        if bench_type == "stress":
            if not utils_misc.wait_for(_is_stress_running, timeout=160):
                raise error.TestNAError(
                    "Failed to run stress in guest.\n"
                    "Since the test needs to run stress inside "
                    "the guest, please make sure the necessary "
                    "packages (such as gcc, tar, bzip2) are "
                    "installed in the guest.")
        elif bench_type == "iozone":
            if not utils_misc.wait_for(_is_iozone_running, timeout=160):
                raise error.TestNAError(
                    "Failed to run iozone in guest.\n"
                    "Since the test needs to run an autotest iozone "
                    "job inside the guest, please make sure the "
                    "necessary packages (such as gcc, tar, bzip2) "
                    "are installed in the guest.")
        logging.debug("bench is already running in guest.")
    try:
        try:
            result = None
            disk_xml = None
            tablet_xml = None
            mouse_xml = None
            if not os.path.isdir(tmp_dir):
                os.mkdir(tmp_dir)
            for i in range(attach_count):
                path = os.path.join(tmp_dir, "%s.img" % i)
                if attach_type == "qemu_monitor":
                    options = "--hmp"
                    if disk:
                        utils_test.libvirt.create_local_disk("file",
                                                             path,
                                                             size="1M")
                        attach_cmd = "drive_add"
                        attach_cmd += (
                            " 0 id=drive-usb-disk%s,if=none,file=%s" %
                            (i, path))

                        result = virsh.qemu_monitor_command(vm_name,
                                                            attach_cmd,
                                                            options=options)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                    if keyboard:
                        attach_cmd = "device_add"
                        attach_cmd += " usb-kbd,bus=usb1.0,id=kbd"

                        result = virsh.qemu_monitor_command(vm_name,
                                                            attach_cmd,
                                                            options=options)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                    if mouse:
                        attach_cmd = "device_add"
                        attach_cmd += " usb-mouse,bus=usb1.0,id=mouse"

                        result = virsh.qemu_monitor_command(vm_name,
                                                            attach_cmd,
                                                            options=options)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                    if tablet:
                        attach_cmd = "device_add"
                        attach_cmd += " usb-tablet,bus=usb1.0,id=tablet"

                        result = virsh.qemu_monitor_command(vm_name,
                                                            attach_cmd,
                                                            options=options)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                else:
                    if disk:
                        utils_test.libvirt.create_local_disk("file",
                                                             path,
                                                             size="1M")
                        os.chmod(path, 0666)
                        disk_xml = Disk(type_name="file")
                        disk_xml.device = "disk"
                        disk_xml.source = disk_xml.new_disk_source(
                            **{"attrs": {
                                'file': path
                            }})
                        disk_xml.driver = {
                            "name": "qemu",
                            "type": 'raw',
                            "cache": "none"
                        }
                        disk_xml.target = {"dev": 'sdb', "bus": "usb"}

                        attributes = {
                            'type_name': "usb",
                            'bus': "1",
                            'port': "0"
                        }
                        disk_xml.address = disk_xml.new_disk_address(
                            **{"attrs": attributes})

                        result = virsh.attach_device(vm_name, disk_xml.xml)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                    if mouse:
                        mouse_xml = Input("mouse")
                        mouse_xml.input_bus = "usb"
                        attributes = {
                            'type_name': "usb",
                            'bus': "1",
                            'port': "0"
                        }
                        mouse_xml.address = mouse_xml.new_input_address(
                            **{"attrs": attributes})

                        result = virsh.attach_device(vm_name, mouse_xml.xml)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                    if tablet:
                        tablet_xml = Input("tablet")
                        tablet_xml.input_bus = "usb"
                        attributes = {
                            'type_name': "usb",
                            'bus': "1",
                            'port': "0"
                        }
                        tablet_xml.address = tablet_xml.new_input_address(
                            **{"attrs": attributes})

                        result = virsh.attach_device(vm_name, tablet_xml.xml)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                    if keyboard:
                        kbd_xml = Input("keyboard")
                        kbd_xml.input_bus = "usb"
                        attributes = {
                            'type_name': "usb",
                            'bus': "1",
                            'port': "0"
                        }
                        kbd_xml.address = kbd_xml.new_input_address(
                            **{"attrs": attributes})

                        result = virsh.attach_device(vm_name, kbd_xml.xml)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)

                if attach_type == "qemu_monitor":
                    options = "--hmp"
                    if disk:
                        attach_cmd = "drive_del"
                        attach_cmd += (" drive-usb-disk%s" % i)

                        result = virsh.qemu_monitor_command(vm_name,
                                                            attach_cmd,
                                                            options=options)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                    if mouse:
                        attach_cmd = "device_del"
                        attach_cmd += (" mouse")

                        result = virsh.qemu_monitor_command(vm_name,
                                                            attach_cmd,
                                                            options=options)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                    if keyboard:
                        attach_cmd = "device_del"
                        attach_cmd += (" kbd")

                        result = virsh.qemu_monitor_command(vm_name,
                                                            attach_cmd,
                                                            options=options)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                    if tablet:
                        attach_cmd = "device_del"
                        attach_cmd += (" tablet")

                        result = virsh.qemu_monitor_command(vm_name,
                                                            attach_cmd,
                                                            options=options)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                else:
                    if disk:
                        result = virsh.detach_device(vm_name, disk_xml.xml)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                    if mouse:
                        result = virsh.detach_device(vm_name, mouse_xml.xml)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                    if keyboard:
                        result = virsh.detach_device(vm_name, kbd_xml.xml)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
                    if tablet:
                        result = virsh.detach_device(vm_name, tablet_xml.xml)
                        if result.exit_status:
                            raise error.CmdError(result.command, result)
        except error.CmdError, e:
            if not status_error:
                raise error.TestFail("Failed to attach/detach device.\n"
                                     "Detail: %s." % result)
    finally:
        if os.path.isdir(tmp_dir):
            shutil.rmtree(tmp_dir)
        vm_xml_backup.sync()
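
Code example #19 builds the hot-plugged device definitions through the virttest `Disk` and `Input` wrapper classes before handing them to `virsh.attach_device`. Purely for illustration, and using only the standard library, the sketch below shows roughly the shape of libvirt XML such a USB input device with an explicit address serializes to; the exact attributes emitted by the wrappers may differ, and the `usb_input_device_xml` helper is hypothetical.

import xml.etree.ElementTree as ET


def usb_input_device_xml(input_type="mouse", bus="1", port="0"):
    """Build an <input> element similar to what Input(...) produces."""
    inp = ET.Element("input", {"type": input_type, "bus": "usb"})
    ET.SubElement(inp, "address", {"type": "usb", "bus": bus, "port": port})
    return ET.tostring(inp)


# usb_input_device_xml() gives roughly:
# <input type="mouse" bus="usb"><address type="usb" bus="1" port="0" /></input>
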
Code example #20
def run(test, params, env):
    """
    Test steps:

    1) Get the params from params.
    2) Run iozone on guest.
    3) Run domstate_switch test for each VM.
    4) Clean up.
    """
    vms = env.get_all_vms()
    iozone_control_file = params.get("iozone_control_file", "iozone.control")
    timeout = int(params.get("LB_domstate_with_iozone_loop_time", "600"))
    # Run iozone on guest.
    params["test_control_file"] = iozone_control_file
    # Fork a new process to run iozone on each guest.
    for vm in vms:
        params["main_vm"] = vm.name
        control_path = os.path.join(test.virtdir, "control",
                                    iozone_control_file)

        session = vm.wait_for_login()
        command = utils_test.run_autotest(vm,
                                          session,
                                          control_path,
                                          None,
                                          None,
                                          params,
                                          copy_only=True)
        session.cmd("%s &" % command)

    for vm in vms:
        session = vm.wait_for_login()

        def _is_iozone_running():
            return (not session.cmd_status("ps -ef|grep iozone|grep -v grep"))

        if not utils_misc.wait_for(_is_iozone_running, timeout=120):
            test.cancel("Failed to run iozone in guest.\n"
                        "Since the test needs to run an autotest iozone "
                        "job inside the guest, please make sure the "
                        "necessary packages (such as gcc, tar, bzip2) "
                        "are installed in the guest.")
    logging.debug("Iozone is already running in VMs.")

    try:
        # Create a BackgroundTest for each vm to run test domstate_switch.
        backgroud_tests = []
        for vm in vms:
            bt = utils_test.BackgroundTest(func_in_thread, [vm, timeout, test])
            bt.start()
            backgroud_tests.append(bt)

        for bt in backgroud_tests:
            bt.join()

        # Reboot VMs after func_in_thread to check that they still run normally.
        for vm in vms:
            vm.reboot()
    finally:
        # Clean up.
        logging.debug("No cleaning operation for this test.")
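
In code example #20 the actual domstate switching is done by `func_in_thread`, which is defined earlier in the same module and not shown in the quoted snippet; `utils_test.BackgroundTest` simply runs it in a thread per VM while iozone keeps the guests busy. Here is a minimal sketch of that background-thread pattern using only the standard library; the real BackgroundTest class also records the worker's traceback, and the `BackgroundWorker` name is hypothetical.

import threading


class BackgroundWorker(object):
    """Simplified stand-in for utils_test.BackgroundTest: run func(*args)
    in a thread and re-raise any exception when join() is called."""

    def __init__(self, func, args):
        self._func = func
        self._args = args
        self._error = None
        self._thread = threading.Thread(target=self._run)

    def _run(self):
        try:
            self._func(*self._args)
        except Exception as exc:
            self._error = exc

    def start(self):
        self._thread.start()

    def join(self):
        self._thread.join()
        if self._error is not None:
            raise self._error


# workers = [BackgroundWorker(func_in_thread, [vm, timeout, test]) for vm in vms]
# for w in workers:
#     w.start()
# for w in workers:
#     w.join()
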
Code example #21
File: cpu_hotplug.py  Project: kmaehara/virt-test
def run_cpu_hotplug(test, params, env):
    """
    Runs CPU hotplug test:

    1) Pick up a living guest
    2) Send the monitor command cpu_set [cpu id] for each cpu we wish to have
    3) Verify if guest has the additional CPUs showing up under
        /sys/devices/system/cpu
    4) Try to bring them online by writing 1 to the 'online' file inside that dir
    5) Run the CPU Hotplug test suite shipped with autotest inside guest

    @param test: QEMU test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    n_cpus_add = int(params.get("n_cpus_add", 1))
    current_cpus = int(params.get("smp", 1))
    onoff_iterations = int(params.get("onoff_iterations", 20))
    total_cpus = current_cpus + n_cpus_add

    error.context("cleaning guest dmesg before addition")
    session.cmd("dmesg -c")

    error.context("Adding %d CPUs to guest" % n_cpus_add)
    for i in range(total_cpus):
        vm.monitor.cmd("cpu_set %s online" % i)

    output = vm.monitor.cmd("info cpus")
    logging.debug("Output of info cpus:\n%s", output)

    cpu_regexp = re.compile("CPU #(\d+)")
    total_cpus_monitor = len(cpu_regexp.findall(output))
    if total_cpus_monitor != total_cpus:
        raise error.TestFail(
            "Monitor reports %s CPUs, when VM should have %s" %
            (total_cpus_monitor, total_cpus))

    dmesg_after = session.cmd("dmesg")
    logging.debug("Guest dmesg output after CPU add:\n%s" % dmesg_after)

    # Verify whether the new cpus are showing up on /sys
    error.context("verifying if new CPUs are showing on guest's /sys dir")
    n_cmd = 'find /sys/devices/system/cpu/cpu[0-9]* -maxdepth 0 -type d | wc -l'
    output = session.cmd(n_cmd)
    logging.debug("List of cpus on /sys:\n%s" % output)
    try:
        cpus_after_addition = int(output)
    except ValueError:
        logging.error("Output of '%s': %s", n_cmd, output)
        raise error.TestFail("Unable to get CPU count after CPU addition")

    if cpus_after_addition != total_cpus:
        raise error.TestFail("%s CPUs are showing up under "
                             "/sys/devices/system/cpu, was expecting %s" %
                             (cpus_after_addition, total_cpus))

    error.context("locating online files for guest's new CPUs")
    r_cmd = 'find /sys/devices/system/cpu/cpu[1-9]*/online -maxdepth 0 -type f'
    online_files = session.cmd(r_cmd)
    logging.debug("CPU online files detected: %s", online_files)
    online_files = sorted(online_files.split())

    if not online_files:
        raise error.TestFail("Could not find CPUs that can be "
                             "enabled/disabled on guest")

    for online_file in online_files:
        cpu_regexp = re.compile("cpu(\d+)", re.IGNORECASE)
        cpu_id = cpu_regexp.findall(online_file)[0]
        error.context("changing online status for CPU %s" % cpu_id)
        check_online_status = session.cmd("cat %s" % online_file)
        try:
            check_online_status = int(check_online_status)
        except ValueError:
            raise error.TestFail("Unable to get online status from CPU %s" %
                                 cpu_id)
        assert (check_online_status in [0, 1])
        if check_online_status == 0:
            error.context("Bringing CPU %s online" % cpu_id)
            session.cmd("echo 1 > %s" % online_file)

    # Now that all CPUs were onlined, let's execute the
    # autotest CPU Hotplug test
    control_path = os.path.join(test.virtdir, "autotest_control",
                                "cpu_hotplug.control")

    timeout = int(params.get("cpu_hotplug_timeout", 300))
    error.context("running cpu_hotplug autotest after cpu addition")
    utils_test.run_autotest(vm, session, control_path, timeout, test.outputdir,
                            params)

    # Last, but not least, let's offline/online the CPUs in the guest
    # several times
    irq = 15
    irq_mask = "f0"
    for i in xrange(onoff_iterations):
        session.cmd("echo %s > /proc/irq/%s/smp_affinity" % (irq_mask, irq))
        for online_file in online_files:
            session.cmd("echo 0 > %s" % online_file)
        for online_file in online_files:
            session.cmd("echo 1 > %s" % online_file)
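
Code example #21 drives CPU hotplug from inside the guest by writing to the per-CPU `online` files under /sys/devices/system/cpu. Below is a small, hedged sketch of that sysfs round trip, assuming `session` is a logged-in guest shell exposing the `cmd()` method used throughout these examples; the `set_cpu_online` helper is hypothetical and not part of the test suite.

def set_cpu_online(session, cpu_id, online=True):
    """Flip /sys/devices/system/cpu/cpuN/online and verify the new state."""
    value = 1 if online else 0
    path = "/sys/devices/system/cpu/cpu%d/online" % cpu_id
    session.cmd("echo %d > %s" % (value, path))
    state = int(session.cmd("cat %s" % path).strip())
    if state != value:
        raise RuntimeError("CPU %d did not switch to online=%d" % (cpu_id, value))


# Offline then re-online CPU 1, as the loop at the end of the example does:
# set_cpu_online(session, 1, online=False)
# set_cpu_online(session, 1, online=True)
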
Code example #22
            # Try to install gcc on guest first
            utils_misc.yum_install(["gcc"], session, 360)
            # increasing workload
            session.cmd("gcc %s -o %s" % (stress_path, target_path))
            session.cmd("%s &" % target_path)

        if test_type == "unixbench":
            params["main_vm"] = vm_name
            params["test_control_file"] = unixbench_control_file

            control_path = os.path.join(test.virtdir, "control",
                                        unixbench_control_file)
            command = utils_test.run_autotest(vm,
                                              session,
                                              control_path,
                                              None,
                                              None,
                                              params,
                                              copy_only=True)
            session.cmd("%s &" % command)
            # wait for autotest to start running in the VM
            time.sleep(delay)

            def _is_unixbench_running():
                return (
                    not session.cmd_status("ps -ef | grep perl | grep Run"))

            if not utils_misc.wait_for(_is_unixbench_running, timeout=240):
                raise error.TestNAError(
                    "Failed to run unixbench in guest.\n"
                    "so please make sure there are some necessary"