Code Example #1
def run(test, params, env):
    """
    Test steps:

    1) Get the test parameters from params.
    2) Divide the VMs into two groups and run the sub test for each group.
    3) Clean up.
    """
    # Get VMs.
    vms = env.get_all_vms()
    if len(vms) < 2:
        test.cancel("We need at least 2 vms for this test.")
    timeout = params.get("LB_domstate_switch_loop_time", 600)
    # Divide vms into two groups.
    odd_group_vms = []
    even_group_vms = []
    for index, vm in enumerate(vms):
        if index % 2:
            even_group_vms.append(vm)
        else:
            odd_group_vms.append(vm)

    # Run sub test for odd_group_vms.
    odd_env = env.copy()
    # Unregister VMs belonging to even_group from odd_env.
    for vm in even_group_vms:
        odd_env.unregister_vm(vm.name)
    odd_bt = utils_test.BackgroundTest(
        utils_test.run_virt_sub_test,
        params=[
            test, params, odd_env, "libvirt_bench_domstate_switch_in_loop"
        ])
    odd_bt.start()

    # Run sub test for even_group_vms.
    even_env = env.copy()
    # Unregister VMs belonging to odd_group from even_env.
    for vm in odd_group_vms:
        even_env.unregister_vm(vm.name)
    even_bt = utils_test.BackgroundTest(
        utils_test.run_virt_sub_test,
        params=[
            test, params, even_env, "libvirt_bench_domstate_switch_in_loop"
        ])
    even_bt.start()

    # Wait for background_tests joining.
    err_msg = ""
    try:
        odd_bt.join(int(timeout) * 2)
    except exceptions.TestFail as detail:
        err_msg += ("Group odd_group failed to run sub test.\n"
                    "Detail: %s." % detail)
    try:
        even_bt.join(int(timeout) * 2)
    except exceptions.TestFail as detail:
        err_msg += ("Group even_group failed to run sub test.\n"
                    "Detail: %s." % detail)
    if err_msg:
        test.fail(err_msg)
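
Most of the examples on this page follow the same pattern: wrap a callable in utils_test.BackgroundTest, start it, do the foreground work, then join() so that any exception raised inside the thread is re-raised in the main test. The sketch below is a minimal, hedged illustration of that pattern; my_workload is a hypothetical placeholder, and the BackgroundTest call signature (a function plus a tuple/list of its arguments) is assumed from how it is used in these examples.

# Minimal sketch of the common BackgroundTest pattern (hypothetical workload).
from virttest import utils_misc, utils_test


def my_workload(test, params, env):
    # Hypothetical long-running task executed in the background thread.
    pass


def run(test, params, env):
    bg = utils_test.BackgroundTest(my_workload, (test, params, env))
    bg.start()
    # Optionally confirm the thread really came up before continuing.
    if not utils_misc.wait_for(bg.is_alive, timeout=60, step=3):
        test.error("Background workload did not start")
    try:
        pass  # foreground test steps go here
    finally:
        # join() propagates any exception the background function raised,
        # so failures in the thread are not silently lost.
        bg.join()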
Code Example #2
def run(test, params, env):
    """
    Test steps:

    1) Get the test parameters from params.
    2) Run netperf on each guest.
    3) Dump each VM and check the result.
    4) Clean up.
    """
    vms = env.get_all_vms()
    netperf_control_file = params.get("netperf_controle_file",
                                      "netperf.control")
    # Run netperf on guest.
    guest_netperf_bts = []
    params["test_control_file"] = netperf_control_file
    # Fork a new process to run netperf on each guest.
    for vm in vms:
        params["main_vm"] = vm.name
        control_path = os.path.join(test.virtdir, "control",
                                    netperf_control_file)
        session = vm.wait_for_login()
        bt = utils_test.BackgroundTest(
            utils_test.run_autotest,
            [vm, session, control_path, None, None, params])
        bt.start()
        guest_netperf_bts.append(bt)

    for vm in vms:
        session = vm.wait_for_login()

        def _is_netperf_running():
            return (not session.cmd_status(
                "cat /usr/local/autotest/results/default/debug/client.DEBUG|"
                "grep \"seconds remaining\""))

        if not utils_misc.wait_for(_is_netperf_running, timeout=120):
            test.cancel("Failed to run netperf in guest.\n"
                        "Since we need to run an autotest netperf job in "
                        "the guest, please make sure the necessary packages "
                        "(such as gcc, tar and bzip2) are installed there.")

    logging.debug("Netperf is already running in VMs.")

    try:
        dump_path = os.path.join(data_dir.get_tmp_dir(), "dump_file")
        for vm in vms:
            vm.dump(dump_path)
            # Check the status after vm.dump()
            if not vm.is_alive():
                test.fail("VM is shutoff after dump.")
            if vm.wait_for_shutdown():
                test.fail("VM is going to shutdown after dump.")
            # Check VM is running normally.
            vm.wait_for_login()
    finally:
        # Destroy VM.
        for vm in vms:
            vm.destroy()
        for bt in guest_netperf_bts:
            bt.join(ignore_status=True)
Code Example #3
 def create_snapshot(self):
     error_context.context("do snapshot during guest rebooting",
                           logging.info)
     bg_test = utils_test.BackgroundTest(self.vm_reset, "")
     bg_test.start()
     logging.info("sleep a random time before taking the snapshot")
     time.sleep(random.randint(0, 10))
     super(BlockdevSnapshotRebootTest, self).create_snapshot()
Code Example #4
 def reboot_test():
     try:
         bg = utils_test.BackgroundTest(vm.reboot, (session, ))
         logging.info("Rebooting guest ...")
         bg.start()
         sleep_time = int(params.get("sleep_time"))
         time.sleep(sleep_time)
         create_snapshot(vm)
     finally:
         bg.join()
Code Example #5
def run(test, params, env):
    """
    Run guest suspend under guest nic stress

    1) Boot up VM, and login guest
    2) Run bg_stress_test(pktgen, netperf or file copy) if needed
    3) Do guest suspend and resume test

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    error_context.context("Init guest and try to login", logging.info)
    login_timeout = int(params.get("login_timeout", 360))
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    vm.wait_for_login(timeout=login_timeout)

    bg_stress_test = params.get("run_bgstress")
    try:
        if bg_stress_test:
            error_context.context("Run test %s in background" % bg_stress_test,
                                  logging.info)
            stress_thread = ""
            wait_time = float(params.get("wait_bg_time", 60))
            bg_stress_run_flag = params.get("bg_stress_run_flag")
            env[bg_stress_run_flag] = False
            stress_thread = utils_misc.InterruptedThread(
                utils_test.run_virt_sub_test, (test, params, env),
                {"sub_type": bg_stress_test})
            stress_thread.start()
            if not utils_misc.wait_for(lambda: env.get(bg_stress_run_flag),
                                       wait_time, 0, 5,
                                       "Wait %s test start" % bg_stress_test):
                test.error("Background stress test failed to start")

        suspend_type = params.get("guest_suspend_type")
        error_context.context("Run suspend '%s' test under stress"
                              % suspend_type, logging.info)
        bg_cmd = guest_suspend.run
        args = (test, params, env)
        bg = utils_test.BackgroundTest(bg_cmd, args)
        bg.start()
        if bg.is_alive():
            try:
                env[bg_stress_run_flag] = False
                bg.join()
            except Exception as e:
                err_msg = "Run guest suspend: '%s' error!\n" % suspend_type
                err_msg += "Error info: '%s'" % e
                test.fail(err_msg)

    finally:
        env[bg_stress_run_flag] = False
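
Code Example #5 mixes two thread helpers: the stress workload runs in a utils_misc.InterruptedThread, while the suspend test runs in a utils_test.BackgroundTest. Both wrap a function in a thread and are joined to surface errors. The sketch below shows the InterruptedThread side of that pattern as it appears to be used above, inside a run(test, params, env) function; names such as "my_sub_type" and "my_run_flag" are hypothetical placeholders, so treat this as a sketch under those assumptions rather than a definitive API description.

# Hedged sketch: run a sub test in an InterruptedThread and wait for a flag
# in env that the sub test is expected to set once it has actually started.
stress_thread = utils_misc.InterruptedThread(
    utils_test.run_virt_sub_test, (test, params, env),
    {"sub_type": "my_sub_type"})        # "my_sub_type" is a placeholder
stress_thread.start()
if not utils_misc.wait_for(lambda: env.get("my_run_flag"), 60, 0, 5):
    test.error("Stress sub test did not report that it started")
# ... foreground suspend/resume steps ...
stress_thread.join()   # wait for the sub test and propagate its failures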
Code Example #6
    def load_stress():
        """
        Load background IO/CPU/Memory stress in guest

        """
        error_context.context("launch stress app in guest", test.log.info)
        args = (test, params, env, params["stress_test"])
        bg_test = utils_test.BackgroundTest(utils_test.run_virt_sub_test, args)
        bg_test.start()
        if not utils_misc.wait_for(
                bg_test.is_alive, first=10, step=3, timeout=100):
            test.fail("background test start failed")
Code Example #7
 def load_stress(self):
     """
     load IO/CPU/Memory stress in guest;
     """
     error.context("launch stress app in guest", logging.info)
     args = (self.test, self.params, self.env, self.params["stress_test"])
     bg_test = utils_test.BackgroundTest(utils_test.run_virt_sub_test, args)
     bg_test.start()
     if not utils_misc.wait_for(bg_test.is_alive, first=10, step=3, timeout=100):
         raise error.TestFail("background test start failed")
     if not utils_misc.wait_for(self.stress_app_running, timeout=360, step=5):
         raise error.TestFail("stress app isn't running")
Code Example #8
 def create_snapshot(self):
     bg_test = utils_test.BackgroundTest(self.scp_test, "")
     bg_test.start()
     logging.info("Sleep for a while to wait for scp's preparation to finish")
     time.sleep(30)
     error_context.context("freeze guest before snapshot",
                           logging.info)
     self.guest_agent.fsfreeze()
     super(BlockdevSnapshotGuestAgentTest, self).create_snapshot()
     error_context.context("thaw guest after snapshot",
                           logging.info)
     self.guest_agent.fsthaw()
     bg_test.join()
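
In Code Example #8 the guest is frozen with fsfreeze() before the snapshot and thawed afterwards; if create_snapshot() raised in between, the guest would be left frozen. A slightly more defensive variant, reusing the same guest_agent calls and class name from the example (a sketch, not the upstream implementation), could bracket the snapshot in try/finally:

     # Hedged sketch: guarantee the thaw even if the snapshot step fails.
     self.guest_agent.fsfreeze()
     try:
         super(BlockdevSnapshotGuestAgentTest, self).create_snapshot()
     finally:
         self.guest_agent.fsthaw()
     bg_test.join()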
Code Example #9
 def run_test(self):
     self.pre_test()
     try:
         bg_test = utils_test.BackgroundTest(self.fio_thread, "")
         bg_test.start()
         logging.info("sleep a random time before committing during fio")
         mint = self.params.get_numeric("sleep_min")
         maxt = self.params.get_numeric("sleep_max")
         time.sleep(random.randint(mint, maxt))
         self.commit_snapshots()
         self.verify_data_file()
         bg_test.join()
     finally:
         self.post_test()
Code Example #10
 def load_stress(self):
     """
     load IO/CPU/Memory stress in guest;
     """
     error_context.context("launch stress app in guest", logging.info)
     args = (self.test, self.params, self.env, self.params["stress_test"])
     bg_test = utils_test.BackgroundTest(utils_test.run_virt_sub_test, args)
     bg_test.start()
     if not utils_misc.wait_for(bg_test.is_alive, first=10, step=3, timeout=100):
         self.test.fail("background test start failed")
     if not utils_misc.wait_for(self.app_running, timeout=360, step=5):
         self.test.fail("stress app isn't running")
     # sleep 10s to ensure heavyload.exe really puts the guest under heavy load
     time.sleep(10)
Code Example #11
def run(test, params, env):
    """
    Backup VM disk test when VM reboot

    1) Install guest
    2) Do snapshot during guest installation
    3) Rebase snapshot to base after installation finished
    4) Start guest with snapshot
    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def tag_for_install(vm, tag):
        if vm.serial_console:
            serial_output = vm.serial_console.get_output()
            if serial_output and re.search(tag, serial_output, re.M):
                return True
        logging.info("VM has not started yet")
        return False

    base_image = params.get("images", "image1").split()[0]
    params.update({"image_format_%s" % base_image: params["image_format"]})
    snapshot_test = BlockDevSnapshotTest(test, params, env)
    args = (test, params, env)
    bg = utils_test.BackgroundTest(unattended_install.run, args)
    bg.start()
    if bg.is_alive():
        tag = params["tag_for_install_start"]
        if utils_misc.wait_for(
                lambda: tag_for_install(snapshot_test.main_vm, tag), 120, 10,
                5):
            logging.info("sleep a random time before doing snapshots")
            time.sleep(random.randint(120, 600))
            snapshot_test.pre_test()
            try:
                snapshot_test.create_snapshot()
                bg.join(timeout=1200)
                snapshot_test.verify_snapshot()
                snapshot_test.clone_vm.wait_for_login()
            finally:
                snapshot_test.post_test()
        else:
            test.fail("Failed to install guest")
    else:
        test.fail("Background process: installation did not start")
Code Example #12
    def _install_vm_in_background(self):
        """Install VM in background"""
        self.main_vm = self.env.get_vm(self.params["main_vm"])
        args = (self.test, self.params, self.env)
        self._bg = utils_test.BackgroundTest(unattended_install.run, args)
        self._bg.start()

        logging.info("Wait till '%s'" % self.params["tag_for_install_start"])
        if utils_misc.wait_for(
                lambda: self._is_install_started(self.params[
                    "tag_for_install_start"]),
                int(self.params.get("timeout_for_install_start", 360)), 10, 5):
            logging.info("Sleep some time before block-mirror")
            time.sleep(random.randint(10, 120))
        else:
            self.test.fail("Failed to start VM installation")
Code Example #13
 def file_transfer_test():
     try:
         bg_cmd = file_transfer.run_file_transfer
         args = (test, params, env)
         bg = utils_test.BackgroundTest(bg_cmd, args)
         bg.start()
         sleep_time = int(params.get("sleep_time"))
         time.sleep(sleep_time)
         create_snapshot(vm)
         if bg.is_alive():
             bg.join()
     finally:
         session.close()
Code Example #14
    def run_background_process(session, bg_cmd):
        """
        Run a background process.

        :param session: A shell session object.
        :param bg_cmd: command to run in the background.
        :return: background thread
        """
        error_context.context("Start a background process: '%s'" % bg_cmd,
                              logging.info)
        args = (bg_cmd, 360)
        bg = utils_test.BackgroundTest(session.cmd, args)
        bg.start()
        if not utils_misc.wait_for(bg.is_alive, 60):
            test.fail("Failed to start background process: '%s'" % bg_cmd)
        return bg
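
A note on the readiness check just above (and the similar one in Code Example #21): utils_misc.wait_for expects a callable that it polls, so the bound method bg.is_alive is passed directly. Wrapping it as lambda: bg.is_alive would instead return the method object itself, which is always truthy, so such a check could never detect a thread that failed to start. A minimal hedged illustration, assuming the usual wait_for(func, timeout, ...) polling behaviour:

    # wait_for repeatedly calls its first argument until it returns a truthy
    # value or the timeout expires, so pass something that is actually called.
    started = utils_misc.wait_for(bg.is_alive, 60)      # polls bg.is_alive()
    # Broken variant: the lambda returns the bound method object, which is
    # always truthy, so the wait "succeeds" even if the thread never started.
    # started = utils_misc.wait_for(lambda: bg.is_alive, 60)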
Code Example #15
def run(test, params, env):
    """
    Block commit base Test

    1. Install guest
    2. Create 4 snapshots during guest installation
    3. Commit snapshot 3 to base
    4. Installation can be finished after the commit
    """
    def tag_for_install(vm, tag):
        if vm.serial_console:
            serial_output = vm.serial_console.get_output()
            if tag in serial_output:
                return True
        logging.info("vm has not started yet")
        return False

    block_test = BlockdevCommitInstall(test, params, env)
    args = (test, params, env)
    bg = utils_test.BackgroundTest(unattended_install.run, args)
    bg.start()
    if bg.is_alive():
        tag = params.get("tag_for_install_start", "Starting Login Service")
        if utils_misc.wait_for(
                lambda: tag_for_install(block_test.main_vm, tag), 240, 10, 5):
            logging.info("sleep a random time before doing snapshots")
            time.sleep(random.randint(10, 120))
            block_test.pre_test()
            try:
                block_test.commit_snapshots()
                bg.join(timeout=1200)
                reboot_method = params.get("reboot_method", "system_reset")
                block_test.main_vm.reboot(method=reboot_method)
            finally:
                block_test.post_test()
        else:
            test.fail("Failed to install guest")
    else:
        test.fail("Installation failed to start")
Code Example #16
    def runtime_test():
        try:
            clean_cmd = params.get("clean_cmd")
            file_create = params.get("file_create")
            clean_cmd += " %s" % file_create
            logging.info("Clean file before creation")
            session.cmd(clean_cmd)

            logging.info("Creating big file...")
            create_cmd = params.get("create_cmd") % file_create

            args = (create_cmd, dd_timeout)
            bg = utils_test.BackgroundTest(session.cmd_output, args)
            bg.start()
            time.sleep(5)
            create_snapshot(vm)
            if bg.is_alive():
                bg.join()
        finally:
            session.close()
Code Example #17
File: migration.py  Project: yanglei-rh/tp-qemu
    def guest_stress_start(guest_stress_test):
        """
        Start a stress test in the guest. Could be 'iozone', 'dd' or 'stress'.

        :param guest_stress_test: type of stress test.
        """
        from generic.tests import autotest_control

        timeout = 0

        if guest_stress_test == "autotest":
            test_type = params.get("test_type")
            func = autotest_control.run
            new_params = params.copy()
            new_params["test_control_file"] = "%s.control" % test_type

            args = (test, new_params, env)
            timeout = 60
        elif guest_stress_test == "dd":
            vm = env.get_vm(params.get("main_vm"))
            vm.verify_alive()
            session = vm.wait_for_login(timeout=login_timeout)
            func = session.cmd_output
            args = ("for((;;)) do dd if=/dev/zero of=/tmp/test bs=5M "
                    "count=100; rm -f /tmp/test; done", login_timeout,
                    logging.info)

        logging.info("Start %s test in guest", guest_stress_test)
        bg = utils_test.BackgroundTest(func, args)
        params["guest_stress_test_pid"] = bg
        bg.start()
        if timeout:
            logging.info("sleep %ds waiting for guest test to start.", timeout)
            time.sleep(timeout)
        if not bg.is_alive():
            test.fail("Failed to start guest test!")
Code Example #18
File: migration.py  Project: deanspring/FT4VM-L1_test
def run_migration(test, params, env):
    """
    KVM migration test:
    1) Get a live VM and clone it.
    2) Verify that the source VM supports migration.  If it does, proceed with
            the test.
    3) Send a migration command to the source VM and wait until it's finished.
    4) Kill off the source VM.
    5) Log into the destination VM after the migration is finished.
    6) Compare the output of a reference command executed on the source with
            the output of the same command on the destination machine.

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    def guest_stress_start(guest_stress_test):
        """
        Start a stress test in the guest. Could be 'iozone', 'dd' or 'stress'.

        :param guest_stress_test: type of stress test.
        """
        from tests import autotest_control

        timeout = 0

        if guest_stress_test == "autotest":
            test_type = params.get("test_type")
            func = autotest_control.run_autotest_control
            new_params = params.copy()
            new_params["test_control_file"] = "%s.control" % test_type

            args = (test, new_params, env)
            timeout = 60
        elif guest_stress_test == "dd":
            vm = env.get_vm(params.get("main_vm"))
            vm.verify_alive()
            session = vm.wait_for_login(timeout=login_timeout)
            func = session.cmd_output
            args = ("for((;;)) do dd if=/dev/zero of=/tmp/test bs=5M "
                    "count=100; rm -f /tmp/test; done", login_timeout,
                    logging.info)

        logging.info("Start %s test in guest", guest_stress_test)
        bg = utils_test.BackgroundTest(func, args)
        params["guest_stress_test_pid"] = bg
        bg.start()
        if timeout:
            logging.info("sleep %ds waiting for guest test to start.", timeout)
            time.sleep(timeout)
        if not bg.is_alive():
            raise error.TestFail("Failed to start guest test!")

    def guest_stress_deamon():
        """
        This daemon keeps watching the status of the stress test in the guest.
        If the stress program finishes before the migration, it is restarted.
        """
        while True:
            bg = params.get("guest_stress_test_pid")
            action = params.get("action")
            if action == "run":
                logging.debug("Check if guest stress is still running")
                guest_stress_test = params.get("guest_stress_test")
                if bg and not bg.is_alive():
                    logging.debug("Stress process finished, restart it")
                    guest_stress_start(guest_stress_test)
                    time.sleep(30)
                else:
                    logging.debug("Stress still on")
            else:
                if bg and bg.is_alive():
                    try:
                        stress_stop_cmd = params.get("stress_stop_cmd")
                        vm = env.get_vm(params.get("main_vm"))
                        vm.verify_alive()
                        session = vm.wait_for_login()
                        if stress_stop_cmd:
                            logging.warn(
                                "Killing background stress process "
                                "with cmd '%s'; you may see some error "
                                "messages in the client test result, "
                                "they are harmless.", stress_stop_cmd)
                            session.cmd(stress_stop_cmd)
                        bg.join(10)
                    except Exception:
                        pass
                break
            time.sleep(10)

    def get_functions(func_names, locals_dict):
        """
        Find sub function(s) in this function with the given name(s).
        """
        if not func_names:
            return []
        funcs = []
        for f in func_names.split():
            f = locals_dict.get(f)
            if isinstance(f, types.FunctionType):
                funcs.append(f)
        return funcs

    def mig_set_speed():
        mig_speed = params.get("mig_speed", "1G")
        return vm.monitor.migrate_set_speed(mig_speed)

    login_timeout = int(params.get("login_timeout", 360))
    mig_timeout = float(params.get("mig_timeout", "3600"))
    mig_protocol = params.get("migration_protocol", "tcp")
    mig_cancel_delay = int(params.get("mig_cancel") == "yes") * 2
    mig_exec_cmd_src = params.get("migration_exec_cmd_src")
    mig_exec_cmd_dst = params.get("migration_exec_cmd_dst")
    if mig_exec_cmd_src and "gzip" in mig_exec_cmd_src:
        mig_exec_file = params.get("migration_exec_file", "/var/tmp/exec")
        mig_exec_file += "-%s" % utils_misc.generate_random_string(8)
        mig_exec_cmd_src = mig_exec_cmd_src % mig_exec_file
        mig_exec_cmd_dst = mig_exec_cmd_dst % mig_exec_file
    offline = params.get("offline", "no") == "yes"
    check = params.get("vmstate_check", "no") == "yes"
    living_guest_os = params.get("migration_living_guest", "yes") == "yes"
    deamon_thread = None

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    if living_guest_os:

        session = vm.wait_for_login(timeout=login_timeout)

        # Get the output of migration_test_command
        test_command = params.get("migration_test_command")
        reference_output = session.cmd_output(test_command)

        # Start some process in the background (and leave the session open)
        background_command = params.get("migration_bg_command", "")
        session.sendline(background_command)
        time.sleep(5)

        # Start another session with the guest and make sure the background
        # process is running
        session2 = vm.wait_for_login(timeout=login_timeout)

        try:
            check_command = params.get("migration_bg_check_command", "")
            session2.cmd(check_command, timeout=30)
            session2.close()

            # run some functions before migrate start.
            pre_migrate = get_functions(params.get("pre_migrate"), locals())
            for func in pre_migrate:
                func()

            # Start stress test in guest.
            guest_stress_test = params.get("guest_stress_test")
            if guest_stress_test:
                guest_stress_start(guest_stress_test)
                params["action"] = "run"
                deamon_thread = utils_test.BackgroundTest(
                    guest_stress_deamon, ())
                deamon_thread.start()

            # Migrate the VM
            ping_pong = params.get("ping_pong", 1)
            for i in xrange(int(ping_pong)):
                if i % 2 == 0:
                    logging.info("Round %s ping..." % str(i / 2))
                else:
                    logging.info("Round %s pong..." % str(i / 2))
                vm.migrate(mig_timeout,
                           mig_protocol,
                           mig_cancel_delay,
                           offline,
                           check,
                           migration_exec_cmd_src=mig_exec_cmd_src,
                           migration_exec_cmd_dst=mig_exec_cmd_dst)

            # Set deamon thread action to stop after migrate
            params["action"] = "stop"

            # run some functions after migrate finish.
            post_migrate = get_functions(params.get("post_migrate"), locals())
            for func in post_migrate:
                func()

            # Log into the guest again
            logging.info("Logging into guest after migration...")
            session2 = vm.wait_for_login(timeout=30)
            logging.info("Logged in after migration")

            # Make sure the background process is still running
            session2.cmd(check_command, timeout=30)

            # Get the output of migration_test_command
            output = session2.cmd_output(test_command)

            # Compare output to reference output
            if output != reference_output:
                logging.info("Command output before migration differs from "
                             "command output after migration")
                logging.info("Command: %s", test_command)
                logging.info(
                    "Output before:" +
                    utils_misc.format_str_for_message(reference_output))
                logging.info("Output after:" +
                             utils_misc.format_str_for_message(output))
                raise error.TestFail("Command '%s' produced different output "
                                     "before and after migration" %
                                     test_command)

        finally:
            # Kill the background process
            if session2 and session2.is_alive():
                session2.cmd_output(params.get("migration_bg_kill_command",
                                               ""))
            if deamon_thread is not None:
                # Set deamon thread action to stop after migrate
                params["action"] = "stop"
                deamon_thread.join()

        session2.close()
        session.close()
    else:
        # Just migrate without depending on a living guest OS
        vm.migrate(mig_timeout,
                   mig_protocol,
                   mig_cancel_delay,
                   offline,
                   check,
                   migration_exec_cmd_src=mig_exec_cmd_src,
                   migration_exec_cmd_dst=mig_exec_cmd_dst)
Code Example #19
File: whql_env_setup.py  Project: bingbu/virt-test
def run_whql_env_setup(test, params, env):
    """
    KVM whql env setup test:
    1) Log into a guest
    2) Update Windows kernel to the newest version
    3) Un-check Automatically restart in system failure
    4) Disable UAC
    5) Get the symbol files
    6) Set VM to physical memory + 100M
    7) Update the nic configuration
    8) Install debug view and make it auto run

    @param test: QEMU test object
    @param params: Dictionary with the test parameters
    @param env: Dictionary with test environment.
    """
    log_path = "%s/../debug" % test.resultsdir
    # Prepare the tools iso
    error.context("Prepare the tools iso", logging.info)
    src_list = params.get("src_list")
    src_path = params.get("src_path", "%s/whql_src" % test.tmpdir)
    if not os.path.exists(src_path):
        os.makedirs(src_path)
    if src_list is not None:
        for i in re.split(",", src_list):
            utils.unmap_url(src_path, i, src_path)

    # Make iso for src
    cdrom_whql = params.get("cdrom_whql")
    cdrom_whql = utils_misc.get_path(data_dir.get_data_dir(), cdrom_whql)
    cdrom_whql_dir = os.path.split(cdrom_whql)[0]
    if not os.path.exists(cdrom_whql_dir):
        os.makedirs(cdrom_whql_dir)
    cmd = "mkisofs -J -o %s %s" % (cdrom_whql, src_path)
    utils.system(cmd)
    params["cdroms"] += " whql"

    vm = "vm1"
    vm_params = params.object_params(vm)
    env_process.preprocess_vm(test, vm_params, env, vm)
    vm = env.get_vm(vm)

    timeout = float(params.get("login_timeout", 240))
    session = vm.wait_for_login(timeout=timeout)
    error_log = utils_misc.get_path(log_path, "whql_setup_error_log")
    run_guest_log = params.get("run_guest_log",
                               "%s/whql_qemu_comman" % test.tmpdir)

    # Record qemu command line in a log file
    error.context("Record qemu command line", logging.info)
    if os.path.isfile(run_guest_log):
        fd = open(run_guest_log, "r+")
        fd.read()
    else:
        fd = open(run_guest_log, "w")
    fd.write("%s\n" % vm.qemu_command)
    fd.close()

    # Get set up commands
    update_cmd = params.get("update_cmd", "")
    timezone_cmd = params.get("timezone_cmd", "")
    auto_restart = params.get("auto_restart", "")
    qxl_install = params.get("qxl_install", "")
    debuggers_install = params.get("debuggers_install", "")
    disable_uas = params.get("disable_uas", "")
    symbol_files = params.get("symbol_files", "")
    vm_size = int(params.get("mem")) + 100
    nic_cmd = params.get("nic_config_cmd", "")
    dbgview_cmd = params.get("dbgview_cmd", "")
    format_cmd = params.get("format_cmd", "")
    disable_firewall = params.get("disable_firewall", "")
    disable_update = params.get("disable_update", "")
    setup_timeout = int(params.get("setup_timeout", "7200"))
    disk_init_cmd = params.get("disk_init_cmd", "")
    disk_driver_install = params.get("disk_driver_install", "")

    vm_ma_cmd = "wmic computersystem set AutomaticManagedPagefile=False"
    vm_cmd = "wmic pagefileset where name=\"C:\\\\pagefile.sys\" set "
    vm_cmd += "InitialSize=%s,MaximumSize=%s" % (vm_size, vm_size)
    vm_ma_cmd = ""
    vm_cmd = ""
    if symbol_files:
        symbol_cmd = "del  C:\\\\symbols &&"
        symbol_cmd += "git clone %s C:\\\\symbol_files C:\\\\symbols" % \
                      symbol_files
    else:
        symbol_cmd = ""
    wmic_prepare_cmd = "echo exit > cmd && cmd /s wmic"

    error.context("Configure guest system", logging.info)
    cmd_list = [
        wmic_prepare_cmd, auto_restart, disable_uas, symbol_cmd, vm_ma_cmd,
        vm_cmd, dbgview_cmd, qxl_install, disable_firewall, timezone_cmd
    ]
    if nic_cmd:
        for index, nic in enumerate(re.split("\s+", params.get("nics"))):
            setup_params = params.get("nic_setup_params_%s" % nic, "")
            if params.get("platform", "") == "x86_64":
                nic_cmd = re.sub("set", "set_64", nic_cmd)
            cmd_list.append("%s %s %s" %
                            (nic_cmd, str(index + 1), setup_params))
    if disk_init_cmd:
        disk_num = len(re.split("\s+", params.get("images")))
        if disk_driver_install:
            cmd_list.append(disk_driver_install + str(disk_num - 1))
        labels = "IJKLMNOPQRSTUVWXYZ"
        for index, images in enumerate(re.split("\s+", params.get("images"))):
            if index > 0:
                cmd_list.append(disk_init_cmd %
                                (str(index), labels[index - 1]))
                format_cmd_image = format_cmd % (
                    labels[index - 1], params.get("win_format_%s" % images))
                if params.get("win_extra_%s" % images):
                    format_cmd_image += " %s" % params.get(
                        "win_extra_%s" % images)
                cmd_list.append(format_cmd_image)

    cmd_list += [update_cmd, disable_update]

    failed_flag = 0

    # Check symbol files in guest
    if symbol_files:
        error.context("Update symbol files", logging.info)
        install_check_tool = False
        check_tool_chk = params.get("check_tool_chk",
                                    "C:\debuggers\symchk.exe")
        output = session.cmd_output(check_tool_chk)
        if "cannot find" in output:
            install_check_tool = True

        if install_check_tool:
            output = session.cmd_output(debuggers_install)
        symbol_file_check = params.get("symbol_file_check")
        symbol_file_download = params.get("symbol_file_download")

        symbol_check_pattern = params.get("symbol_check_pattern")
        symbol_pid_pattern = params.get("symbol_pid_pattern")
        download = utils_test.BackgroundTest(
            session.cmd, (symbol_file_download, setup_timeout))

        sessioncheck = vm.wait_for_login(timeout=timeout)
        download.start()
        while download.is_alive():
            o = sessioncheck.cmd_output(symbol_file_check, setup_timeout)
            if symbol_check_pattern in o:
                # Check is done, kill the download process
                cmd = "tasklist /FO list"
                s, o = sessioncheck.cmd_status_output(cmd)
                pid = re.findall(symbol_pid_pattern, o, re.S)
                if pid:
                    cmd = "taskkill /PID %s /F" % pid[0]
                    try:
                        sessioncheck.cmd(cmd)
                    except Exception:
                        pass
                    break
            time.sleep(5)
        sessioncheck.close()
        download.join()

    for cmd in cmd_list:
        if len(cmd) > 0:
            s = 0
            try:
                s, o = session.cmd_status_output(cmd, timeout=setup_timeout)
            except Exception as err:
                failed_flag += 1
                utils_misc.log_line(error_log,
                                    "Unexpected exception: %s" % err)
            if s != 0:
                failed_flag += 1
                utils_misc.log_line(error_log, o)
Code Example #20
File: memhp_threads.py  Project: liuyd96/tp-qemu
def run(test, params, env):
    """
    prealloc-threads test:
    1) Boot guest in paused status
    2) Get qemu threads number
    3) Hotplug memory backend with a large size and option prealloc-threads
    4) Get qemu threads number during step 3
    5) Check if qemu threads number in step 4 is expected, if not, fail test
    6) Otherwise, hotplug pc-dimm device
    7) Resume vm
    8) Check guest memory

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def get_qemu_threads(cmd, timeout=60):
        """
        Get qemu threads when it's stable
        """
        threads = 0
        start_time = time.time()
        end_time = time.time() + float(timeout)
        while time.time() < end_time:
            cur_threads = int(process.system_output(cmd, shell=True))
            if cur_threads != threads:
                threads = cur_threads
                time.sleep(1)
            else:
                return threads
        test.error("Can't get stable qemu threads number in %ss." % timeout)

    vm = env.get_vm(params["main_vm"])
    logging.info("Get qemu threads number at beginning")
    get_threads_cmd = params["get_threads_cmd"] % vm.get_pid()
    pre_threads = get_qemu_threads(get_threads_cmd)
    mem = params.get("target_mems")
    new_params = params.object_params(mem).object_params("mem")
    dev = Memory(new_params["backend"], new_params)
    dev.set_param("id", "%s-%s" % ("mem", mem))
    args = [vm.monitor, vm.devices.qemu_version]
    bg = utils_test.BackgroundTest(dev.hotplug, args)
    logging.info("Hotplug memory backend '%s' to guest", dev["id"])
    bg.start()
    threads_num = int(new_params["prealloc-threads"])
    logging.info("Get qemu threads number again")
    post_threads = get_qemu_threads(get_threads_cmd)
    if post_threads - pre_threads != threads_num:
        test.fail("QEMU threads number is not right, pre is %s, post is %s" %
                  (pre_threads, post_threads))
    bg.join()
    memhp_test = MemoryHotplugTest(test, params, env)
    memhp_test.update_vm_after_hotplug(vm, dev)
    dimm = vm.devices.dimm_device_define_by_params(params.object_params(mem),
                                                   mem)
    dimm.set_param("memdev", dev["id"])
    logging.info("Hotplug pc-dimm '%s' to guest", dimm["id"])
    vm.devices.simple_hotplug(dimm, vm.monitor)
    memhp_test.update_vm_after_hotplug(vm, dimm)
    logging.info("Resume vm and check memory inside guest")
    vm.resume()
    memhp_test.check_memory(vm)
Code Example #21
def run(test, params, env):
    """
    Verify the "-debugcon" parameter under the UEFI environment:
    1) Boot up a guest.
       If params["ovmf_log"] is not None,
       append debugcon parameter to qemu command lines.
    2) Remove the existing isa-log device.
    3) Destroy the guest.
    4) Start the trace command on host.
    5) Re-create the guest and verify it is alive.
    6) Destroy the guest.
    7) Check pio_read counts and pio_write counts.
    7.1) If disable debugcon:
            pio_read_counts > 0
            pio_write_counts = 0
    7.2) If enable debugcon:
            pio_read_counts > 0
            pio_write_counts > 0

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    def check_trace_process():
        """
        check whether trace process is existing
        """
        if process.system(
                params["grep_trace_cmd"], ignore_status=True, shell=True):
            return False
        else:
            return True

    def remove_isa_debugcon(vm):
        """
        remove the existing isa-log device
        """
        for device in vm.devices:
            if device.type == "isa-log":
                vm.devices.remove(device)
                break
        env.register_vm(vm.name, vm)

    def trace_kvm_pio():
        """
        trace event kvm_pio
        """
        process.system(trace_record_cmd)

    # install trace-cmd in host
    utils_package.package_install("trace-cmd")
    if params.get("ovmf_log"):
        error_context.context("Append debugcon parameter to "
                              "qemu command lines.", test.log.info)
        ovmf_log = utils_misc.get_path(test.debugdir, params["ovmf_log"])
        params["extra_params"] %= ovmf_log
        params["start_vm"] = "yes"
        env_process.process(test, params, env,
                            env_process.preprocess_image,
                            env_process.preprocess_vm)
    trace_output_file = utils_misc.get_path(test.debugdir,
                                            params["trace_output"])
    trace_record_cmd = params["trace_record_cmd"] % trace_output_file
    check_pio_read = params["check_pio_read"] % trace_output_file
    check_pio_write = params["check_pio_write"] % trace_output_file
    stop_trace_record = params["stop_trace_record"]
    timeout = int(params.get("timeout", 120))
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    error_context.context("Remove the existing isa-log device.", test.log.info)
    remove_isa_debugcon(vm)
    vm.destroy()
    error_context.context("Run trace record command on host.",
                          test.log.info)
    bg = utils_test.BackgroundTest(trace_kvm_pio, ())
    bg.start()
    if not utils_misc.wait_for(bg.is_alive, timeout):
        test.fail("Failed to start command: '%s'" % trace_record_cmd)
    try:
        vm.create()
        vm.verify_alive()
        vm.destroy()
        process.system(stop_trace_record, ignore_status=True, shell=True)
        if not utils_misc.wait_for(
                lambda: not check_trace_process(), timeout, 30, 3):
            test.fail("Failed to stop command: '%s' after %s seconds."
                      % (stop_trace_record, timeout))
        pio_read_counts = int(process.run(
            check_pio_read, shell=True).stdout.decode().strip())
        err_str = "pio_read counts should be greater than 0. "
        err_str += "But the actual counts are %s." % pio_read_counts
        test.assertGreater(pio_read_counts, 0, err_str)
        pio_write_counts = int(process.run(
            check_pio_write, shell=True).stdout.decode().strip())
        if params.get("ovmf_log"):
            err_str = "pio_write counts should be greater than 0. "
            err_str += "But the actual counts are %s." % pio_write_counts
            test.assertGreater(pio_write_counts, 0, err_str)
        else:
            err_str = "pio_write counts should be equal to 0. "
            err_str += "But the actual counts are %s." % pio_write_counts
            test.assertEqual(pio_write_counts, 0, err_str)
    finally:
        if check_trace_process():
            process.system(stop_trace_record, ignore_status=True, shell=True)
Code Example #22
File: migration.py  Project: yanglei-rh/tp-qemu
def run(test, params, env):
    """
    KVM migration test:
    1) Get a live VM and clone it.
    2) Verify that the source VM supports migration.  If it does, proceed with
            the test.
    3) Send a migration command to the source VM and wait until it's finished.
    4) Kill off the source VM.
    5) Log into the destination VM after the migration is finished.
    6) Compare the output of a reference command executed on the source with
            the output of the same command on the destination machine.

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    def guest_stress_start(guest_stress_test):
        """
        Start a stress test in the guest. Could be 'iozone', 'dd' or 'stress'.

        :param guest_stress_test: type of stress test.
        """
        from generic.tests import autotest_control

        timeout = 0

        if guest_stress_test == "autotest":
            test_type = params.get("test_type")
            func = autotest_control.run
            new_params = params.copy()
            new_params["test_control_file"] = "%s.control" % test_type

            args = (test, new_params, env)
            timeout = 60
        elif guest_stress_test == "dd":
            vm = env.get_vm(params.get("main_vm"))
            vm.verify_alive()
            session = vm.wait_for_login(timeout=login_timeout)
            func = session.cmd_output
            args = ("for((;;)) do dd if=/dev/zero of=/tmp/test bs=5M "
                    "count=100; rm -f /tmp/test; done", login_timeout,
                    logging.info)

        logging.info("Start %s test in guest", guest_stress_test)
        bg = utils_test.BackgroundTest(func, args)
        params["guest_stress_test_pid"] = bg
        bg.start()
        if timeout:
            logging.info("sleep %ds waiting for guest test to start.", timeout)
            time.sleep(timeout)
        if not bg.is_alive():
            test.fail("Failed to start guest test!")

    def guest_stress_deamon():
        """
        This daemon keeps watching the status of the stress test in the guest.
        If the stress program finishes before the migration, it is restarted.
        """
        while True:
            bg = params.get("guest_stress_test_pid")
            action = params.get("action")
            if action == "run":
                logging.debug("Check if guest stress is still running")
                guest_stress_test = params.get("guest_stress_test")
                if bg and not bg.is_alive():
                    logging.debug("Stress process finished, restart it")
                    guest_stress_start(guest_stress_test)
                    time.sleep(30)
                else:
                    logging.debug("Stress still on")
            else:
                if bg and bg.is_alive():
                    try:
                        stress_stop_cmd = params.get("stress_stop_cmd")
                        vm = env.get_vm(params.get("main_vm"))
                        vm.verify_alive()
                        session = vm.wait_for_login()
                        if stress_stop_cmd:
                            logging.warn(
                                "Killing background stress process "
                                "with cmd '%s'; you may see some error "
                                "messages in the client test result, "
                                "they are harmless.", stress_stop_cmd)
                            session.cmd(stress_stop_cmd)
                        bg.join(10)
                    except Exception:
                        pass
                break
            time.sleep(10)

    login_timeout = int(params.get("login_timeout", 360))
    mig_timeout = float(params.get("mig_timeout", "3600"))
    mig_protocol = params.get("migration_protocol", "tcp")
    mig_cancel_delay = int(params.get("mig_cancel") == "yes") * 2
    mig_exec_cmd_src = params.get("migration_exec_cmd_src")
    mig_exec_cmd_dst = params.get("migration_exec_cmd_dst")
    if mig_exec_cmd_src and "gzip" in mig_exec_cmd_src:
        mig_exec_file = params.get("migration_exec_file", "/var/tmp/exec")
        mig_exec_file += "-%s" % utils_misc.generate_random_string(8)
        mig_exec_cmd_src = mig_exec_cmd_src % mig_exec_file
        mig_exec_cmd_dst = mig_exec_cmd_dst % mig_exec_file
    offline = params.get("offline", "no") == "yes"
    check = params.get("vmstate_check", "no") == "yes"
    living_guest_os = params.get("migration_living_guest", "yes") == "yes"
    deamon_thread = None

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    if living_guest_os:

        session = vm.wait_for_login(timeout=login_timeout)

        # Get the output of migration_test_command
        test_command = params.get("migration_test_command")
        reference_output = session.cmd_output(test_command)

        # Start some process in the background (and leave the session open)
        background_command = params.get("migration_bg_command", "")

        # check whether tcpdump is installed
        if "tcpdump" in background_command:
            if not utils_package.package_install("tcpdump", session):
                test.cancel("Please install tcpdump to proceed")
        session.sendline(background_command)
        time.sleep(5)

        # Start another session with the guest and make sure the background
        # process is running
        session2 = vm.wait_for_login(timeout=login_timeout)

        try:
            check_command = params.get("migration_bg_check_command", "")
            error_context.context(
                "Checking the background command in the "
                "guest pre migration", logging.info)
            if session2.cmd_status(check_command, timeout=30) != 0:
                test.error("migration bg check command failed")
            session2.close()

            # Start stress test in guest.
            guest_stress_test = params.get("guest_stress_test")
            if guest_stress_test:
                guest_stress_start(guest_stress_test)
                params["action"] = "run"
                deamon_thread = utils_test.BackgroundTest(
                    guest_stress_deamon, ())
                deamon_thread.start()

            capabilities = ast.literal_eval(
                params.get("migrate_capabilities", "{}"))
            inner_funcs = ast.literal_eval(
                params.get("migrate_inner_funcs", "[]"))
            mig_parameters = ast.literal_eval(
                params.get("migrate_parameters", "None"))
            target_mig_parameters = params.get("target_migrate_parameters",
                                               "None")
            target_mig_parameters = ast.literal_eval(target_mig_parameters)
            migrate_parameters = (mig_parameters, target_mig_parameters)
            pre_migrate = get_functions(params.get("pre_migrate"), globals())

            # Migrate the VM
            ping_pong = params.get("ping_pong", 1)
            for i in range(int(ping_pong)):
                # run some functions before migrate start
                for func in pre_migrate:
                    func(vm, params, test)
                if i % 2 == 0:
                    logging.info("Round %s ping..." % str(i / 2))
                else:
                    logging.info("Round %s pong..." % str(i / 2))
                try:
                    vm.migrate(mig_timeout,
                               mig_protocol,
                               mig_cancel_delay,
                               offline,
                               check,
                               migration_exec_cmd_src=mig_exec_cmd_src,
                               migration_exec_cmd_dst=mig_exec_cmd_dst,
                               migrate_capabilities=capabilities,
                               mig_inner_funcs=inner_funcs,
                               env=env,
                               migrate_parameters=migrate_parameters)
                except qemu_monitor.MonitorNotSupportedMigCapError as e:
                    test.cancel("Unable to access capability: %s" % e)
                except:
                    raise

            # Set deamon thread action to stop after migrate
            params["action"] = "stop"

            # run some functions after migrate finish.
            post_migrate = get_functions(params.get("post_migrate"), globals())
            for func in post_migrate:
                func(vm, params, test)

            # Log into the guest again
            logging.info("Logging into guest after migration...")
            session2 = vm.wait_for_login(timeout=30)
            logging.info("Logged in after migration")

            # Make sure the background process is still running
            error_context.context(
                "Checking the background command in the "
                "guest post migration", logging.info)
            session2.cmd(check_command, timeout=30)

            # Get the output of migration_test_command
            output = session2.cmd_output(test_command)

            # Compare output to reference output
            if output != reference_output:
                logging.info("Command output before migration differs from "
                             "command output after migration")
                logging.info("Command: %s", test_command)
                logging.info(
                    "Output before:" +
                    utils_misc.format_str_for_message(reference_output))
                logging.info("Output after:" +
                             utils_misc.format_str_for_message(output))
                test.fail("Command '%s' produced different output "
                          "before and after migration" % test_command)

        finally:
            # Kill the background process
            if session2 and session2.is_alive():
                bg_kill_cmd = params.get("migration_bg_kill_command", None)
                ignore_status = params.get("migration_bg_kill_ignore_status",
                                           1)
                if bg_kill_cmd is not None:
                    try:
                        session2.cmd(bg_kill_cmd)
                    except aexpect.ShellCmdError as details:
                        # If the migration_bg_kill_command rc differs from
                        # ignore_status, it means the migration_bg_command is
                        # no longer alive. Let's ignore the failure here if
                        # that is the case.
                        if not int(details.status) == int(ignore_status):
                            raise
                    except aexpect.ShellTimeoutError:
                        logging.debug(
                            "Remote session not responsive, "
                            "shutting down VM %s", vm.name)
                        vm.destroy(gracefully=True)
            if deamon_thread is not None:
                # Set deamon thread action to stop after migrate
                params["action"] = "stop"
                deamon_thread.join()
    else:
        # Just migrate without depending on a living guest OS
        vm.migrate(mig_timeout,
                   mig_protocol,
                   mig_cancel_delay,
                   offline,
                   check,
                   migration_exec_cmd_src=mig_exec_cmd_src,
                   migration_exec_cmd_dst=mig_exec_cmd_dst,
                   migrate_parameters=migrate_parameters)
Code Example #23
def run(test, params, env):
    """
    Test steps:

    1) Get the test parameters from params.
    2) Run iozone on each guest.
    3) Run domstate_switch test for each VM.
    4) Clean up.
    """
    vms = env.get_all_vms()
    iozone_control_file = params.get("iozone_control_file", "iozone.control")
    timeout = int(params.get("LB_domstate_with_iozone_loop_time", "600"))
    # Run iozone on guest.
    params["test_control_file"] = iozone_control_file
    # Fork a new process to run iozone on each guest.
    for vm in vms:
        params["main_vm"] = vm.name
        control_path = os.path.join(test.virtdir, "control",
                                    iozone_control_file)

        session = vm.wait_for_login()
        command = utils_test.run_autotest(vm,
                                          session,
                                          control_path,
                                          None,
                                          None,
                                          params,
                                          copy_only=True)
        session.cmd("%s &" % command)

    for vm in vms:
        session = vm.wait_for_login()

        def _is_iozone_running():
            return (not session.cmd_status("ps -ef|grep iozone|grep -v grep"))

        if not utils_misc.wait_for(_is_iozone_running, timeout=120):
            test.cancel("Failed to run iozone in guest.\n"
                        "Since we need to run an autotest iozone job in "
                        "the guest, please make sure the necessary packages "
                        "(such as gcc, tar and bzip2) are installed there.")
    logging.debug("Iozone is already running in VMs.")

    try:
        # Create a BackgroundTest for each vm to run test domstate_switch.
        background_tests = []
        for vm in vms:
            bt = utils_test.BackgroundTest(func_in_thread, [vm, timeout, test])
            bt.start()
            background_tests.append(bt)

        for bt in background_tests:
            bt.join()

        # Reboot vms after func_in_thread to check vm is running normally.
        for vm in vms:
            vm.reboot()
    finally:
        # Clean up.
        logging.debug("No cleaning operation for this test.")
Code Example #24
File: stop_continue.py  Project: CongLi/tp-qemu
def run(test, params, env):
    """
    Suspend a running Virtual Machine and verify its state.

    1) Boot the vm
    2) Do preparation operation (Optional)
    3) Start a background process (Optional)
    4) Stop the VM
    5) Verify the status of VM is 'paused'
    6) Verify the session has no response
    7) Resume the VM
    8) Verify the status of VM is 'running'
    9) Re-login the guest
    10) Do check operation (Optional)
    11) Do clean operation (Optional)

    :param test: Kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    login_timeout = float(params.get("login_timeout", 240))
    session = vm.wait_for_login(timeout=login_timeout)
    session_bg = None

    start_bg_process = params.get("start_bg_process")
    try:
        prepare_op = params.get("prepare_op")
        if prepare_op:
            error.context("Do preparation operation: '%s'" % prepare_op,
                          logging.info)
            op_timeout = float(params.get("prepare_op_timeout", 60))
            session.cmd(prepare_op, timeout=op_timeout)

        if start_bg_process:
            bg_cmd = params.get("bg_cmd")
            error.context("Start a background process: '%s'" % bg_cmd,
                          logging.info)
            session_bg = vm.wait_for_login(timeout=login_timeout)
            bg_cmd_timeout = float(params.get("bg_cmd_timeout", 240))
            args = (bg_cmd, bg_cmd_timeout)

            bg = utils_test.BackgroundTest(session_bg.cmd, args)
            bg.start()

        error.base_context("Stop the VM", logging.info)
        vm.pause()
        error.context("Verify the status of VM is 'paused'", logging.info)
        vm.verify_status("paused")

        error.context("Verify the session has no response", logging.info)
        if session.is_responsive():
            msg = "Session is still responsive after stop"
            logging.error(msg)
            raise error.TestFail(msg)
        session.close()
        time.sleep(float(params.get("pause_time", 0)))
        error.base_context("Resume the VM", logging.info)
        vm.resume()
        error.context("Verify the status of VM is 'running'", logging.info)
        vm.verify_status("running")

        error.context("Re-login the guest", logging.info)
        session = vm.wait_for_login(timeout=login_timeout)

        if start_bg_process:
            if bg:
                bg.join()

        check_op = params.get("check_op")
        if check_op:
            error.context("Do check operation: '%s'" % check_op, logging.info)
            op_timeout = float(params.get("check_op_timeout", 60))
            s, o = session.cmd_status_output(check_op, timeout=op_timeout)
            if s != 0:
                raise error.TestFail("Something wrong after stop continue, "
                                     "check command report: %s" % o)
    finally:
        clean_op = params.get("clean_op")
        if clean_op:
            error.context("Do clean operation: '%s'" % clean_op, logging.info)
            op_timeout = float(params.get("clean_op_timeout", 60))
            session.cmd(clean_op, timeout=op_timeout, ignore_all_errors=True)
        session.close()
        if session_bg:
            session_bg.close()
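
For reference, the sketch below shows the kind of params dictionary (normally generated from the Cartesian config) that would exercise the optional hooks in the example above. Every command and value here is an assumption chosen for illustration, not taken from the test suite.

# Illustrative parameter set for the stop/continue test above; the real
# values come from the test configuration files.
example_params = {
    "main_vm": "avocado-vt-vm1",
    "login_timeout": "240",
    "prepare_op": "dd if=/dev/zero of=/tmp/stop_cont.img bs=1M count=64",
    "start_bg_process": "yes",
    "bg_cmd": "ping -c 120 127.0.0.1",
    "bg_cmd_timeout": "240",
    "pause_time": "5",
    "check_op": "ls -l /tmp/stop_cont.img",
    "clean_op": "rm -f /tmp/stop_cont.img",
}
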
Code example #25
def run(test, params, env):
    """
    Test steps:

    1) Get the params from params.
    2) Run unixbench on guest.
    3) Run domstate_switch test for each VM.
    4) Clean up.
    """
    vms = env.get_all_vms()
    unixbench_control_file = params.get("unixbench_controle_file",
                                        "unixbench5.control")
    timeout = int(params.get("LB_domstate_with_unixbench_loop_time", "600"))
    # Run unixbench on guest.
    params["test_control_file"] = unixbench_control_file
    # Fork a new process to run unixbench on each guest.
    for vm in vms:
        params["main_vm"] = vm.name
        control_path = os.path.join(test.virtdir, "control",
                                    unixbench_control_file)

        session = vm.wait_for_login()
        command = utils_test.run_autotest(vm,
                                          session,
                                          control_path,
                                          None,
                                          None,
                                          params,
                                          copy_only=True)
        session.cmd("%s &" % command)

    for vm in vms:
        session = vm.wait_for_login()

        def _is_unixbench_running():
            return (not session.cmd_status("ps -ef|grep perl|grep Run"))

        if not utils_misc.wait_for(_is_unixbench_running, timeout=120):
            test.cancel("Failed to run unixbench in guest.\n"
                        "Since we need to run a autotest of unixbench "
                        "in guest, so please make sure there are some "
                        "necessary packages in guest, such as gcc, tar, bzip2")
    logging.debug("Unixbench is already running in VMs.")

    # Run unixbench on host.
    from autotest.client import common
    autotest_client_dir = os.path.dirname(common.__file__)
    autotest_local_path = os.path.join(autotest_client_dir, "autotest-local")
    unixbench_control_path = os.path.join(data_dir.get_root_dir(), "shared",
                                          "control", unixbench_control_file)
    args = [
        autotest_local_path, unixbench_control_path, '--verbose', '-t',
        unixbench_control_file
    ]
    host_unixbench_process = subprocess.Popen(args)

    try:
        # Create a BackgroundTest for each VM to run the domstate_switch test.
        background_tests = []
        for vm in vms:
            bt = utils_test.BackgroundTest(func_in_thread, [vm, timeout, test])
            bt.start()
            background_tests.append(bt)

        for bt in background_tests:
            bt.join()
    finally:
        # Kill process on host running unixbench.
        utils_misc.kill_process_tree(host_unixbench_process.pid)
        # Remove the result dir produced by subprocess host_unixbench_process.
        unixbench_control_result = os.path.join(autotest_client_dir, "results",
                                                unixbench_control_file)
        if os.path.isdir(unixbench_control_result):
            shutil.rmtree(unixbench_control_result)
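
The "wait until the workload shows up inside the guest" pattern recurs in several examples above (iozone, unixbench). A small helper like the sketch below could factor it out; the helper name is made up for illustration, and only the usual session API and utils_misc.wait_for are assumed.

from virttest import utils_misc


def wait_for_guest_process(vm, pattern, timeout=120):
    """Return True once `pattern` matches a running process inside `vm`."""
    session = vm.wait_for_login()
    try:
        def _running():
            # cmd_status() returns 0 when grep finds a matching process.
            return session.cmd_status(
                "ps -ef | grep -E '%s' | grep -v grep" % pattern) == 0

        return utils_misc.wait_for(_running, timeout=timeout)
    finally:
        session.close()

With such a helper, the unixbench poll above could be written as wait_for_guest_process(vm, "perl.*Run") and the iozone poll as wait_for_guest_process(vm, "iozone").
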
Code example #26
def run_stress_kernel_compile(tests, params, env):
    """
    Boot VMs and run kernel compile inside VM parallel.

    1) Boot up VMs:
       Every VM gets the same amount of vmem; the total vmem of all VMs
       is $overcommit times the host's memory.
    2) Launch kernel compile inside every guest.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def kernelcompile(session, vm_name):
        vm = env.get_vm(vm_name)
        ip = vm.get_address()
        path = params.get("download_url")
        logging.info("kernel path = %s" % path)
        get_kernel_cmd = "wget %s" % path
        try:
            status, output = session.cmd_status_output(get_kernel_cmd,
                                                       timeout=240)
            if status != 0:
                logging.error(output)
                raise error.TestFail("Fail to download the kernel"
                                     " in %s" % vm_name)
            else:
                logging.info("Completed download the kernel src"
                             " in %s" % vm_name)
            test_cmd = params.get("test_cmd")
            status, output = session.cmd_status_output(test_cmd, timeout=1200)
            if status != 0:
                logging.error(output)
        finally:
            status, _ = utils_test.ping(ip, count=10, timeout=30)
            if status != 0:
                raise error.TestFail("vm no response, pls check serial log")

    over_c = float(params.get("overcommit", 1.5))
    guest_number = int(params.get("guest_number", "1"))

    if guest_number < 1:
        logging.warn("At least boot up one guest for this test,"
                     " set up guest number to 1")
        guest_number = 1

    for tag in range(1, guest_number):
        params["vms"] += " stress_guest_%s" % tag

    mem_host = utils_memory.memtotal() / 1024
    vmem = int(mem_host * over_c / guest_number)

    if vmem < 256:
        raise error.TestNAError("The memory size set for guest is too small."
                                " Please try less than %s guests"
                                " in this host." % guest_number)
    params["mem"] = vmem
    params["start_vm"] = "yes"
    login_timeout = int(params.get("login_timeout", 360))

    env_process.preprocess(tests, params, env)

    sessions_info = []
    for vm_name in params["vms"].split():
        vm = env.get_vm(vm_name)
        vm.verify_alive()
        session = vm.wait_for_login(timeout=login_timeout)
        if not session:
            raise error.TestFail("Could not log into guest %s" % vm_name)

        sessions_info.append([session, vm_name])

    # run kernel compile in vms
    try:
        logging.info("run kernel compile in vms")
        bg_threads = []
        for session_info in sessions_info:
            session = session_info[0]
            vm_name = session_info[1]
            bg_thread = utils_test.BackgroundTest(kernelcompile,
                                                  (session, vm_name))
            bg_thread.start()
            bg_threads.append(bg_thread)

        completed = False
        while not completed:
            completed = True
            for bg_thread in bg_threads:
                if bg_thread.is_alive():
                    completed = False
    finally:
        try:
            for bg_thread in bg_threads:
                if bg_thread:
                    bg_thread.join()
        finally:
            for session_info in sessions_info:
                session_info[0].close()
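
The kernelcompile helper above reads two parameters, download_url and test_cmd, whose values are not shown in this snippet. The block below is only an illustrative guess at what they could look like; the URL, build commands and guest counts are assumptions, not values from the suite.

# Hypothetical values for the parameters run_stress_kernel_compile()
# consumes; the real ones live in the test configuration.
example_params = {
    "download_url": "https://cdn.kernel.org/pub/linux/kernel/"
                    "v6.x/linux-6.6.tar.xz",
    "test_cmd": "tar xf linux-6.6.tar.xz && cd linux-6.6 && "
                "make defconfig && make -j$(nproc)",
    "overcommit": "1.5",
    "guest_number": "2",
}
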
Code example #27
def run(test, params, env):
    """
    Transparent hugepage relocation test with quantification.
    The number of pages the THP daemon scans per round is set to 4096, the
    scan sleep time to 10 seconds, and the alloc sleep time to 1 minute.
    The anonymous hugepage size should therefore increase by 16M every 10
    seconds; when the system is busy and fails to allocate hugepages for the
    guest, the value stays unchanged for up to 1 minute. We sample that value
    every 10 seconds and check that it follows these rules.

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    def nr_hugepage_check(sleep_time, wait_time):
        time_last = 0
        while True:
            value = int(utils_memory.read_from_meminfo("AnonHugePages"))
            nr_hugepages.append(value)
            time_stamp = time.time()
            if time_last != 0:
                if nr_hugepages[-2] != nr_hugepages[-1]:
                    time_last = time_stamp
                elif time_stamp - time_last > wait_time:
                    logging.info("Huge page size stop changed")
                    break
            else:
                time_last = time_stamp
            time.sleep(sleep_time)

    logging.info("Relocated test start")
    login_timeout = float(params.get("login_timeout", 360))
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=login_timeout)

    free_memory = utils_memory.read_from_meminfo("MemFree")
    hugepage_size = utils_memory.read_from_meminfo("Hugepagesize")
    mem = params.get("mem")
    vmsm = int(mem) + 128
    hugetlbfs_path = params.get("hugetlbfs_path", "/proc/sys/vm/nr_hugepages")
    if vmsm < int(free_memory) / 1024:
        nr_hugetlbfs = vmsm * 1024 / int(hugepage_size)
    else:
        nr_hugetlbfs = None
    # Get dd speed in host
    start_time = time.time()
    cmd = "dd if=/dev/urandom of=/tmp/speed_test bs=4K count=256"
    s, o = commands.getstatusoutput(cmd)
    end_time = time.time()
    dd_timeout = vmsm * (end_time - start_time) * 2
    nr_hugepages = []
    thp_cfg = params.get("thp_test_config")
    s_time = int(re.findall("scan_sleep_millisecs:(\d+)", thp_cfg)[0]) / 1000
    w_time = int(re.findall("alloc_sleep_millisecs:(\d+)", thp_cfg)[0]) / 1000

    try:
        logging.info("Turn off swap in guest")
        s, o = session.cmd_status_output("swapoff -a")
        if s != 0:
            logging.warning("Didn't turn off swap in guest")
        s, o = session.cmd_status_output("cat /proc/meminfo")
        mem_free_filter = "MemFree:\s+(.\d+)\s+(\w+)"
        guest_mem_free, guest_unit = re.findall(mem_free_filter, o)[0]
        if re.findall("[kK]", guest_unit):
            guest_mem_free = str(int(guest_mem_free) / 1024)
        elif re.findall("[gG]", guest_unit):
            guest_mem_free = str(int(guest_mem_free) * 1024)
        elif re.findall("[mM]", guest_unit):
            pass
        else:
            guest_mem_free = str(int(guest_mem_free) / 1024 / 1024)

        file_size = min(1024, int(guest_mem_free) / 2)
        cmd = "mount -t tmpfs -o size=%sM none /mnt" % file_size
        s, o = session.cmd_status_output(cmd)
        if nr_hugetlbfs:
            hugepage_cfg = open(hugetlbfs_path, "w")
            hugepage_cfg.write(str(nr_hugetlbfs))
            hugepage_cfg.close()

        if not os.path.isdir('/space'):
            os.makedirs('/space')
        if os.system("mount -t tmpfs -o size=%sM none /space" % vmsm):
            raise error.TestError("Can not mount tmpfs")

        # Try to create some memory fragmentation on the host;
        # the total size of the fragments is vmsm MB.
        count = vmsm * 1024 / 4
        cmd = "for i in `seq %s`; do dd if=/dev/urandom of=/space/$i" % count
        cmd += " bs=4K count=1 & done"
        logging.info("Start to make fragment in host")
        s, o = commands.getstatusoutput(cmd)
        if s != 0:
            raise error.TestError("Can not dd in host")
    finally:
        s, o = commands.getstatusoutput("umount /space")

    bg = utils_test.BackgroundTest(nr_hugepage_check, (s_time, w_time))
    bg.start()

    while bg.is_alive():
        count = file_size / 2
        cmd = "dd if=/dev/urandom of=/mnt/test bs=2M count=%s" % count
        s, o = session.cmd_status_output(cmd, dd_timeout)

    if bg:
        bg.join()
    mem_increase_step = int(re.findall("pages_to_scan:(\d+)",
                                       thp_cfg)[0]) / 512
    mem_increase = 0
    w_step = w_time / s_time + 1
    count = 0
    last_value = nr_hugepages.pop()
    while len(nr_hugepages) > 0:
        current = nr_hugepages.pop()
        if current == last_value:
            count += 1
        elif current < last_value:
            if last_value - current < mem_increase_step * 0.95:
                raise error.TestError("Hugepage memory increased too slow")
            mem_increase += last_value - current
            count = 0
        if count > w_step:
            logging.warning("Memory didn't increase in %s s" %
                            (count * s_time))
        # Compare consecutive samples on the next iteration.
        last_value = current
    if mem_increase < file_size * 0.5:
        raise error.TestError("Hugepages allocated can not reach a half: %s/%s"
                              % (mem_increase, file_size))
    session.close()
    logging.info("Relocated test succeed")