    def get_migration_statistic(vm):
        last_transfer_mem = 0
        transfered_mem = 0
        mig_stat = utils.Statistic()
        for _ in range(30):
            o = vm.monitor.info("migrate")
            warning_msg = ("Migration already ended. Migration speed is"
                           " probably too high and will block vm while"
                           " filling its memory.")
            fail_msg = ("Could not determine the transferred memory from"
                        " monitor data: %s" % o)
            if isinstance(o, str):
                if "status: active" not in o:
                    raise error.TestWarn(warning_msg)
                try:
                    transfered_mem = int(get_mig_speed.search(o).groups()[0])
                except (IndexError, ValueError):
                    raise error.TestFail(fail_msg)
            else:
                if o.get("status") != "active":
                    raise error.TestWarn(warning_msg)
                try:
                    transfered_mem = o.get("ram").get("transferred") / (1024)
                except (IndexError, ValueError):
                    raise error.TestFail(fail_msg)

            real_mig_speed = (transfered_mem - last_transfer_mem) / 1024

            last_transfer_mem = transfered_mem

            logging.debug("Migration speed: %s MB/s" % (real_mig_speed))
            mig_stat.record(real_mig_speed)
            time.sleep(1)

        return mig_stat
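    # Worked example of the speed computation above (illustrative numbers):
    # the HMP "info migrate" output parsed here reports lines of the form
    # "transferred ram: <N> kbytes" (see the get_mig_speed regex in the later
    # examples), so each sample is in KiB, while the QMP "ram"/"transferred"
    # value is treated as bytes (hence the extra /1024).  With two successive
    # KiB samples of 204800 and 256000:
    #     real_mig_speed = (256000 - 204800) / 1024 = 50
    # i.e. 50 MB/s over the 1 second sampling interval.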
        def migrate_vms_src(self, mig_data):
            super(TestMultihostMigrationLongWait,
                  self).migrate_vms_src(mig_data)
            self._hosts_barrier(self.hosts, mig_data.mig_id, 'mig_started',
                                self.mig_timeout)
            vm = mig_data.vms[0]
            self._hosts_barrier(self.hosts, mig_data.mig_id, 'mig_interrupted',
                                self.mig_timeout)

            session = vm.wait_for_login(timeout=self.login_timeout)
            session.cmd("killall cpuflags-test")
            if params.get("mig_cancel", "no") == "yes":
                vm.monitor.cmd("migrate_cancel")
                vm.monitor.info("migrate")
            else:
                for _ in range(self.mig_fir_timeout):
                    state = vm.monitor.info("migrate")
                    if isinstance(state, str):
                        if "failed" in state:
                            break
                    else:
                        if state["status"] == "failed":
                            break
                    time.sleep(1)
                else:
                    raise error.TestWarn("Firewall block migraiton timeout"
                                         " is too short: %s. For completing"
                                         " the test increase mig_timeout in"
                                         " variant dest-problem-test." %
                                         (self.mig_fir_timeout))

            self._hosts_barrier(self.hosts, mig_data.mig_id, 'mig_interfynish',
                                self.mig_timeout)
Example #3
def run(test, params, env):
    """
    Installs virtualization software using the selected installers

    :param test: test object.
    :param params: Dictionary with test parameters.
    :param env: Test environment.
    """
    srcdir = params.get("srcdir", test.srcdir)
    params["srcdir"] = srcdir

    # Flag if an installer minor failure occurred
    minor_failure = False
    minor_failure_reasons = []

    for name in params.get("installers", "").split():
        installer_obj = installer.make_installer(name, params, test)
        installer_obj.install()
        installer_obj.write_version_keyval(test)
        if installer_obj.minor_failure is True:
            minor_failure = True
            reason = "%s_%s: %s" % (installer_obj.name,
                                    installer_obj.mode,
                                    installer_obj.minor_failure_reason)
            minor_failure_reasons.append(reason)

    if minor_failure:
        raise error.TestWarn("Minor (worked around) failures during build "
                             "test: %s" % ", ".join(minor_failure_reasons))
Example #4
def run(test, params, env):
    """
    Fill up disk test:
    The purpose is to expand the qcow2 file to its max size.
    It is suggested to test rebooting the vm after this test.
    1) Fill up the guest disk (root mount point) using dd if=/dev/zero.
    2) Clean up the big files in the guest with the rm command.


    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    login_timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=login_timeout)
    session2 = vm.wait_for_serial_login(timeout=login_timeout)

    fillup_timeout = int(params.get("fillup_timeout"))
    fillup_size = int(params.get("fillup_size"))
    fill_dir = params.get("guest_testdir", "/tmp")
    filled = False
    number = 0

    try:
        error.context("Start filling the disk in %s" % fill_dir, logging.info)
        cmd = params.get("fillup_cmd")
        while not filled:
            # As we want to test the backing file, bypass the cache
            tmp_cmd = cmd % (fill_dir, number, fillup_size)
            logging.debug(tmp_cmd)
            s, o = session.cmd_status_output(tmp_cmd, timeout=fillup_timeout)
            if "No space left on device" in o:
                logging.debug("Successfully filled up the disk")
                filled = True
            elif s != 0:
                raise error.TestFail("Command dd failed to execute: %s" % o)
            number += 1
    finally:
        error.context("Cleaning the temporary files...", logging.info)
        try:
            clean_cmd = params.get("clean_cmd") % fill_dir
            status, output = session2.cmd_status_output(clean_cmd)
            if status != 0:
                raise error.TestWarn("Cleaning the temporary files failed ! \n"
                                     "Guest may be unresponsive or "
                                     "command timeout. \n"
                                     "The error info is: %s \n" % output)
            else:
                logging.debug(output)
        finally:
            show_fillup_dir_cmd = params.get("show_fillup_dir_cmd") % fill_dir
            output = session2.cmd_output_safe(show_fillup_dir_cmd)
            logging.debug("The fill_up dir shows:\n %s", output)
            if session:
                session.close()
            if session2:
                session2.close()
Example #5
    def get_device(self, test):
        if getattr(test, 'device', None):
            device = test.device
        elif self.device:
            device = self.device
        else:
            raise error.TestWarn('No device specified for blktrace')
        return device
Example #6
    def _restore_bootloader_config(session, default_kernel):
        error.context("Restore the grub to old version")

        if not default_kernel:
            logging.warn("Could not get previous grub config, do noting.")
            return

        cmd = "grubby --set-default=%s" % default_kernel.strip()
        try:
            session.cmd(cmd)
        except Exception, e:
            raise error.TestWarn("Restore grub failed: '%s'" % e)
Example #7
def run(test, params, env):
    """
    boot cpu model test:
    steps:
    1). boot guest with cpu model
    2). check flags if enable_check == "yes", otherwise shutdown guest

    :param test: QEMU test object
    :param params: Dictionary with the test parameters

    """
    cpu_vendor = utils_misc.get_cpu_vendor()
    host_model = utils_misc.get_host_cpu_models()

    model_list = params.get("cpu_model")
    if not model_list:
        if cpu_vendor == "unknow":
            raise error.TestError("unknow cpu vendor")
        else:
            model_list = params.get("cpu_model_%s" % cpu_vendor,
                                    host_model[-1])

    extra_flags = params.get("cpu_model_flags_%s" % cpu_vendor, "")
    if extra_flags:
        cpu_flags = params.get("cpu_model_flags", "") + extra_flags
        params["cpu_model_flags"] = cpu_flags

    if model_list:
        model_list = model_list.split(" ")
        for model in model_list:
            if model in host_model or model == "host":
                params["cpu_model"] = model
                params["start_vm"] = "yes"
                env_process.preprocess_vm(test, params, env, params["main_vm"])
                # check guest flags
                if params.get("enable_check", "no") == "yes":
                    utils_test.run_virt_sub_test(test,
                                                 params,
                                                 env,
                                                 sub_type="flag_check")
                else:
                    # log in and shutdown guest
                    utils_test.run_virt_sub_test(test,
                                                 params,
                                                 env,
                                                 sub_type="shutdown")
                    logging.info("shutdown guest successfully")
            else:
                if params.get("enable_check", "no") == "yes":
                    raise error.TestWarn("Can not test %s model on %s host, "
                                         "pls use %s host" %
                                         (model, host_model[0], model))
Example #8
def run_install(test, params, env):
    """
    Installs virtualization software using the selected installers

    :param test: test object.
    :param params: Dictionary with test parameters.
    :param env: Test environment.
    """
    srcdir = params.get("srcdir", test.srcdir)
    params["srcdir"] = srcdir

    # Flag if an installer minor failure occurred
    minor_failure = False
    minor_failure_reasons = []

    sm = software_manager.SoftwareManager()

    for name in params.get("installers", "").split():
        installer_obj = installer.make_installer(name, params, test)
        if installer_obj.name == "ovirt_engine_sdk":
            installer_obj.install(
                cleanup=False, build=False, install=False)
            if installer_obj.minor_failure is True:
                minor_failure = True
                reason = "%s_%s: %s" % (installer_obj.name,
                                        installer_obj.mode,
                                        installer_obj.minor_failure_reason)
                minor_failure_reasons.append(reason)
            ovirt_src = os.path.join(srcdir, installer_obj.name)
            topdir = os.getcwd()
            os.chdir(ovirt_src)
            utils.make("rpm")
            os.chdir(topdir)
            pkgs = glob.glob(
                os.path.join(ovirt_src, "rpmtop/RPMS/noarch/*"))
            for pkg in pkgs:
                sm.install(pkg)
        else:
            installer_obj.install(cleanup=False, build=False)
            time.sleep(5)
            if installer_obj.minor_failure is True:
                minor_failure = True
                reason = "%s_%s: %s" % (installer_obj.name,
                                        installer_obj.mode,
                                        installer_obj.minor_failure_reason)
                minor_failure_reasons.append(reason)
            env.register_installer(installer_obj)

    if minor_failure:
        raise error.TestWarn("Minor (worked around) failures during build "
                             "test: %s" % ", ".join(minor_failure_reasons))
Example #9
    def run_once(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise error.TestNAError("Test dependency failed")

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        logging.info("Starting test %s", self.tag)
        logging.debug("Test parameters:")
        keys = params.keys()
        keys.sort()
        for key in keys:
            logging.debug("    %s = %s", key, params[key])

        # Open the environment file
        env_filename = os.path.join(self.bindir, params.get("vm_type"),
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)

        test_passed = False
        t_type = None

        try:
            try:
                try:
                    subtest_dirs = []
                    tests_dir = self.testdir

                    other_subtests_dirs = params.get("other_tests_dirs", "")
                    for d in other_subtests_dirs.split():
                        d = os.path.join(*d.split("/"))
                        subtestdir = os.path.join(self.bindir, d, "tests")
                        if not os.path.isdir(subtestdir):
                            raise error.TestError("Directory %s does not "
                                                  "exist" % (subtestdir))
                        subtest_dirs += data_dir.SubdirList(
                            subtestdir, bootstrap.test_filter)

                    # Verify if we have the correspondent source file for it
                    subtest_dirs += data_dir.SubdirList(
                        self.testdir, bootstrap.test_filter)
                    specific_testdir = os.path.join(self.bindir,
                                                    params.get("vm_type"),
                                                    "tests")
                    subtest_dirs += data_dir.SubdirList(
                        specific_testdir, bootstrap.test_filter)
                    subtest_dir = None

                    # Get the test routine corresponding to the specified
                    # test type
                    logging.debug(
                        "Searching for test modules that match "
                        "param 'type = %s' on this cartesian dict",
                        params.get("type"))
                    t_types = params.get("type").split()
                    test_modules = {}
                    for t_type in t_types:
                        for d in subtest_dirs:
                            module_path = os.path.join(d, "%s.py" % t_type)
                            if os.path.isfile(module_path):
                                logging.debug("Found subtest module %s",
                                              module_path)
                                subtest_dir = d
                                break
                        if subtest_dir is None:
                            msg = ("Could not find test file %s.py on test"
                                   "dirs %s" % (t_type, subtest_dirs))
                            raise error.TestError(msg)
                        # Load the test module
                        f, p, d = imp.find_module(t_type, [subtest_dir])
                        test_modules[t_type] = imp.load_module(t_type, f, p, d)
                        f.close()

                    # Preprocess
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        env.save()

                    # Run the test function
                    for t_type, test_module in test_modules.items():
                        run_func = getattr(test_module, "run_%s" % t_type)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            env.save()
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise error.TestWarn("funcatexit failed with: %s" %
                                             error_message)

                except Exception, e:
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    try:
                        env_process.postprocess_on_error(self, params, env)
                    finally:
                        env.save()
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        env_process.postprocess(self, params, env)
                    except Exception, e:
                        if test_passed:
                            raise
                        logging.error(
                            "Exception raised during "
                            "postprocessing: %s", e)
                finally:
                    env.save()

        except Exception, e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info("It has a %s monitor unix socket at: %s",
                                     m.protocol, m.filename)
                    logging.info("The command line used to start it was:\n%s",
                                 vm.make_qemu_command())
                raise error.JobError("Abort requested (%s)" % e)
Example #10
def run(test, params, env):
    """
    We have found that sometimes the guest crashes between two
    cases in a loop.  For example:
    1. Case A executes its steps and finishes as good.
    2. The post process does some checks, pause or shutdown
       operations.
    3. The guest crashes and reboots or just quits (but case A
       already finished as good).
    4. Case B starts and also does not see the guest crashed
       status.

    Check if there is any core dump file in the guest images.

    1) Check all the existing guest images in the image directory.
    2) Mount guest image on the host.
    3) Check "C:\windows\dump" for Windows and core file for Linux.
    4) If yes, copy them to working directory.
    """

    # Preliminary
    # yum install libguestfs libguestfs-tools libguestfs-winsupport
    try:
        os_dep.command("guestmount")
    except:
        warn_msg = "Need packages: libguestfs libguestfs-tools" + \
                   " libguestfs-winsupport"
        raise error.TestWarn(warn_msg)

    # define the file names that need to be checked
    file_check_win_default = "Windows/dump"
    file_check_linux_default = "var/crash/*"
    host_mountpoint_default = "mnt/mountpoint"

    host_mountpoint = params.get("host_mountpoint", host_mountpoint_default)
    host_mountpoint = utils_misc.get_path(test.debugdir, host_mountpoint)
    file_chk_for_win = params.get("coredump_check_win", file_check_win_default)
    file_chk_for_linux = params.get("coredump_check_linux",
                                    file_check_linux_default)

    # check if the host_mountpoint exists.
    if not (os.path.isdir(host_mountpoint)
            and os.path.exists(host_mountpoint)):
        os.makedirs(host_mountpoint)

    coredump_file_exists = False
    check_files = [file_chk_for_win, file_chk_for_linux]
    check_results = []

    error.context("Get all the images name", logging.info)
    images = get_images()
    error.context("images: %s" % images, logging.info)

    # find all the images
    # mount per-image to check if the dump file exists
    error.context("Check coredump file per-image", logging.info)
    for image in images:
        status, chk_msgs = check_images_coredump(image, host_mountpoint,
                                                 check_files, test.debugdir)
        coredump_file_exists = coredump_file_exists or status
        if status:
            check_results.append((image, chk_msgs))

    # if found, report the result
    if coredump_file_exists:
        report_msg = format_report(check_results)
        raise error.TestFail(report_msg)
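# The helpers get_images(), check_images_coredump() and format_report() used
# above are not shown in this snippet.  The function below is only a
# hypothetical illustration of the mount-and-check step described in the
# docstring (the name, flags and helper calls are assumptions, not the
# original implementation).
import glob
import os
import shutil

from autotest.client import utils


def _example_check_images_coredump(image, mountpoint, check_files, debugdir):
    """Mount one image read-only and collect files matching check_files."""
    utils.system("guestmount -a %s -i --ro %s" % (image, mountpoint))
    found = []
    try:
        for pattern in check_files:
            found += glob.glob(os.path.join(mountpoint, pattern))
        for dump_file in found:
            # Keep a copy of every suspected dump file for later inspection
            shutil.copy(dump_file, debugdir)
    finally:
        utils.system("fusermount -u %s" % mountpoint, ignore_status=True)
    return bool(found), found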
Example #11
def run_cpu_add(test, params, env):
    """
    Runs CPU hotplug test:

    1) Boot the vm with -smp X,maxcpus=Y
    2) After logging into the vm, check the CPU number
    3) Stop the guest if 'stop_before_hotplug' is configured
    4) Do cpu hotplug
    5) Resume the guest if 'stop_before_hotplug' is configured
    6) Recheck that the guest gets the hotplugged CPUs
    7) Do cpu online/offline in the guest if configured
    8) Run sub test after CPU hotplug
    9) Recheck guest cpus after the sub test

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    def qemu_guest_cpu_match(vm, vcpu_been_pluged=0, wait_time=60):
        """
        Check whether the vcpu counts match
        """
        total_cpus_expected = int(vm.cpuinfo.smp) + int(vcpu_been_pluged)
        if utils_misc.wait_for(lambda:
                               ((total_cpus_expected == vm.get_cpu_count()) and
                                (vm.get_cpu_count() == len(vm.vcpu_threads))),
                               wait_time,
                               first=10,
                               step=5.0):
            logging.info("Cpu number in cmd_line, qemu and guest are match")
            return True
        err_msg = "Cpu mismatch! "
        err_msg += "after hotplug %s vcpus, " % vcpu_been_pluged
        err_msg += "there shoule be %s vcpus exist, " % total_cpus_expected
        err_msg += "in qemu %s vcpus threads works, " % len(vm.vcpu_threads)
        err_msg += "in guest %s cpus works." % vm.get_cpu_count()
        raise error.TestFail(err_msg)
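    # Usage note (illustrative): qemu_guest_cpu_match(vm, 2) only passes when
    # vm.cpuinfo.smp + 2 equals both vm.get_cpu_count() (cpus seen inside the
    # guest) and len(vm.vcpu_threads) (vcpu threads seen on the qemu side).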

    def cpu_online_offline(session, cpu_id, online=""):
        """
        Do cpu online/offline in guest
        """
        if online == "online":
            online = 1
        else:
            online = 0
        online_file = "/sys/devices/system/cpu/cpu%s/online" % cpu_id
        if session.cmd_status("test -f %s" % online_file):
            logging.info("online file %s not exist, just pass the cpu%s" %
                         (online_file, cpu_id))
            return
        session.cmd("echo %s > %s " % (online, online_file))

    def onoff_para_opt(onoff_params):
        """
        Analyze online/offline params.
        Return a list of cpus that need online/offline.
        """
        onoff_list = []
        offline = onoff_params.split(",")
        for item in offline:
            if "-" in item:
                onoff_list += range(int(item.split("-")[0]),
                                    int(item.split("-")[1]))
            else:
                onoff_list.append(item)
        return [str(i) for i in onoff_list]
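    # Example (illustrative): onoff_para_opt("0-2,4") returns ['0', '1', '4'],
    # since range() excludes the upper bound of an "x-y" item, so "0-2" only
    # covers cpus 0 and 1.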

    timeout = int(params.get("login_timeout", 360))
    onoff_iterations = int(params.get("onoff_iterations", 2))
    vcpu_need_hotplug = int(params.get("vcpu_need_hotplug", 1))

    error.context("Boot the vm, with '-smp X,maxcpus=Y' option", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=timeout)
    maxcpus = vm.cpuinfo.maxcpus

    error.context("Check if cpus in guest matche qemu cmd before hotplug",
                  logging.info)
    qemu_guest_cpu_match(vm)

    # do pre_operation like stop, before vcpu Hotplug
    stop_before_hotplug = params.get("stop_before_hotplug", "no")
    if stop_before_hotplug == 'yes':
        error.context("Stop the guest before hotplug vcpu", logging.info)
        vm.pause()

    error.context("Do cpu hotplug", logging.info)
    if vm.monitor.protocol == 'human':
        human_check_info = params.get("human_error_recheck", None)
        qmp_check_info = None
        hotplug_add_cmd = ""
    elif vm.monitor.protocol == 'qmp':
        qmp_check_info = params.get("qmp_error_recheck", None)
        hotplug_add_cmd = params.get("vcpu_add_cmd", "")
        if hotplug_add_cmd:
            human_check_info = params.get("human_error_recheck", None)
        else:
            human_check_info = None

    vcpu_been_pluged = 0
    for i in range(vcpu_need_hotplug):
        hotplug_vcpu_params = params.object_params("hotplug_vcpu%s" % i)
        plug_cpu_id = len(vm.vcpu_threads)
        plug_cpu_id = hotplug_vcpu_params.get("id", plug_cpu_id)

        (status, output) = vm.hotplug_vcpu(plug_cpu_id, hotplug_add_cmd)

        if status:
            if not qmp_check_info and not human_check_info:
                vcpu_been_pluged += 1
                logging.info("Cpu%s hotplug successfully" % plug_cpu_id)
                logging.info("Now '%s' cpus have been hotpluged" %
                             vcpu_been_pluged)
                continue
            else:
                err_msg = "Qemu should report error, but hotplug successfully"
                raise error.TestFail(err_msg)
        else:
            if not output:
                warn_msg = "Qemu should report some warning information"
                raise error.TestWarn(warn_msg)
            if qmp_check_info and re.findall(qmp_check_info, output, re.I):
                msg = "Hotplug vcpu(id:'%s') error, qemu report the error."
                logging.info(msg % plug_cpu_id)
                logging.debug("QMP error info: '%s'" % output)
                continue
            elif (human_check_info
                  and re.findall(human_check_info, output, re.I)):
                msg = "Hotplug vcpu(id:'%s') error, qemu report the error"
                logging.info(msg % plug_cpu_id)
                logging.debug("Error info: '%s'" % output)
                continue
            else:
                err_msg = "Hotplug error! "
                err_msg += "the hotplug cpu_id is: '%s', " % plug_cpu_id
                err_msg += "the maxcpus allowed is: '%s', " % maxcpus
                err_msg += "qemu cpu list is:'%s'" % vm.monitor.info("cpus")
                logging.debug("The error info is:\n '%s'" % output)
                raise error.TestFail(err_msg)

    if stop_before_hotplug == 'yes':
        error.context("Resume the guest after cpu hotplug", logging.info)
        vm.resume()

    if params.get("reboot_after_hotplug", False):
        error.context("Reboot guest after hotplug vcpu", logging.info)
        vm.reboot()

    if vcpu_been_pluged != 0:
        error.context("Check whether cpus are match after hotplug",
                      logging.info)
        qemu_guest_cpu_match(vm, vcpu_been_pluged)

    error.context("Do cpu online/offline in guest", logging.info)
    # Windows guests don't support the online/offline test
    if params['os_type'] == "windows":
        logging.info("Not doing online/offline test for windows guest")
        return

    online_list = []
    offline_list = []
    offline = params.get("offline", "")
    online = params.get("online", "")
    repeat_time = int(params.get("repeat_time", 0))

    if offline:
        offline_list = onoff_para_opt(offline)
        logging.debug("Cpu offline list is %s " % offline_list)
    if online:
        online_list = onoff_para_opt(online)
        logging.debug("Cpu online list is %s " % offline_list)

    for i in range(repeat_time):
        for offline_cpu in offline_list:
            cpu_online_offline(session, offline_cpu)
            time.sleep(onoff_iterations)
        for online_cpu in online_list:
            cpu_online_offline(session, online_cpu, "online")
            time.sleep(onoff_iterations)

    # do sub test after cpu hotplug
    if (params.get("run_sub_test", "no") == "yes"
            and 'sub_test_name' in params):
        sub_test = params['sub_test_name']
        error.context("Run subtest %s after cpu hotplug" % sub_test,
                      logging.info)
        if (sub_test == "guest_suspend"
                and params["guest_suspend_type"] == "disk"):
            vm.params["smp"] = int(vm.cpuinfo.smp) + vcpu_been_pluged
            vcpu_been_pluged = 0
        utils_test.run_virt_sub_test(test, params, env, sub_type=sub_test)
        if sub_test == "shutdown":
            logging.info("Guest shutdown normally after cpu hotplug")
            return
        if params.get("session_need_update", "no") == "yes":
            session = vm.wait_for_login(timeout=timeout)

    if params.get("vcpu_num_rechek", "yes") == "yes":
        error.context("Recheck cpu numbers after operation", logging.info)
        qemu_guest_cpu_match(vm, vcpu_been_pluged)

    if session:
        session.close()
def run(test, params, env):
    """
    KVM migration test:
    1) Get a live VM and clone it.
    2) Verify that the source VM supports migration.  If it does, proceed with
            the test.
    3) Start memory load in vm.
    4) Set defined migration speed.
    5) Send a migration command to the source VM and collect statistics
            of the migration speed.
    !) Checks that migration did not slow down the guest stresser, which
       would lead to fewer page changes than required for this test
       (migration speed set too high for the current CPU).
    6) Kill both VMs.
    7) Print migration statistics.

    :param test: kvm test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    mig_protocol = params.get("mig_protocol", "tcp")
    base_class = utils_test.qemu.MultihostMigration
    if mig_protocol == "fd":
        base_class = utils_test.qemu.MultihostMigrationFd
    if mig_protocol == "exec":
        base_class = utils_test.qemu.MultihostMigrationExec
    if "rdma" in mig_protocol:
        base_class = utils_test.qemu.MultihostMigrationRdma

    install_path = params.get("cpuflags_install_path", "/tmp")

    vm_mem = int(params.get("mem", "512"))

    get_mig_speed = re.compile("^transferred ram: (\d+) kbytes$", re.MULTILINE)

    mig_speed = params.get("mig_speed", "1G")
    mig_speed_accuracy = float(params.get("mig_speed_accuracy", "0.2"))

    def get_migration_statistic(vm):
        last_transfer_mem = 0
        transfered_mem = 0
        mig_stat = utils.Statistic()
        for _ in range(30):
            o = vm.monitor.info("migrate")
            warning_msg = ("Migration already ended. Migration speed is"
                           " probably too high and will block vm while"
                           " filling its memory.")
            fail_msg = ("Could not determine the transferred memory from"
                        " monitor data: %s" % o)
            if isinstance(o, str):
                if "status: active" not in o:
                    raise error.TestWarn(warning_msg)
                try:
                    transfered_mem = int(get_mig_speed.search(o).groups()[0])
                except (IndexError, ValueError):
                    raise error.TestFail(fail_msg)
            else:
                if o.get("status") != "active":
                    raise error.TestWarn(warning_msg)
                try:
                    transfered_mem = o.get("ram").get("transferred") / (1024)
                except (IndexError, ValueError):
                    raise error.TestFail(fail_msg)

            real_mig_speed = (transfered_mem - last_transfer_mem) / 1024

            last_transfer_mem = transfered_mem

            logging.debug("Migration speed: %s MB/s" % (real_mig_speed))
            mig_stat.record(real_mig_speed)
            time.sleep(1)

        return mig_stat

    class TestMultihostMigration(base_class):
        def __init__(self, test, params, env):
            super(TestMultihostMigration, self).__init__(test, params, env)
            self.mig_stat = None
            self.srchost = self.params.get("hosts")[0]
            self.dsthost = self.params.get("hosts")[1]
            self.id = {
                'src': self.srchost,
                'dst': self.dsthost,
                "type": "speed_measurement"
            }
            self.link_speed = 0

        def check_vms(self, mig_data):
            """
            Check vms after migrate.

            :param mig_data: object with migration data.
            """
            pass

        def migrate_vms_src(self, mig_data):
            """
            Migrate vms source.

            :param mig_data: Data for migration.

            To change the way the machine migrates it is necessary to
            reimplement this method.
            """
            super_cls = super(TestMultihostMigration, self)
            super_cls.migrate_vms_src(mig_data)
            vm = mig_data.vms[0]
            self.mig_stat = get_migration_statistic(vm)

        def migration_scenario(self):
            sync = SyncData(self.master_id(), self.hostid, self.hosts, self.id,
                            self.sync_server)
            srchost = self.params.get("hosts")[0]
            dsthost = self.params.get("hosts")[1]
            vms = [params.get("vms").split()[0]]

            def worker(mig_data):
                vm = mig_data.vms[0]
                session = vm.wait_for_login(timeout=self.login_timeout)

                cpuflags.install_cpuflags_util_on_vm(
                    test, vm, install_path, extra_flags="-msse3 -msse2")

                cmd = ("%s/cpuflags-test --stressmem %d,%d" % (os.path.join(
                    install_path, "cpu_flags"), vm_mem * 4, vm_mem / 2))
                logging.debug("Sending command: %s" % (cmd))
                session.sendline(cmd)

            if self.master_id() == self.hostid:
                server_port = utils_misc.find_free_port(5200, 6000)
                server = listen_server(port=server_port)
                data_len = 0
                sync.sync(server_port, timeout=120)
                client = server.socket.accept()[0]
                endtime = time.time() + 30
                while endtime > time.time():
                    data_len += len(client.recv(2048))
                client.close()
                server.close()
                self.link_speed = data_len / (30 * 1024 * 1024)
                logging.info("Link speed %d MB/s" % (self.link_speed))
                ms = utils.convert_data_size(mig_speed, 'M')
                if (ms > data_len / 30):
                    logging.warn("Migration speed %s MB/s is set faster than "
                                 "real link speed %d MB/s" %
                                 (mig_speed, self.link_speed))
                else:
                    self.link_speed = ms / (1024 * 1024)
            else:
                data = ""
                for _ in range(10000):
                    data += "i"
                server_port = sync.sync(timeout=120)[self.master_id()]
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.connect((self.master_id(), server_port))
                try:
                    endtime = time.time() + 10
                    while endtime > time.time():
                        sock.sendall(data)
                    sock.close()
                except:
                    pass
            self.migrate_wait(vms, srchost, dsthost, worker)

    mig = TestMultihostMigration(test, params, env)
    # Start migration
    mig.run()

    # If this machine is the migration master, check the migration statistics.
    if mig.master_id() == mig.hostid:
        mig_speed = utils.convert_data_size(mig_speed, "M")

        mig_stat = mig.mig_stat

        mig_speed = mig_speed / (1024 * 1024)
        real_speed = mig_stat.get_average()
        ack_speed = mig.link_speed * mig_speed_accuracy

        logging.info("Target migration speed: %d MB/s", mig_speed)
        logging.info("Real Link speed: %d MB/s", mig.link_speed)
        logging.info("Average migration speed: %d MB/s",
                     mig_stat.get_average())
        logging.info("Minimum migration speed: %d MB/s", mig_stat.get_min())
        logging.info("Maximum migration speed: %d MB/s", mig_stat.get_max())

        logging.info("Maximum tolerable divergence: %3.1f%%",
                     mig_speed_accuracy * 100)

        if real_speed < mig_speed - ack_speed:
            divergence = (1 - float(real_speed) / float(mig_speed)) * 100
            raise error.TestWarn("Average migration speed (%s MB/s) "
                                 "is %3.1f%% lower than target (%s MB/s)" %
                                 (real_speed, divergence, mig_speed))

        if real_speed > mig_speed + ack_speed:
            divergence = (1 - float(mig_speed) / float(real_speed)) * 100
            raise error.TestWarn("Average migration speed (%s MB/s) "
                                 "is %3.1f %% higher than target (%s MB/s)" %
                                 (real_speed, divergence, mig_speed))
Example #13
                ovirt_src = os.path.join(srcdir, installer_obj.name)
                topdir = os.getcwd()
                os.chdir(ovirt_src)
                utils.make("rpm")
                os.chdir(topdir)
                pkgs = glob.glob(
                    os.path.join(ovirt_src, "rpmtop/RPMS/noarch/*"))
                for pkg in pkgs:
                    sm.install(pkg)
            else:
                installer_obj.install(cleanup=False, build=False)
                time.sleep(5)
                if installer_obj.minor_failure is True:
                    minor_failure = True
                    reason = "%s_%s: %s" % (installer_obj.name,
                                            installer_obj.mode,
                                            installer_obj.minor_failure_reason)
                    minor_failure_reasons.append(reason)
                env.register_installer(installer_obj)

    except Exception, e:
        # if the build/install fails, don't allow other tests
        # to get a installer.
        msg = "Virtualization software install failed: %s" % (e)
        env.register_installer(base_installer.FailedInstaller(msg))
        raise

    if minor_failure:
        raise error.TestWarn("Minor (worked around) failures during build "
                             "test: %s" % ", ".join(minor_failure_reasons))
Example #14
    def _runTest(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise error.TestNAError("Test dependency failed")

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        logging.debug("Test parameters:")
        keys = params.keys()
        keys.sort()
        for key in keys:
            logging.debug("    %s = %s", key, params[key])

        # Warn of this special condition in related location in output & logs
        if os.getuid() == 0 and params.get('nettype', 'user') == 'user':
            logging.warning("")
            logging.warning("Testing with nettype='user' while running "
                            "as root may produce unexpected results!!!")
            logging.warning("")

        # Find the test
        subtest_dirs = []
        test_filter = bootstrap.test_filter

        other_subtests_dirs = params.get("other_tests_dirs", "")
        for d in other_subtests_dirs.split():
            d = os.path.join(*d.split("/"))
            subtestdir = os.path.join(self.bindir, d, "tests")
            if not os.path.isdir(subtestdir):
                raise error.TestError("Directory %s does not "
                                      "exist" % subtestdir)
            subtest_dirs += data_dir.SubdirList(subtestdir,
                                                test_filter)

        provider = params.get("provider", None)

        if provider is None:
            # Verify if we have the correspondent source file for
            # it
            generic_subdirs = asset.get_test_provider_subdirs(
                'generic')
            for generic_subdir in generic_subdirs:
                subtest_dirs += data_dir.SubdirList(generic_subdir,
                                                    test_filter)
            specific_subdirs = asset.get_test_provider_subdirs(
                params.get("vm_type"))
            for specific_subdir in specific_subdirs:
                subtest_dirs += data_dir.SubdirList(
                    specific_subdir, bootstrap.test_filter)
        else:
            provider_info = asset.get_test_provider_info(provider)
            for key in provider_info['backends']:
                subtest_dirs += data_dir.SubdirList(
                    provider_info['backends'][key]['path'],
                    bootstrap.test_filter)

        subtest_dir = None

        # Get the test routine corresponding to the specified
        # test type
        logging.debug("Searching for test modules that match "
                      "'type = %s' and 'provider = %s' "
                      "on this cartesian dict",
                      params.get("type"),
                      params.get("provider", None))

        t_types = params.get("type").split()
        # Make sure we can load provider_lib in tests
        for s in subtest_dirs:
            if os.path.dirname(s) not in sys.path:
                sys.path.insert(0, os.path.dirname(s))

        test_modules = {}
        for t_type in t_types:
            for d in subtest_dirs:
                module_path = os.path.join(d, "%s.py" % t_type)
                if os.path.isfile(module_path):
                    logging.debug("Found subtest module %s",
                                  module_path)
                    subtest_dir = d
                    break
            if subtest_dir is None:
                msg = ("Could not find test file %s.py on test"
                       "dirs %s" % (t_type, subtest_dirs))
                raise error.TestError(msg)
            # Load the test module
            f, p, d = imp.find_module(t_type, [subtest_dir])
            test_modules[t_type] = imp.load_module(t_type, f, p, d)
            f.close()

        # TODO: the environment file is deprecated code, and should be removed
        # in future versions. Right now, it's being created on an Avocado temp
        # dir that is only persisted during the runtime of one job, which is
        # different from the original idea of the environment file (which was
        # to persist information across virt-test/avocado-vt job runs)
        env_filename = os.path.join(data_dir.get_tmp_dir(),
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)
        self.runner_queue.put({"func_at_exit": cleanup_env,
                               "args": (env_filename, self.env_version),
                               "once": True})

        test_passed = False
        t_type = None

        try:
            try:
                try:
                    # Preprocess
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        self.__safe_env_save(env)

                    # Run the test function
                    for t_type in t_types:
                        test_module = test_modules[t_type]
                        run_func = utils_misc.get_test_entrypoint_func(
                            t_type, test_module)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            self.__safe_env_save(env)
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise error.TestWarn("funcatexit failed with: %s" %
                                             error_message)

                except Exception:
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    try:
                        env_process.postprocess_on_error(self, params, env)
                    finally:
                        self.__safe_env_save(env)
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        params['test_passed'] = str(test_passed)
                        env_process.postprocess(self, params, env)
                    except Exception, e:
                        if test_passed:
                            raise
                        logging.error("Exception raised during "
                                      "postprocessing: %s", e)
                finally:
                    if self.__safe_env_save(env):
                        env.destroy()   # Force-clean as it can't be stored

        except Exception, e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info("It has a %s monitor unix socket at: %s",
                                     m.protocol, m.filename)
                    logging.info("The command line used to start it was:\n%s",
                                 vm.make_create_command())
                raise error.JobError("Abort requested (%s)" % e)
def run(test, params, env):
    """
    KVM migration test:
    1) Get a live VM and clone it.
    2) Verify that the source VM supports migration.  If it does, proceed with
            the test.
    3) Start memory load on vm.
    4) Send a migration command to the source VM and collect statistics
            of the migration speed.
    !) If the migration speed is too high, the migration could succeed and
            then the test ends with a warning.
    5) Kill off both VMs.
    6) Print migration statistics.

    :param test: kvm test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    login_timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=login_timeout)

    mig_timeout = float(params.get("mig_timeout", "10"))
    mig_protocol = params.get("migration_protocol", "tcp")

    install_path = params.get("cpuflags_install_path", "/tmp")

    vm_mem = int(params.get("mem", "512"))

    get_mig_speed = re.compile("^transferred ram: (\d+) kbytes$", re.MULTILINE)

    mig_speed = params.get("mig_speed", "1G")
    mig_speed_accuracy = float(params.get("mig_speed_accuracy", "0.2"))
    clonevm = None

    def get_migration_statistic(vm):
        last_transfer_mem = 0
        transfered_mem = 0
        mig_stat = utils.Statistic()
        for _ in range(30):
            o = vm.monitor.info("migrate")
            warning_msg = ("Migration already ended. Migration speed is"
                           " probably too high and will block vm while"
                           " filling its memory.")
            fail_msg = ("Could not determine the transferred memory from"
                        " monitor data: %s" % o)
            if isinstance(o, str):
                if "status: active" not in o:
                    raise error.TestWarn(warning_msg)
                try:
                    transfered_mem = int(get_mig_speed.search(o).groups()[0])
                except (IndexError, ValueError):
                    raise error.TestFail(fail_msg)
            else:
                if o.get("status") != "active":
                    raise error.TestWarn(warning_msg)
                try:
                    transfered_mem = o.get("ram").get("transferred") / (1024)
                except (IndexError, ValueError):
                    raise error.TestFail(fail_msg)

            real_mig_speed = (transfered_mem - last_transfer_mem) / 1024

            last_transfer_mem = transfered_mem

            logging.debug("Migration speed: %s MB/s" % (real_mig_speed))
            mig_stat.record(real_mig_speed)
            time.sleep(1)

        return mig_stat

    try:
        # Install the cpuflags memory stress utility in the guest
        cpuflags.install_cpuflags_util_on_vm(test,
                                             vm,
                                             install_path,
                                             extra_flags="-msse3 -msse2")

        vm.monitor.migrate_set_speed(mig_speed)

        cmd = (
            "%s/cpuflags-test --stressmem %d,%d" %
            (os.path.join(install_path, "cpu_flags"), vm_mem * 4, vm_mem / 2))
        logging.debug("Sending command: %s" % (cmd))
        session.sendline(cmd)

        time.sleep(2)

        clonevm = vm.migrate(mig_timeout,
                             mig_protocol,
                             not_wait_for_migration=True,
                             env=env)

        mig_speed = utils.convert_data_size(mig_speed, "M")

        mig_stat = get_migration_statistic(vm)

        mig_speed = mig_speed / (1024 * 1024)
        real_speed = mig_stat.get_average()
        ack_speed = mig_speed * mig_speed_accuracy

        logging.info("Target migration speed: %d MB/s.", mig_speed)
        logging.info("Average migration speed: %d MB/s",
                     mig_stat.get_average())
        logging.info("Minimum migration speed: %d MB/s", mig_stat.get_min())
        logging.info("Maximum migration speed: %d MB/s", mig_stat.get_max())

        logging.info("Maximum tolerable divergence: %3.1f%%",
                     mig_speed_accuracy * 100)

        if real_speed < mig_speed - ack_speed:
            divergence = (1 - float(real_speed) / float(mig_speed)) * 100
            raise error.TestWarn("Average migration speed (%s MB/s) "
                                 "is %3.1f%% lower than target (%s MB/s)" %
                                 (real_speed, divergence, mig_speed))

        if real_speed > mig_speed + ack_speed:
            divergence = (1 - float(mig_speed) / float(real_speed)) * 100
            raise error.TestWarn("Average migration speed (%s MB/s) "
                                 "is %3.1f %% higher than target (%s MB/s)" %
                                 (real_speed, divergence, mig_speed))

    finally:
        session.close()
        if clonevm:
            clonevm.destroy(gracefully=False)
        if vm:
            vm.destroy(gracefully=False)
Example #16
def run(test, params, env):
    """
    Domain CPU management testing.

    1. Prepare a domain for testing, install qemu-guest-agent if needed.
    2. Plug vcpu for the domain.
    3. Checking:
      3.1. Virsh vcpucount.
      3.2. Virsh vcpuinfo.
      3.3. Current vcpu number in domain xml.
      3.4. Virsh vcpupin and vcpupin in domain xml.
      3.5. The vcpu number in domain.
      3.6. Virsh cpu-stats.
    4. Repeat step 3 to check again.
    5. Control domain(save, managedsave, s3, s4, migrate, etc.).
    6. Repeat step 3 to check again.
    7. Recover domain(restore, wakeup, etc.).
    8. Repeat step 3 to check again.
    9. Unplug vcpu for the domain.
    10. Repeat step 3 to check again.
    11. Repeat step 5 to control domain(As BZ#1088216 not fix, skip
        save/managedsave/migrate related actions).
    12. Repeat step 3 to check again.
    13. Repeat step 7 to recover domain.
    14. Repeat step 3 to check again.
    15. Recover test environment.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_operation = params.get("vm_operation", "null")
    vcpu_max_num = params.get("vcpu_max_num")
    vcpu_current_num = params.get("vcpu_current_num")
    vcpu_plug = "yes" == params.get("vcpu_plug", "no")
    vcpu_plug_num = params.get("vcpu_plug_num")
    vcpu_unplug = "yes" == params.get("vcpu_unplug", "no")
    vcpu_unplug_num = params.get("vcpu_unplug_num")
    setvcpu_option = params.get("setvcpu_option", "")
    agent_channel = "yes" == params.get("agent_channel", "yes")
    install_qemuga = "yes" == params.get("install_qemuga", "no")
    start_qemuga = "yes" == params.get("start_qemuga", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    setvcpu_readonly = "yes" == params.get("setvcpu_readonly", "no")
    status_error = "yes" == params.get("status_error", "no")
    pin_before_plug = "yes" == params.get("pin_before_plug", "no")
    pin_after_plug = "yes" == params.get("pin_after_plug", "no")
    pin_before_unplug = "yes" == params.get("pin_before_unplug", "no")
    pin_after_unplug = "yes" == params.get("pin_after_unplug", "no")
    pin_vcpu = params.get("pin_vcpu")
    pin_cpu_list = params.get("pin_cpu_list", "x")
    check_after_plug_fail = "yes" == params.get("check_after_plug_fail", "no")

    # Init expect vcpu count values
    expect_vcpu_num = [
        vcpu_max_num, vcpu_max_num, vcpu_current_num, vcpu_current_num,
        vcpu_current_num
    ]
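    # Hedged note: judging from how the list is updated below, the five slots
    # appear to track [max (config), max (live), current (config),
    # current (live), current seen by the guest]; '--config' updates slot 2,
    # '--guest' slot 4, and a plain live setvcpus updates slots 3 and 4.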
    if check_after_plug_fail:
        expect_vcpu_num_bk = list(expect_vcpu_num)
    # Init expect vcpu pin values
    expect_vcpupin = {}

    # Init cpu-list for vcpupin
    host_cpu_count = utils.count_cpus()
    if (int(host_cpu_count) < 2) and (not pin_cpu_list == "x"):
        raise error.TestNAError("We need more cpus on host in this case for"
                                " the cpu-list=%s. But current number of cpu"
                                " on host is %s." %
                                (pin_cpu_list, host_cpu_count))

    cpus_list = utils.cpu_online_map()
    logging.info("Active cpus in host are %s", cpus_list)

    cpu_seq_str = ""
    for i in range(len(cpus_list) - 1):
        if int(cpus_list[i]) + 1 == int(cpus_list[i + 1]):
            cpu_seq_str = "%s-%s" % (cpus_list[i], cpus_list[i + 1])
            break

    if pin_cpu_list == "x":
        pin_cpu_list = cpus_list[-1]
    if pin_cpu_list == "x-y":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str
        else:
            pin_cpu_list = "%s-%s" % (cpus_list[0], cpus_list[0])
    elif pin_cpu_list == "x,y":
        pin_cpu_list = "%s,%s" % (cpus_list[0], cpus_list[1])
    elif pin_cpu_list == "x-y,^z":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str + ",^%s" % cpu_seq_str.split('-')[1]
        else:
            pin_cpu_list = "%s,%s,^%s" % (cpus_list[0], cpus_list[1],
                                          cpus_list[0])
    else:
        # Just use the value get from cfg
        pass
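    # Example (illustrative): with cpus_list = ['0', '1', '2', '3'] the
    # placeholders expand to "x" -> "3", "x-y" -> "0-1", "x,y" -> "0,1" and
    # "x-y,^z" -> "0-1,^1".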

    need_mkswap = False
    # Back up domain XML
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    try:
        # Customize domain vcpu number
        if vm.is_alive():
            vm.destroy()
        if agent_channel:
            vmxml.set_agent_channel()
        else:
            vmxml.remove_agent_channels()
        vmxml.sync()

        vmxml.set_vm_vcpus(vm_name, int(vcpu_max_num), int(vcpu_current_num))
        # Do not apply S3/S4 on power
        if 'power' not in cpu_util.get_cpu_arch():
            vmxml.set_pm_suspend(vm_name, "yes", "yes")
        vm.start()

        # Create swap partition/file if necessary
        if vm_operation == "s4":
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition")
            vm.create_swap_partition()

        # Prepare qemu guest agent
        if install_qemuga:
            vm.prepare_guest_agent(prepare_xml=False, start=start_qemuga)
            vm.setenforce(0)
        else:
            # Remove qemu-guest-agent for negative test
            vm.remove_package('qemu-guest-agent')

        # Run test
        check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin)
        # plug vcpu
        if vcpu_plug:
            # Pin vcpu
            if pin_before_plug:
                result = virsh.vcpupin(vm_name,
                                       pin_vcpu,
                                       pin_cpu_list,
                                       ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                expect_vcpupin = {pin_vcpu: pin_cpu_list}

            result = virsh.setvcpus(vm_name,
                                    vcpu_plug_num,
                                    setvcpu_option,
                                    readonly=setvcpu_readonly,
                                    ignore_status=True,
                                    debug=True)
            check_setvcpus_result(result, status_error)

            if setvcpu_option == "--config":
                expect_vcpu_num[2] = vcpu_plug_num
            elif setvcpu_option == "--guest":
                # setvcpus '--guest' only affects the vcpu number in the guest
                expect_vcpu_num[4] = vcpu_plug_num
            else:
                expect_vcpu_num[3] = vcpu_plug_num
                expect_vcpu_num[4] = vcpu_plug_num
                if not status_error:
                    if not online_new_vcpu(vm, vcpu_plug_num):
                        raise error.TestFail("Fail to enable new added cpu")

            # Pin vcpu
            if pin_after_plug:
                result = virsh.vcpupin(vm_name,
                                       pin_vcpu,
                                       pin_cpu_list,
                                       ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                expect_vcpupin = {pin_vcpu: pin_cpu_list}

            if status_error and check_after_plug_fail:
                check_vcpu_number(vm, expect_vcpu_num_bk, {}, setvcpu_option)

            if not status_error:
                if restart_libvirtd:
                    utils_libvirtd.libvirtd_restart()

                # Check vcpu number and related commands
                check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                  setvcpu_option)

                # Control domain
                manipulate_domain(vm_name, vm_operation)

                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)

                # Recover domain
                manipulate_domain(vm_name, vm_operation, recover=True)

                # Resuming the domain from S4 may take a long time (QEMU bug),
                # so wait up to 10 mins and skip the remaining test steps if
                # the domain does not resume successfully
                try:
                    vm.wait_for_login(timeout=600)
                except Exception, e:
                    raise error.TestWarn("Skip remaining test steps as domain"
                                         " not resume in 10 mins: %s" % e)
                # For vcpu hotplug/unplug without the '--config' flag, after
                # suspending the domain to disk (shut off) and re-starting it,
                # the current live vcpu number recovers to its original value
                if vm_operation == 's4':
                    if setvcpu_option.count("--config"):
                        expect_vcpu_num[3] = vcpu_plug_num
                        expect_vcpu_num[4] = vcpu_plug_num
                    elif setvcpu_option.count("--guest"):
                        expect_vcpu_num[4] = vcpu_plug_num
                    else:
                        expect_vcpu_num[3] = vcpu_current_num
                        expect_vcpu_num[4] = vcpu_current_num
                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)

        # Unplug vcpu
        if vcpu_unplug:
            # Pin vcpu
            if pin_before_unplug:
                result = virsh.vcpupin(vm_name,
                                       pin_vcpu,
                                       pin_cpu_list,
                                       ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                # As the vcpu will be unplugged later, set expect_vcpupin to empty
                expect_vcpupin = {}

            result = virsh.setvcpus(vm_name,
                                    vcpu_unplug_num,
                                    setvcpu_option,
                                    readonly=setvcpu_readonly,
                                    ignore_status=True,
                                    debug=True)

            try:
                check_setvcpus_result(result, status_error)
            except error.TestNAError:
                raise error.TestWarn("Skip unplug vcpu as it is not supported")

            if setvcpu_option == "--config":
                expect_vcpu_num[2] = vcpu_unplug_num
            elif setvcpu_option == "--guest":
                # setvcpus '--guest' only affects the vcpu number in the guest
                expect_vcpu_num[4] = vcpu_unplug_num
            else:
                expect_vcpu_num[3] = vcpu_unplug_num
                expect_vcpu_num[4] = vcpu_unplug_num

            # Pin vcpu
            if pin_after_unplug:
                result = virsh.vcpupin(vm_name,
                                       pin_vcpu,
                                       pin_cpu_list,
                                       ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                expect_vcpupin = {pin_vcpu: pin_cpu_list}

            if not status_error:
                if restart_libvirtd:
                    utils_libvirtd.libvirtd_restart()

                # Check vcpu number and related commands
                check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                  setvcpu_option)

                # Control domain
                manipulate_domain(vm_name, vm_operation)

                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)

                # Recover domain
                manipulate_domain(vm_name, vm_operation, recover=True)

                # Resuming the domain from S4 may take a long time (QEMU bug),
                # so wait up to 10 mins and skip the remaining test steps if
                # the domain does not resume successfully
                try:
                    vm.wait_for_login(timeout=600)
                except Exception, e:
                    raise error.TestWarn("Skip remaining test steps as domain"
                                         " not resume in 10 mins: %s" % e)
                # For vcpu hotplug/unplug without the '--config' flag, after
                # suspending the domain to disk (shut off) and re-starting it,
                # the current live vcpu number recovers to its original value
                if vm_operation == 's4':
                    if setvcpu_option.count("--config"):
                        expect_vcpu_num[3] = vcpu_unplug_num
                        expect_vcpu_num[4] = vcpu_unplug_num
                    elif setvcpu_option.count("--guest"):
                        expect_vcpu_num[4] = vcpu_unplug_num
                    else:
                        expect_vcpu_num[3] = vcpu_current_num
                        expect_vcpu_num[4] = vcpu_current_num
                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)
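The five-element expect_vcpu_num list above appears to track, in order, the maximum (config), maximum (live), current (config), current (live) and in-guest vcpu counts. Below is a rough standalone sketch of collecting the first four values with plain virsh; vcpucount_snapshot is a hypothetical helper, not part of the test library, and it only assumes virsh is on PATH.

import subprocess


def vcpucount_snapshot(vm_name):
    """Parse 'virsh vcpucount <domain>' output into {(scope, flavor): count}."""
    out = subprocess.check_output(["virsh", "vcpucount", vm_name],
                                  universal_newlines=True)
    counts = {}
    for line in out.splitlines():
        fields = line.split()
        # data lines look like: "maximum      config         4"
        if len(fields) == 3 and fields[2].isdigit():
            counts[(fields[0], fields[1])] = int(fields[2])
    return counts

# Usage sketch, compared against the first four expect_vcpu_num entries:
# snap = vcpucount_snapshot("my-guest")
# expected = [snap[("maximum", "config")], snap[("maximum", "live")],
#             snap[("current", "config")], snap[("current", "live")]]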
Example #17
def run(test, params, env):
    """
    Test for testing keyboard inputs through spice.
    Test depends on rv_connect test.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """

    guest_vm = env.get_vm(params["guest_vm"])
    guest_vm.verify_alive()

    client_vm = env.get_vm(params["client_vm"])
    client_vm.verify_alive()

    guest_session = guest_vm.wait_for_login(
        timeout=int(params.get("login_timeout", 360)))
    guest_root_session = guest_vm.wait_for_login(timeout=int(
        params.get("login_timeout", 360)),
                                                 username="******",
                                                 password="******")

    # Verify that gnome is now running on the guest
    try:
        guest_session.cmd("ps aux | grep -v grep | grep gnome-session")
    except aexpect.ShellCmdError:
        raise error.TestWarn("gnome-session was probably not corretly started")

    guest_session.cmd("export DISPLAY=:0.0")

    install_pygtk(guest_root_session, params)

    deploy_test_form(test, guest_vm, params)

    # Get test type and perform proper test
    test_type = params.get("config_test")
    test_mapping = {
        'type_and_func_keys': test_type_and_func_keys,
        'leds_and_esc_keys': test_leds_and_esc_keys,
        'nonus_layout': test_nonus_layout,
        'leds_migration': test_leds_migration
    }
    test_parameters = {
        'type_and_func_keys': (client_vm, guest_session, params),
        'leds_and_esc_keys': (client_vm, guest_session, params),
        'nonus_layout': (client_vm, guest_session, params),
        'leds_migration': (client_vm, guest_vm, guest_session, params)
    }

    try:
        func = test_mapping[test_type]
        args = test_parameters[test_type]
    except KeyError:
        raise error.TestFail("Unknown type of test")

    func(*args)

    # Get file with caught keycodes from guest
    result_path = get_test_results(guest_vm)
    # Analyze results and raise a failure exception if the sent scancodes
    # do not match the expected keycodes
    result = analyze_results(result_path, test_type)
    if result is not None:
        raise error.TestFail("Testing of sending keys failed:"
                             "  Expected keycode = %s" % result)

    guest_session.close()
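The test above selects its routine through two parallel dictionaries keyed by config_test. A small generic sketch of the same dispatch-table idea, collapsed into a single table, is shown below; the names in the commented usage are illustrative only and not part of the test.

def dispatch(test_type, table):
    """Look up (func, args) for test_type in a single dispatch table."""
    try:
        func, args = table[test_type]
    except KeyError:
        raise ValueError("Unknown type of test: %s" % test_type)
    return func(*args)

# table = {'type_and_func_keys': (test_type_and_func_keys,
#                                 (client_vm, guest_session, params))}
# dispatch(params.get("config_test"), table)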
Example #18
    def check_image(self, params, root_dir):
        """
        Check an image using the appropriate tools for each virt backend.

        @param params: Dictionary containing the test parameters.
        @param root_dir: Base directory for relative filenames.

        @note: params should contain:
               image_name -- the name of the image file, without extension
               image_format -- the format of the image (qcow2, raw etc)

        @raise VMImageCheckError: In case qemu-img check fails on the image.
        """
        image_filename = self.image_filename
        logging.debug("Checking image file %s", image_filename)
        qemu_img_cmd = self.image_cmd
        image_is_qcow2 = self.image_format == 'qcow2'
        if os.path.exists(image_filename) and image_is_qcow2:
            # Verifying if qemu-img supports 'check'
            q_result = utils.run(qemu_img_cmd, ignore_status=True)
            q_output = q_result.stdout
            check_img = True
            if not "check" in q_output:
                logging.error("qemu-img does not support 'check', "
                              "skipping check")
                check_img = False
            if not "info" in q_output:
                logging.error("qemu-img does not support 'info', "
                              "skipping check")
                check_img = False
            if check_img:
                try:
                    utils.system("%s info %s" % (qemu_img_cmd,
                                                 image_filename))
                except error.CmdError:
                    logging.error("Error getting info from image %s",
                                  image_filename)

                cmd_result = utils.run("%s check %s" %
                                       (qemu_img_cmd, image_filename),
                                       ignore_status=True)
                # Error check, large chances of a non-fatal problem.
                # There are chances that bad data was skipped though
                if cmd_result.exit_status == 1:
                    for e_line in cmd_result.stdout.splitlines():
                        logging.error("[stdout] %s", e_line)
                    for e_line in cmd_result.stderr.splitlines():
                        logging.error("[stderr] %s", e_line)
                    chk = params.get("backup_image_on_check_error", "no")
                    if chk == "yes":
                        self.backup_image(params, root_dir, "backup", False)
                    raise error.TestWarn("qemu-img check error. Some bad "
                                         "data in the image may have gone"
                                         " unnoticed")
                # Exit status 2 is data corruption for sure,
                # so fail the test
                elif cmd_result.exit_status == 2:
                    for e_line in cmd_result.stdout.splitlines():
                        logging.error("[stdout] %s", e_line)
                    for e_line in cmd_result.stderr.splitlines():
                        logging.error("[stderr] %s", e_line)
                    chk = params.get("backup_image_on_check_error", "no")
                    if chk == "yes":
                        self.backup_image(params, root_dir, "backup", False)
                    raise virt_vm.VMImageCheckError(image_filename)
                # Leaked clusters, they are known to be harmless to data
                # integrity
                elif cmd_result.exit_status == 3:
                    raise error.TestWarn("Leaked clusters were noticed"
                                         " during image check. No data "
                                         "integrity problem was found "
                                         "though.")

                # Just handle normal operation
                if params.get("backup_image", "no") == "yes":
                    self.backup_image(params, root_dir, "backup", True)
        else:
            if not os.path.exists(image_filename):
                logging.debug("Image file %s not found, skipping check",
                              image_filename)
            elif not image_is_qcow2:
                logging.debug("Image file %s not qcow2, skipping check",
                              image_filename)
Example #19
File: virt.py  Project: spiceqa/virt-test
    def run_once(self, params):
        # Convert params to a Params object
        params = utils_params.Params(params)

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise error.TestNAError("Test dependency failed")

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        logging.debug("Test parameters:")
        keys = params.keys()
        keys.sort()
        for key in keys:
            logging.debug("    %s = %s", key, params[key])
            self.write_test_keyval({key: params[key]})

        # Set the log file dir for the logging mechanism used by kvm_subprocess
        # (this must be done before unpickling env)
        utils_misc.set_log_file_dir(self.debugdir)

        # Open the environment file
        env_path = params.get("vm_type")
        other_subtests_dirs = params.get("other_tests_dirs", "")
        if other_subtests_dirs:
            env_path = other_subtests_dirs
        env_filename = os.path.join(self.bindir, env_path,
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)

        test_passed = False
        t_type = None

        try:
            try:
                try:
                    subtest_dirs = []
                    bin_dir = self.bindir

                    for d in other_subtests_dirs.split():
                        # Replace split char.
                        d = os.path.join(*d.split("/"))
                        subtestdir = os.path.join(bin_dir, d, "tests")
                        if not os.path.isdir(subtestdir):
                            raise error.TestError("Directory %s not"
                                                  " exist." % (subtestdir))
                        subtest_dirs += data_dir.SubdirList(subtestdir,
                                                            bootstrap.test_filter)
                    # Verify that we have the corresponding source file for it
                    shared_test_dir = os.path.dirname(self.virtdir)
                    shared_test_dir = os.path.join(shared_test_dir, "tests")
                    subtest_dirs += data_dir.SubdirList(shared_test_dir,
                                                        bootstrap.test_filter)
                    virt_test_dir = os.path.join(self.bindir,
                                                 params.get("vm_type"), "tests")
                    subtest_dirs += data_dir.SubdirList(virt_test_dir,
                                                        bootstrap.test_filter)
                    subtest_dir = None

                    # Get the test routine corresponding to the specified
                    # test type
                    t_types = params.get("type").split()
                    test_modules = []
                    for t_type in t_types:
                        for d in subtest_dirs:
                            module_path = os.path.join(d, "%s.py" % t_type)
                            if os.path.isfile(module_path):
                                subtest_dir = d
                                break
                        if subtest_dir is None:
                            msg = ("Could not find test file %s.py on tests"
                                   "dirs %s" % (t_type, subtest_dirs))
                            raise error.TestError(msg)
                        # Load the test module
                        f, p, d = imp.find_module(t_type, [subtest_dir])
                        test_modules.append((t_type,
                                             imp.load_module(t_type, f, p, d)))
                        f.close()
                    # Preprocess
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        env.save()
                    # Run the test function
                    for t_type, test_module in test_modules:
                        msg = "Running function: %s.run_%s()" % (
                            t_type, t_type)
                        logging.info(msg)
                        run_func = getattr(test_module, "run_%s" % t_type)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            env.save()
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise error.TestWarn("funcatexit failed with: %s"
                                             % error_message)

                except Exception, e:
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    logging.error("Test failed: %s: %s",
                                  e.__class__.__name__, e)
                    try:
                        env_process.postprocess_on_error(
                            self, params, env)
                    finally:
                        env.save()
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        env_process.postprocess(self, params, env)
                    except Exception, e:
                        if test_passed:
                            raise
                        logging.error("Exception raised during "
                                      "postprocessing: %s", e)
                finally:
                    env.save()

        except Exception, e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info(
                            "'%s' has a %s monitor unix socket at: %s",
                            vm.name, m.protocol, m.filename)
                    logging.info(
                        "The command line used to start '%s' was:\n%s",
                        vm.name, vm.make_qemu_command())
                raise error.JobError("Abort requested (%s)" % e)
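The nested try/finally blocks in run_once above make sure the environment file is saved and postprocessing runs no matter where the test fails. Below is a compact generic sketch of that structure, using hypothetical callables in place of env.save() and the env_process hooks; it is an illustration of the pattern, not the harness code itself.

def run_with_cleanup(preprocess, run_test, postprocess, save_env):
    """Mirror run_once's save-on-every-exit structure."""
    test_passed = False
    try:
        try:
            preprocess()
        finally:
            save_env()
        run_test()
        test_passed = True
    finally:
        # Postprocessing always runs; its failure is only fatal when the
        # test itself passed, otherwise it is merely reported.
        try:
            try:
                postprocess()
            except Exception as details:
                if test_passed:
                    raise
                print("Exception raised during postprocessing: %s" % details)
        finally:
            save_env()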
def run(test, params, env):
    """
    This tests the disk hotplug/unplug functionality.
    1) prepares multiple disks to be hotplugged
    2) hotplugs them
    3) verifies that they are in qtree/guest system/...
    4) unplugs them
    5) verifies they are not in qtree/guest system/...
    6) repeats $repeat_times
    *) During the whole test stress_cmd might be executed

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def verify_qtree(params, info_qtree, info_block, proc_scsi, qdev):
        """
        Verifies that params, info qtree, info block and /proc/scsi/ match
        :param params: Dictionary with the test parameters
        :type params: virttest.utils_params.Params
        :param info_qtree: Output of "info qtree" monitor command
        :type info_qtree: string
        :param info_block: Output of "info block" monitor command
        :type info_block: dict of dicts
        :param proc_scsi: Output of "/proc/scsi/scsi" guest file
        :type proc_scsi: string
        :param qdev: qcontainer representation
        :type qdev: virttest.qemu_devices.qcontainer.DevContainer
        """
        err = 0
        qtree = qemu_qtree.QtreeContainer()
        qtree.parse_info_qtree(info_qtree)
        disks = qemu_qtree.QtreeDisksContainer(qtree.get_nodes())
        (tmp1, tmp2) = disks.parse_info_block(info_block)
        err += tmp1 + tmp2
        err += disks.generate_params()
        err += disks.check_disk_params(params)
        (tmp1, tmp2, _, _) = disks.check_guests_proc_scsi(proc_scsi)
        err += tmp1 + tmp2
        if err:
            logging.error("info qtree:\n%s", info_qtree)
            logging.error("info block:\n%s", info_block)
            logging.error("/proc/scsi/scsi:\n%s", proc_scsi)
            logging.error(qdev.str_bus_long())
            raise error.TestFail("%s errors occurred while verifying"
                                 " qtree vs. params" % err)

    def insert_into_qdev(qdev, param_matrix, no_disks, params, new_devices):
        """
        Inserts no_disks disks into qdev using randomized args from param_matrix
        :param qdev: qemu devices container
        :type qdev: virttest.qemu_devices.qcontainer.DevContainer
        :param param_matrix: Matrix of randomizable params
        :type param_matrix: list of lists
        :param no_disks: Desired number of disks
        :type no_disks: integer
        :param params: Dictionary with the test parameters
        :type params: virttest.utils_params.Params
        :param new_devices: Per-queue lists to be filled with the new devices
        :type new_devices: list of lists
        :return: (new_devices with the added devices appended, updated params)
        :rtype: tuple(list, virttest.utils_params.Params)
        """
        dev_idx = 0
        _new_devs_fmt = ""
        _formats = param_matrix.pop('fmt', [params.get('drive_format')])
        formats = _formats[:]
        if len(new_devices) == 1:
            strict_mode = None
        else:
            strict_mode = True
        i = 0
        while i < no_disks:
            # Set the format
            if len(formats) < 1:
                if i == 0:
                    raise error.TestError("Fail to add any disks, probably bad"
                                          " configuration.")
                logging.warn("Can't create desired number '%s' of disk types "
                             "'%s'. Using '%d' no disks.", no_disks,
                             _formats, i)
                break
            name = 'stg%d' % i
            args = {'name': name, 'filename': stg_image_name % i}
            fmt = random.choice(formats)
            if fmt == 'virtio_scsi':
                args['fmt'] = 'scsi-hd'
                args['scsi_hba'] = 'virtio-scsi-pci'
            elif fmt == 'lsi_scsi':
                args['fmt'] = 'scsi-hd'
                args['scsi_hba'] = 'lsi53c895a'
            elif fmt == 'spapr_vscsi':
                args['fmt'] = 'scsi-hd'
                args['scsi_hba'] = 'spapr-vscsi'
            else:
                args['fmt'] = fmt
            # Other params
            for key, value in param_matrix.iteritems():
                args[key] = random.choice(value)

            try:
                devs = qdev.images_define_by_variables(**args)
                # parallel test adds devices in mixed order, force bus/addrs
                qdev.insert(devs, strict_mode)
            except utils.DeviceError:
                for dev in devs:
                    if dev in qdev:
                        qdev.remove(dev, recursive=True)
                formats.remove(fmt)
                continue

            params = convert_params(params, args)
            env_process.preprocess_image(test, params.object_params(name),
                                         name)
            new_devices[dev_idx].extend(devs)
            dev_idx = (dev_idx + 1) % len(new_devices)
            _new_devs_fmt += "%s(%s) " % (name, fmt)
            i += 1
        if _new_devs_fmt:
            logging.info("Using disks: %s", _new_devs_fmt[:-1])
        param_matrix['fmt'] = _formats
        return new_devices, params

    def _hotplug(new_devices, monitor, prefix=""):
        """
        Do the actual hotplug of new_devices using the given monitor.
        :param new_devices: List of devices which should be hotplugged
        :type new_devices: List of virttest.qemu_devices.qdevice.QBaseDevice
        :param monitor: Monitor which should be used for hotplug
        :type monitor: virttest.qemu_monitor.Monitor
        :param prefix: Prefix prepended to log messages (set by parallel runs)
        """
        hotplug_outputs = []
        hotplug_sleep = float(params.get('wait_between_hotplugs', 0))
        for device in new_devices:      # Hotplug all devices
            time.sleep(hotplug_sleep)
            hotplug_outputs.append(device.hotplug(monitor))
        time.sleep(hotplug_sleep)
        failed = []
        passed = []
        unverif = []
        for device in new_devices:      # Verify the hotplug status
            out = hotplug_outputs.pop(0)
            out = device.verify_hotplug(out, monitor)
            if out is True:
                passed.append(str(device))
            elif out is False:
                failed.append(str(device))
            else:
                unverif.append(str(device))
        if not failed and not unverif:
            logging.debug("%sAll hotplugs verified (%s)", prefix, len(passed))
        elif not failed:
            logging.warn("%sHotplug status:\nverified %s\nunverified %s",
                         prefix, passed, unverif)
        else:
            logging.error("%sHotplug status:\nverified %s\nunverified %s\n"
                          "failed %s", prefix, passed, unverif, failed)
            logging.error("qtree:\n%s", monitor.info("qtree", debug=False))
            raise error.TestFail("%sHotplug of some devices failed." % prefix)

    def hotplug_serial(new_devices, monitor):
        _hotplug(new_devices[0], monitor)

    def hotplug_parallel(new_devices, monitors):
        threads = []
        for i in xrange(len(new_devices)):
            name = "Th%s: " % i
            logging.debug("%sworks with %s devices", name,
                          [_.str_short() for _ in new_devices[i]])
            thread = threading.Thread(target=_hotplug, name=name[:-2],
                                      args=(new_devices[i], monitors[i], name))
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()
        logging.debug("All threads finished.")

    def _postprocess_images():
        # remove and check the images
        _disks = []
        for disk in params['images'].split(' '):
            if disk.startswith("stg"):
                env_process.postprocess_image(test, params.object_params(disk),
                                              disk)
            else:
                _disks.append(disk)
        params['images'] = " ".join(_disks)

    def _unplug(new_devices, qdev, monitor, prefix=""):
        """
        Do the actual unplug of new_devices using the given monitor.
        :param new_devices: List of devices which should be unplugged
        :type new_devices: List of virttest.qemu_devices.qdevice.QBaseDevice
        :param qdev: qemu devices container
        :type qdev: virttest.qemu_devices.qcontainer.DevContainer
        :param monitor: Monitor which should be used for the unplug
        :type monitor: virttest.qemu_monitor.Monitor
        :param prefix: Prefix prepended to log messages (set by parallel runs)
        """
        unplug_sleep = float(params.get('wait_between_unplugs', 0))
        unplug_outs = []
        unplug_devs = []
        for device in new_devices[::-1]:    # unplug all devices
            if device in qdev:  # Some devices are removed with previous one
                time.sleep(unplug_sleep)
                unplug_devs.append(device)
                unplug_outs.append(device.unplug(monitor))
                # Remove from qdev even when unplug failed because further in
                # this test we compare VM with qdev, which should be without
                # these devices. We can do this because we already set the VM
                # as dirty.
                if LOCK:
                    LOCK.acquire()
                qdev.remove(device)
                if LOCK:
                    LOCK.release()
        time.sleep(unplug_sleep)
        failed = []
        passed = []
        unverif = []
        for device in unplug_devs:          # Verify unplugs
            _out = unplug_outs.pop(0)
            # the unplug effect can be delayed as it waits for the OS response
            # before it removes the device from the qtree
            for _ in xrange(50):
                out = device.verify_unplug(_out, monitor)
                if out is True:
                    break
                time.sleep(0.1)
            if out is True:
                passed.append(str(device))
            elif out is False:
                failed.append(str(device))
            else:
                unverif.append(str(device))

        if not failed and not unverif:
            logging.debug("%sAll unplugs verified (%s)", prefix, len(passed))
        elif not failed:
            logging.warn("%sUnplug status:\nverified %s\nunverified %s",
                         prefix, passed, unverif)
        else:
            logging.error("%sUnplug status:\nverified %s\nunverified %s\n"
                          "failed %s", prefix, passed, unverif, failed)
            logging.error("qtree:\n%s", monitor.info("qtree", debug=False))
            raise error.TestFail("%sUnplug of some devices failed." % prefix)

    def unplug_serial(new_devices, qdev, monitor):
        _unplug(new_devices[0], qdev, monitor)

    def unplug_parallel(new_devices, qdev, monitors):
        threads = []
        for i in xrange(len(new_devices)):
            name = "Th%s: " % i
            logging.debug("%sworks with %s devices", name,
                          [_.str_short() for _ in new_devices[i]])
            thread = threading.Thread(target=_unplug,
                                      args=(new_devices[i], qdev, monitors[i]))
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()
        logging.debug("All threads finished.")

    def verify_qtree_unsupported(params, info_qtree, info_block, proc_scsi,
                                 qdev):
        return logging.warn("info qtree not supported. Can't verify qtree vs. "
                            "guest disks.")

    vm = env.get_vm(params['main_vm'])
    qdev = vm.devices
    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))
    out = vm.monitor.human_monitor_cmd("info qtree", debug=False)
    if "unknown command" in str(out):
        verify_qtree = verify_qtree_unsupported

    stg_image_name = params['stg_image_name']
    if stg_image_name[0] != "/":
        stg_image_name = "%s/%s" % (data_dir.get_data_dir(), stg_image_name)
    stg_image_num = int(params['stg_image_num'])
    stg_params = params.get('stg_params', '').split(' ')
    i = 0
    while i < len(stg_params) - 1:
        if not stg_params[i].strip():
            i += 1
            continue
        if stg_params[i][-1] == '\\':
            stg_params[i] = '%s %s' % (stg_params[i][:-1],
                                       stg_params.pop(i + 1))
        i += 1

    param_matrix = {}
    for i in xrange(len(stg_params)):
        if not stg_params[i].strip():
            continue
        (cmd, parm) = stg_params[i].split(':', 1)
        # ',' separated list of values
        parm = parm.split(',')
        j = 0
        while j < len(parm) - 1:
            if parm[j][-1] == '\\':
                parm[j] = '%s,%s' % (parm[j][:-1], parm.pop(j + 1))
            j += 1

        param_matrix[cmd] = parm

    # Modprobe the module if specified in config file
    module = params.get("modprobe_module")
    if module:
        session.cmd("modprobe %s" % module)

    stress_cmd = params.get('stress_cmd')
    if stress_cmd:
        funcatexit.register(env, params.get('type'), stop_stresser, vm,
                            params.get('stress_kill_cmd'))
        stress_session = vm.wait_for_login(timeout=10)
        for _ in xrange(int(params.get('no_stress_cmds', 1))):
            stress_session.sendline(stress_cmd)

    rp_times = int(params.get("repeat_times", 1))
    queues = params.get("multi_disk_type") == "parallel"
    if queues:  # parallel
        queues = xrange(len(vm.monitors))
        hotplug = hotplug_parallel
        unplug = unplug_parallel
        monitor = vm.monitors
        global LOCK
        LOCK = threading.Lock()
    else:   # serial
        queues = xrange(1)
        hotplug = hotplug_serial
        unplug = unplug_serial
        monitor = vm.monitor
    context_msg = "Running sub test '%s' %s"
    error.context("Verify disk before test", logging.info)
    info_qtree = vm.monitor.info('qtree', False)
    info_block = vm.monitor.info_block(False)
    proc_scsi = session.cmd_output('cat /proc/scsi/scsi')
    verify_qtree(params, info_qtree, info_block, proc_scsi, qdev)
    for iteration in xrange(rp_times):
        error.context("Hotplugging/unplugging devices, iteration %d"
                      % iteration, logging.info)
        sub_type = params.get("sub_type_before_plug")
        if sub_type:
            error.context(context_msg % (sub_type, "before hotplug"),
                          logging.info)
            utils_test.run_virt_sub_test(test, params, env, sub_type)

        error.context("Insert devices into qdev", logging.debug)
        qdev.set_dirty()
        new_devices = [[] for _ in queues]
        new_devices, params = insert_into_qdev(qdev, param_matrix,
                                               stg_image_num, params,
                                               new_devices)

        error.context("Hotplug the devices", logging.debug)
        hotplug(new_devices, monitor)
        time.sleep(float(params.get('wait_after_hotplug', 0)))

        error.context("Verify disks after hotplug", logging.debug)
        info_qtree = vm.monitor.info('qtree', False)
        info_block = vm.monitor.info_block(False)
        vm.verify_alive()
        proc_scsi = session.cmd_output('cat /proc/scsi/scsi')
        verify_qtree(params, info_qtree, info_block, proc_scsi, qdev)
        qdev.set_clean()

        sub_type = params.get("sub_type_after_plug")
        if sub_type:
            error.context(context_msg % (sub_type, "after hotplug"),
                          logging.info)
            utils_test.run_virt_sub_test(test, params, env, sub_type)

        sub_type = params.get("sub_type_before_unplug")
        if sub_type:
            error.context(context_msg % (sub_type, "before hotunplug"),
                          logging.info)
            utils_test.run_virt_sub_test(test, params, env, sub_type)

        error.context("Unplug and remove the devices", logging.debug)
        unplug(new_devices, qdev, monitor)
        _postprocess_images()

        error.context("Verify disks after unplug", logging.debug)
        time.sleep(float(params.get('wait_after_unplug', 0)))
        info_qtree = vm.monitor.info('qtree', False)
        info_block = vm.monitor.info_block(False)
        vm.verify_alive()
        proc_scsi = session.cmd_output('cat /proc/scsi/scsi')
        verify_qtree(params, info_qtree, info_block, proc_scsi, qdev)
        # we verified the unplugs, set the state to 0
        for _ in xrange(qdev.get_state()):
            qdev.set_clean()

        sub_type = params.get("sub_type_after_unplug")
        if sub_type:
            error.context(context_msg % (sub_type, "after hotunplug"),
                          logging.info)
            utils_test.run_virt_sub_test(test, params, env, sub_type)

    # Check for various KVM failures
    error.context("Validating VM after all disk hotplug/unplugs",
                  logging.debug)
    vm.verify_alive()
    out = session.cmd_output('dmesg')
    if "I/O error" in out:
        logging.warn(out)
        raise error.TestWarn("I/O error messages occured in dmesg, check"
                             "the log for details.")
Example #21
def run(test, params, env):
    """
    Test failover by team driver

    1) Boot a vm with 4 nics.
    2) Inside the guest, configure the team driver.
    3) Inside the guest, ping the host.
    4) Inside the guest, repeatedly bring the slaves down one by one.
    5) Check the ping result.

    :param test: Kvm test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def team_port_add(ifnames, team_if):
        """Team0 add ports and return the ip link result for debuging"""
        for port in ifnames:
            session_serial.cmd_output_safe(params["clearip_cmd"] % port)
            session_serial.cmd_output_safe(params["setdown_cmd"] % port)
            session_serial.cmd_output_safe(params["addport_cmd"] % port)
        output_teamnl = session_serial.cmd_output_safe(params["portchk_cmd"])
        ports = re.findall(r"%s" % params["ptn_teamnl"], output_teamnl)
        for port in ifnames:
            if port not in ports:
                raise error.TestFail("Add %s to %s failed." % (port, team_if))
        session_serial.cmd_output_safe(params["killdhclient_cmd"])
        output = session_serial.cmd_output_safe(params["getip_cmd"],
                                                timeout=300)
        ip_match = re.search(r"%s" % params["ptn_ipv4"], output)
        if not ip_match:
            raise error.TestFail("Failed to get ip address of %s" % team_if)
        team_ip = ip_match.group()
        return ports, team_ip

    def failover(ifnames, timeout):
        """func for failover"""
        time.sleep(3)
        starttime = time.time()
        while True:
            pid_ping = session_serial.cmd_output_safe("pidof ping")
            pid = re.findall(r"(\d+)", pid_ping)
            if not pid:
                # ping has finished, exit the loop
                break
            for port in ifnames:
                session_serial.cmd_output_safe(params["setdown_cmd"] % port)
                time.sleep(random.randint(5, 30))
                session_serial.cmd_output_safe(params["setup_cmd"] % port)
            endtime = time.time()
            timegap = endtime - starttime
            if timegap > timeout:
                break

    def check_ping(status, output):
        """ ratio <5% is acceptance."""
        if status != 0:
            raise error.TestFail("Ping failed, staus:%s, output:%s"
                                 % (status, output))
        # if status != 0 the ping process seams hit issue.
        ratio = utils_test.get_loss_ratio(output)
        if ratio == -1:
            raise error.TestFail('''The ratio is %s, and status is %s,
                                    output is %s''' % (ratio, status, output))
        elif ratio > int(params["failed_ratio"]):
            raise error.TestFail("The loss raito is %s, test failed" % ratio)
        logging.info("ping pass with loss raito:%s, that less than %s" %
                     (ratio, params["failed_ratio"]))

    def team_if_exist():
        """ judge if team is alive well."""
        team_exists_cmd = params.get("team_if_exists_cmd")
        return session_serial.cmd_status(team_exists_cmd) == 0

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 1200))
    session_serial = vm.wait_for_serial_login(timeout=timeout)
    ifnames = [utils_net.get_linux_ifname(session_serial,
                                          vm.get_mac_address(vlan))
               for vlan, nic in enumerate(vm.virtnet)]
    session_serial.cmd_output_safe(params["nm_stop_cmd"])
    team_if = params.get("team_if")
    # initial

    error.context("Step1: Configure the team environment", logging.info)
    # steps of building the teaming environment starts
    modprobe_cmd = "modprobe team"
    session_serial.cmd_output_safe(modprobe_cmd)
    session_serial.cmd_output_safe(params["createteam_cmd"])
    # this cmd creates team0 and the corresponding userspace daemon
    if not team_if_exist():
        raise error.TestFail("Interface %s is not created." % team_if)
    # check if team0 is created successfully
    ports, team_ip = team_port_add(ifnames, team_if)
    logging.debug("The list of the ports that added to %s : %s"
                  % (team_if, ports))
    logging.debug("The ip address of %s : %s" % (team_if, team_ip))
    output = session_serial.cmd_output_safe(params["team_debug_cmd"])
    logging.debug("team interface configuration: %s" % output)
    route_cmd = session_serial.cmd_output_safe(params["route_cmd"])
    logging.debug("The route table of guest: %s" % route_cmd)
    # not a checkpoint of this case, just checking that routing works fine
    # steps of building finished

    session = None
    try:
        error.context("Login in guest via ssh", logging.info)
        # steps of testing this case starts
        session = vm.wait_for_login(timeout=timeout)
        dest = utils_net.get_ip_address_by_interface(params["netdst"])
        count = params.get("count")
        timeout = float(count) * 2
        error.context("Step2: Check if guest can ping out:", logging.info)
        status, output = utils_test.ping(dest=dest, count=10,
                                         interface=team_if,
                                         timeout=30,
                                         session=session)
        check_ping(status, output)
        # small ping check if the team0 works w/o failover
        error.context("Step3: Start failover testing until ping finished",
                      logging.info)
        failover_thread = utils.InterruptedThread(failover, (ifnames, timeout))
        failover_thread.start()
        # start failover loop until ping finished
        error.context("Step4: Start ping host for %s counts"
                      % count, logging.info)
        if failover_thread.is_alive():
            status, output = utils_test.ping(dest=dest, count=count,
                                             interface=team_if,
                                             timeout=float(count) * 1.5,
                                             session=session)
            error.context("Step5: Check if ping succeeded", logging.info)
            check_ping(status, output)
        else:
            raise error.TestWarn("The failover thread is not alive")
        time.sleep(3)
        try:
            timeout = timeout * 1.5
            failover_thread.join(timeout)
        except Exception:
            raise error.TestWarn("Failed to join the failover thread")
        # finish the main steps and check the result
        session_serial.cmd_output_safe(params["killteam_cmd"])
        if team_if_exist():
            raise error.TestFail("Remove %s failed" % team_if)
        logging.info("%s removed" % team_if)
        # remove the team0 and the daemon, check if succeed
    finally:
        if session:
            session.close()
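check_ping above relies on utils_test.get_loss_ratio to pull the loss percentage out of the ping output. Below is a standalone sketch of that parsing, assuming the usual iputils summary line and returning -1 when the ratio cannot be found, mirroring the -1 check above.

import re


def loss_ratio(ping_output):
    """Return the packet-loss percentage from ping output, or -1 if absent."""
    match = re.search(r"([\d.]+)% packet loss", ping_output)
    if not match:
        return -1
    return float(match.group(1))

# loss_ratio("10 packets transmitted, 9 received, 10% packet loss") -> 10.0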
Example #22
    def check_image(self, params, root_dir):
        """
        Check an image using the appropriate tools for each virt backend.

        :param params: Dictionary containing the test parameters.
        :param root_dir: Base directory for relative filenames.

        :note: params should contain:
               image_name -- the name of the image file, without extension
               image_format -- the format of the image (qcow2, raw etc)

        :raise VMImageCheckError: In case qemu-img check fails on the image.
        """
        image_filename = self.image_filename
        logging.debug("Checking image file %s", image_filename)
        qemu_img_cmd = self.image_cmd
        image_is_checkable = self.image_format in ['qcow2', 'qed']

        if (storage.file_exists(params, image_filename) or
                params.get("enable_gluster", "no") == "yes") and image_is_checkable:
            check_img = self.support_cmd("check") and self.support_cmd("info")
            if not check_img:
                logging.debug("Skipping image check "
                              "(lack of support in qemu-img)")
            else:
                try:
                    utils.run("%s info %s" % (qemu_img_cmd, image_filename),
                              verbose=True)
                except error.CmdError:
                    logging.error("Error getting info from image %s",
                                  image_filename)

                cmd_result = utils.run("%s check %s" %
                                       (qemu_img_cmd, image_filename),
                                       ignore_status=True, verbose=True)
                # Error check, large chances of a non-fatal problem.
                # There are chances that bad data was skipped though
                if cmd_result.exit_status == 1:
                    for e_line in cmd_result.stdout.splitlines():
                        logging.error("[stdout] %s", e_line)
                    for e_line in cmd_result.stderr.splitlines():
                        logging.error("[stderr] %s", e_line)
                    chk = params.get("backup_image_on_check_error", "no")
                    if chk == "yes":
                        self.backup_image(params, root_dir, "backup", False)
                    raise error.TestWarn("qemu-img check error. Some bad "
                                         "data in the image may have gone"
                                         " unnoticed (%s)" % image_filename)
                # Exit status 2 is data corruption for sure,
                # so fail the test
                elif cmd_result.exit_status == 2:
                    for e_line in cmd_result.stdout.splitlines():
                        logging.error("[stdout] %s", e_line)
                    for e_line in cmd_result.stderr.splitlines():
                        logging.error("[stderr] %s", e_line)
                    chk = params.get("backup_image_on_check_error", "no")
                    if chk == "yes":
                        self.backup_image(params, root_dir, "backup", False)
                    raise virt_vm.VMImageCheckError(image_filename)
                # Leaked clusters, they are known to be harmless to data
                # integrity
                elif cmd_result.exit_status == 3:
                    raise error.TestWarn("Leaked clusters were noticed"
                                         " during image check. No data "
                                         "integrity problem was found "
                                         "though. (%s)" % image_filename)

                # Just handle normal operation
                if params.get("backup_image", "no") == "yes":
                    self.backup_image(params, root_dir, "backup", True, True)
        else:
            if not storage.file_exists(params, image_filename):
                logging.debug("Image file %s not found, skipping check",
                              image_filename)
            elif not image_is_checkable:
                logging.debug(
                    "Image format %s is not checkable, skipping check",
                    self.image_format)
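Below is a condensed standalone sketch of the same exit-code triage used by check_image above; the meanings of codes 1, 2 and 3 are taken from the branches above rather than from qemu documentation, and qemu-img is assumed to be on PATH.

import subprocess


def triage_image_check(image_filename, qemu_img_cmd="qemu-img"):
    """Classify 'qemu-img check' results the way check_image above does."""
    proc = subprocess.Popen([qemu_img_cmd, "check", image_filename],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if proc.returncode == 0:
        return "clean"
    if proc.returncode == 2:
        return "corrupted"        # data corruption for sure -> fail the test
    if proc.returncode == 3:
        return "leaked-clusters"  # harmless to data integrity -> warn only
    return "check-error"          # likely non-fatal, but warn and maybe back up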
Example #23
def run_warning(test, params, env):
    """
    Raise TestWarn exception (should trigger WARN in simple harness).
    """
    raise error.TestWarn("Warn test is raising a test warning!")