Example #1
 def plug(self):
     hotplug_cmd = self.params.get("cpu_hotplug_cmd", "")
     for cpu in range(self.smp, self.maxcpus, self.vcpu_threads):
         error_context.context("hot-pluging vCPU %s" % cpu, logging.info)
         self.vm.hotplug_vcpu(cpu_id=cpu, plug_command=hotplug_cmd)
         time.sleep(1)
     utils_misc.check_if_vm_vcpu_match(240, self.vm)
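Every snippet in this listing ultimately funnels into utils_misc.check_if_vm_vcpu_match(expected, vm), which (per the docstrings further down) logs in to the guest and compares the number of online vCPUs it reports with the expected count. Below is a minimal stand-in for that comparison, run against a canned /proc/cpuinfo string instead of a live guest session; the helper name and sample are illustrative, not the avocado-vt implementation.

def vcpu_count_matches(expected, guest_cpuinfo):
    """Return True if the guest output lists 'expected' online processors."""
    online = sum(1 for line in guest_cpuinfo.splitlines()
                 if line.startswith("processor"))
    return online == expected

# Canned guest output with two online vCPUs:
sample = "processor\t: 0\nmodel name\t: x\nprocessor\t: 1\nmodel name\t: x\n"
assert vcpu_count_matches(2, sample)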
Example #2
 def unplug(self):
     self.vm.monitor.info("status") 
     hotplug_cmd = self.params.get("cpu_hotunplug_cmd", "")
     for cpu in range(self.smp, self.maxcpus, self.vcpu_threads):
         error_context.context("hot-unpluging vCPU %s" % cpu, logging.info)
         self.vm.hotplug_vcpu(cpu_id=cpu, plug_command=hotplug_cmd)
         time.sleep(1)
     self.vm.monitor.info("status") 
     utils_misc.check_if_vm_vcpu_match(348, self.vm)
Example #3
File: new.py Project: maxujun/test
 def plug(self, sleeptime, vcpus):
     self.vm.monitor.info("status") 
     hotplug_cmd = self.params.get("cpu_hotplug_cmd", "")
     for cpu in range(self.smp, int(vcpus), self.vcpu_threads):
         error_context.context("hot-pluging vCPU %s" % cpu, logging.info)
         self.vm.hotplug_vcpu(cpu_id=cpu, plug_command=hotplug_cmd)
         time.sleep(int(sleeptime))
     self.vm.monitor.info("status") 
     self.vm.resume()
     utils_misc.check_if_vm_vcpu_match(self.maxcpus, self.vm)
Example #4
def run(test, params, env):
    """
    NOTE: hotplug_vcpu has been available since RHEL 6.3,
          so the case of booting without an HMP monitor is not considered here.
    Test steps:
        1) boot the vm with -smp X,maxcpus=Y
        2) after logging into the vm, check the vcpu number
        3) hotplug non-existent vcpu ids (not in 1..160) into the guest
        4) check the guest vcpu quantity; it should not have changed
    params:
        :param test: QEMU test object
        :param params: Dictionary with the test parameters
        :param env: Dictionary with test environment.
    """

    hotplug_cmd = "cpu_set %s online"

    error_context.context(
        "boot the vm, with '-smp X,maxcpus=Y' option,"
        "thus allow hotplug vcpu", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    error_context.context(
        "check if CPUs in guest matches qemu cmd "
        "before hot-plug", logging.info)
    smp_by_cmd = int(params.get("smp"))
    if not utils_misc.check_if_vm_vcpu_match(smp_by_cmd, vm):
        test.error("CPU quantity mismatch cmd before hotplug !")
    # Start vCPU hotplug
    error_context.context("hotplugging non-existed vCPU...", logging.info)
    vcpus_need_hotplug = params.get("nonexist_vcpu", "-1 161").split(" ")
    for vcpu in vcpus_need_hotplug:
        try:
            error_context.context("hot-pluging vCPU %s" % vcpu, logging.info)
            output = vm.monitor.send_args_cmd(hotplug_cmd % vcpu)
        finally:
            error_context.context("output from monitor is: %s" % output,
                                  logging.info)
    # Windows is a bit slow and needs a few more seconds to recognize new CPUs.
    error_context.context(
        "hotplugging finished, let's wait a few sec and"
        " check cpus quantity in guest.", logging.info)
    if not utils_misc.wait_for(
            lambda: utils_misc.check_if_vm_vcpu_match(smp_by_cmd, vm),
            60,
            first=10,
            step=5.0,
            text="retry later"):
        test.fail("CPU quantity mismatch cmd after hotplug !")
Example #5
    def check_vcpu_status(cpulist, cpu_option, vcpus_online_pre=1):
        """
        Fail the test if the vcpu status in the domain XML or the number of
        online vcpus inside the VM is not as expected

        :param cpulist: a vcpu list set by setvcpu
        :param cpu_option: a string used by setvcpu
        :param vcpus_online_pre: number of online vcpus before running setvcpu
        """
        if check.endswith("config"):
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        else:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(vmxml)

        # check the vcpu status in xml
        cpu_count = 0
        for cpu_id in cpulist:
            if "enable" in cpu_option:
                cpu_count += 1
                if (vmxml.vcpus.vcpu[cpu_id].get('enabled') != "yes"):
                    test.fail("vcpu status check fail")
            elif "disable" in cpu_option:
                cpu_count -= 1
                if (vmxml.vcpus.vcpu[cpu_id].get('enabled') != "no"):
                    test.fail("vcpu status check fail")
            else:
                test.fail("wrong vcpu status in xml")

        # login vm and check the number of online vcpu
        if check == "hotplug":
            if not utils_misc.check_if_vm_vcpu_match(cpu_count + vcpus_online_pre, vm):
                test.fail("vcpu status check fail")
Example #6
File: new.py Project: maxujun/test
 def hotunplug(self, st):
     st_params = self.params.object_params(st)
     interval_time = st_params.get("interval_time") 
     self.vm.monitor.info("status") 
     unplug_cmd = self.params.get("cpu_hotunplug_cmd", "")
     if "max" in st_params.get("unplug_vcpus"):
         unplug_vcpus = int(self.vcpu_threads) 
     else:
         unplug_vcpus = int(st_params.get("unplug_vcpus"))
     for cpu in range(unplug_vcpus, self.current_cpus, self.vcpu_threads):
         error_context.context("hot-unpluging vCPU %s" % cpu, logging.info)
         self.vm.hotplug_vcpu(cpu_id=cpu, plug_command=unplug_cmd, unplug="yes")
         time.sleep(int(interval_time))
     self.vm.monitor.info("status") 
     self.vm.resume()
     utils_misc.check_if_vm_vcpu_match(self.smp, self.vm)
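self.params.object_params(st) above derives a per-sub-test view of the parameters, so that keys suffixed with the sub-test name override the plain keys. Here is a rough dict-based sketch of that idea, under the assumption that this is how the suffix lookup behaves; the real avocado-vt Params class is more involved.

def object_params(params, name):
    """Return params where 'key_<name>' entries override plain 'key' entries."""
    suffix = "_" + name
    scoped = dict(params)
    for key, value in params.items():
        if key.endswith(suffix):
            scoped[key[:-len(suffix)]] = value
    return scoped

base = {"interval_time": "1", "unplug_vcpus": "4", "unplug_vcpus_burst": "max"}
print(object_params(base, "burst")["unplug_vcpus"])  # max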
Example #7
    def check_vcpu_status(cpulist, cpu_option, vcpus_online_pre=1):
        """
        Fail the test if the vcpu status in the domain XML or the number of
        online vcpus inside the VM is not as expected

        :param cpulist: a vcpu list set by setvcpu
        :param cpu_option: a string used by setvcpu
        :param vcpus_online_pre: number of online vcpus before running setvcpu
        """
        if check.endswith("config"):
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        else:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(vmxml)

        # check the vcpu status in xml
        cpu_count = 0
        for cpu_id in cpulist:
            if "enable" in cpu_option:
                cpu_count += 1
                if (vmxml.vcpus.vcpu[cpu_id].get('enabled') != "yes"):
                    test.fail("vcpu status check fail")
            elif "disable" in cpu_option:
                cpu_count -= 1
                if (vmxml.vcpus.vcpu[cpu_id].get('enabled') != "no"):
                    test.fail("vcpu status check fail")
            else:
                test.fail("wrong vcpu status in xml")

        # login vm and check the number of online vcpu
        if check == "hotplug":
            if not utils_misc.check_if_vm_vcpu_match(cpu_count + vcpus_online_pre, vm):
                test.fail("vcpu status check fail")
Example #8
def run(test, params, env):
    """
    NOTE: hotplug_vcpu has been available since RHEL 6.3,
          so the case of booting without an HMP monitor is not considered here.
    Test steps:
        1) boot the vm with -smp X,maxcpus=Y
        2) after logging into the vm, check the vcpu number
        3) hotplug non-existent vcpu ids (not in 1..160) into the guest
        4) check the guest vcpu quantity; it should not have changed
    params:
        :param test: QEMU test object
        :param params: Dictionary with the test parameters
        :param env: Dictionary with test environment.
    """

    hotplug_cmd = "cpu_set %s online"

    error.context("boot the vm, with '-smp X,maxcpus=Y' option,"
                  "thus allow hotplug vcpu", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    error.context("check if CPUs in guest matches qemu cmd "
                  "before hot-plug", logging.info)
    smp_by_cmd = int(params.get("smp"))
    if not utils_misc.check_if_vm_vcpu_match(smp_by_cmd, vm):
        raise error.TestError("CPU quantity mismatch cmd before hotplug !")
    # Start vCPU hotplug
    error.context("hotplugging non-existed vCPU...", logging.info)
    vcpus_need_hotplug = params.get("nonexist_vcpu", "-1 161").split(" ")
    for vcpu in vcpus_need_hotplug:
        try:
            error.context("hot-pluging vCPU %s" % vcpu, logging.info)
            output = vm.monitor.send_args_cmd(hotplug_cmd % vcpu)
        finally:
            error.context("output from monitor is: %s" % output, logging.info)
    # Windows is a bit slow and needs a few more seconds to recognize new CPUs.
    error.context("hotplugging finished, let's wait a few sec and"
                  " check cpus quantity in guest.", logging.info)
    if not utils_misc.wait_for(lambda: utils_misc.check_if_vm_vcpu_match(
                               smp_by_cmd, vm),
                               60, first=10, step=5.0, text="retry later"):
        raise error.TestFail("CPU quantity mismatch cmd after hotplug !")
Example #9
    def check_onlinevcpus(vm, cpu_num):
        """

        Check whether all vcpus are online as expected.

        :param vm: the VM to check
        :param cpu_num: the number of online vcpus expected to match
        """
        if not utils_misc.wait_for(
                lambda: utils_misc.check_if_vm_vcpu_match(cpu_num, vm),
                timeout=120, step=5, text="wait for vcpu online"):
            test.fail('Not all vcpus are online as expected.')
Example #10
    def verify(vm, total_cpus):
        output = vm.monitor.send_args_cmd("info cpus")
        logging.debug("Output of info CPUs:\n%s", output)

        cpu_regexp = re.compile(r"CPU #(\d+)")
        total_cpus_monitor = len(cpu_regexp.findall(output))
        if total_cpus_monitor != total_cpus:
            test.fail("Monitor reports %s CPUs, when VM should have"
                      " %s" % (total_cpus_monitor, total_cpus))
        error_context.context("hotplugging finished, let's wait a few sec and"
                              " check CPUs quantity in guest.", logging.info)
        if not utils_misc.wait_for(lambda: utils_misc.check_if_vm_vcpu_match(
                                   total_cpus, vm),
                                   60 + total_cpus, first=10,
                                   step=5.0, text="retry later"):
            test.fail("CPU quantity mismatch cmd after hotplug !")
        error_context.context("rebooting the vm and check CPU quantity !",
                              logging.info)
        session = vm.reboot()
        if not utils_misc.check_if_vm_vcpu_match(total_cpus, vm):
            test.fail("CPU quantity mismatch cmd after hotplug and reboot !")
Example #11
def check_vcpu_value(vm, exp_vcpu, vcpupin=None, option="", guest_agent=False):
    """
    Check domain vcpu, including vcpucount, vcpuinfo, vcpupin, vcpu number and
    cputune in domain xml, vcpu number inside the domain.

    :param vm: VM object
    :param exp_vcpu: dict of expected vcpu numbers:
        exp_vcpu['max_config'] = maximum config vcpu number
        exp_vcpu['max_live'] = maximum live vcpu number
        exp_vcpu['cur_config'] = current config vcpu number
        exp_vcpu['cur_live'] = current live vcpu number
        exp_vcpu['guest_live'] = vcpu number inside the domain
    :param vcpupin: A dict of expected vcpu affinities
    :param option: Option for virsh commands (setvcpu, setvcpus etc.)
    :param guest_agent: True if agent present

    :return: True if the exp_vcpu values match the virsh API values,
            False otherwise
    """
    final_result = True
    logging.debug("Expect vcpu number: %s", exp_vcpu)

    # 1.1 Check virsh vcpucount output
    if not check_vcpucount(vm, exp_vcpu, option, guest_agent):
        final_result = False

    # 1.2 Check virsh vcpuinfo output
    if not check_vcpuinfo(vm, exp_vcpu):
        final_result = False

    # 1.3 Check affinity from virsh vcpupin,virsh vcpuinfo, xml(cputune)
    if vcpupin:
        if not check_affinity(vm, vcpupin):
            final_result = False

    # 1.4 Check the vcpu count in the xml
    if not check_xmlcount(vm, exp_vcpu, option):
        final_result = False

    if vm.is_alive() and (not vm.is_paused()) and "live" in option:
        # 1.5 Check inside the guest
        if not utils_misc.check_if_vm_vcpu_match(exp_vcpu['guest_live'], vm):
            final_result = False
        # 1.6 Check guest numa
        if not guest_numa_check(vm, exp_vcpu):
            final_result = False
    # 1.7 Check virsh domstats output
    if not check_vcpu_domstats(vm, exp_vcpu):
        final_result = False

    return final_result
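check_vcpu_value() expects exp_vcpu to carry the five keys documented above. Here is a hedged example of what that dictionary could look like after hot-plugging a domain from 2 to 4 current vCPUs with a maximum of 8; the numbers are purely illustrative.

exp_vcpu = {
    "max_config": 8,   # maximum vcpus in the persistent XML
    "max_live": 8,     # maximum vcpus on the running domain
    "cur_config": 2,   # current vcpus in the persistent XML
    "cur_live": 4,     # current vcpus on the running domain
    "guest_live": 4,   # online vcpus reported inside the guest
}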
Example #13
File: new.py Project: maxujun/test
 def hotplug(self, st):
     st_params = self.params.object_params(st)
     interval_time = st_params.get("interval_time") 
     wait_time = st_params.get("wait_time") 
     self.vm.monitor.info("status") 
     hotplug_cmd = self.params.get("cpu_hotplug_cmd", "")
     print(self.maxcpus)
     if "max" in st_params.get("plug_vcpus"):
         plug_vcpus = int(self.maxcpus) 
     else:
         plug_vcpus = int(st_params.get("plug_vcpus"))
     print(plug_vcpus)
     for cpu in range(self.vcpu_threads, plug_vcpus, self.vcpu_threads):
         error_context.context("hot-pluging vCPU %s" % cpu, logging.info)
         self.vm.hotplug_vcpu(cpu_id=cpu, plug_command=hotplug_cmd)
         time.sleep(int(interval_time))
     time.sleep(int(wait_time))
     self.vm.monitor.info("status") 
     self.vm.resume()
     if not utils_misc.check_if_vm_vcpu_match(plug_vcpus, self.vm):
         time.sleep(int(wait_time))
         if not utils_misc.check_if_vm_vcpu_match(plug_vcpus, self.vm):
             self.test.fail("Failed to hotplug cpu to %s" % plug_vcpus)
     self.current_cpus = plug_vcpus
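The plug loop above steps cpu_id by self.vcpu_threads, presumably so that each hotplug_vcpu() call accounts for a full core's worth of threads. A quick worked example of the indices that loop produces, assuming 2 threads per core and a target of 8 vCPUs:

vcpu_threads, plug_vcpus = 2, 8
print(list(range(vcpu_threads, plug_vcpus, vcpu_threads)))  # [2, 4, 6]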
Example #14
def run(test, params, env):
    """
    Different cpu compat mode scenario tests

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def check_feature(vm, feature="", vcpu=0):
        """
        Checks the given feature is present
        :param vm: VM Name
        :param feature: feature to be verified
        :param vcpu: vcpu number to pin guest test
        :return: true on success, test fail on failure
        """
        session = vm.wait_for_login()
        if 'power8' in feature:
            cmd = 'lscpu|grep -i "Model name:.*power8"'
        elif 'xive' in feature:
            # remove -v once guest xive support is available
            # right now power9 guest supports only xics
            cmd = "grep -v xive /sys/firmware/devicetree/base/interrupt-*/compatible"
        elif 'xics' in feature:
            cmd = "grep -v xive /sys/firmware/devicetree/base/interrupt-*/compatible"
        elif 'power9' in feature:
            cmd = 'lscpu|grep -i "Model name:.*power9"'
        elif 'hpt' in feature:
            cmd = 'grep "MMU.*: Hash" /proc/cpuinfo'
        elif 'rpt' in feature:
            cmd = 'grep "MMU.*: Radix" /proc/cpuinfo'
        elif 'isa' in feature:
            cmd = "echo 'int main(){asm volatile (\".long 0x7c0005e6\");"
            cmd += "return 0;}' > ~/a.c;cc ~/a.c;taskset -c %s ./a.out" % vcpu
        status = session.cmd_status(cmd)
        session.close()
        if feature != "isa2.7":
            if status != 0:
                test.fail("Feature: %s check failed inside "
                          "%s guest on %s host" %
                          (feature, guest_version, host_version))
        else:
            if status == 0:
                test.fail("isa3.0 instruction succeeds in "
                          "%s guest on %s host" %
                          (guest_version, host_version))
        return True

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pin_vcpu = 0
    host_version = params.get("host_version")
    guest_version = params.get("guest_version")
    max_vcpu = params.get("cpucompat_vcpu_max", "")
    cur_vcpu = int(params.get("cpucompat_vcpu_cur", "1"))
    cores = int(params.get("topology_cores", '1'))
    sockets = int(params.get("topology_sockets", '1'))
    threads = int(params.get("topology_threads", '1'))
    status_error = "yes" == params.get("status_error", "no")
    condn = params.get("condn", "")
    guest_features = params.get("guest_features", "")
    if guest_features:
        guest_features = guest_features.split(',')
        if guest_version:
            guest_features.append(guest_version)
    if host_version not in cpu.get_cpu_arch():
        test.cancel("Unsupported Host cpu version")

    vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    org_xml = vmxml.copy()
    # Destroy the vm
    vm.destroy()
    try:
        # Set cpu model
        if max_vcpu:
            pin_vcpu = int(max_vcpu) - 1
            libvirt_xml.VMXML.set_vm_vcpus(vm_name,
                                           int(max_vcpu),
                                           cur_vcpu,
                                           sockets=sockets,
                                           cores=cores,
                                           threads=threads,
                                           add_topology=True)
        libvirt_xml.VMXML.set_cpu_mode(vm_name, model=guest_version)
        try:
            vm.start()
        except virt_vm.VMStartError as detail:
            if not status_error:
                test.fail("%s" % detail)
            else:
                pass
        if max_vcpu:
            virsh.setvcpus(vm_name,
                           int(max_vcpu),
                           "--live",
                           ignore_status=False,
                           debug=True)
            if not utils_misc.check_if_vm_vcpu_match(int(max_vcpu), vm):
                test.fail("Vcpu hotplug failed")
        if not status_error:
            for feature in guest_features:
                check_feature(vm, feature, vcpu=pin_vcpu)
        if condn == "filetrans":
            utils_test.run_file_transfer(test, params, env)
        elif condn == "stress":
            bt = utils_test.run_avocado_bg(vm, params, test)
            if not bt:
                test.cancel("guest stress failed to start")
        elif condn == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
            result = virsh.save(vm_name,
                                save_file,
                                ignore_status=True,
                                debug=True)
            utils_test.libvirt.check_exit_status(result)
            # Just sleep few secs before guest recovery
            time.sleep(2)
            if os.path.exists(save_file):
                result = virsh.restore(save_file,
                                       ignore_status=True,
                                       debug=True)
                utils_test.libvirt.check_exit_status(result)
                os.remove(save_file)
        elif condn == "suspend":
            result = virsh.suspend(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result)
            # Just sleep few secs before guest recovery
            time.sleep(2)
            result = virsh.resume(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result)
        else:
            pass
    finally:
        org_xml.sync()
Example #15
def run(test, params, env):
    """
    Runs CPU hotplug test:

    1) Boot the vm with -smp X,maxcpus=Y
    2) After logged into the vm, check CPUs number
    3) Send the monitor command cpu_set [cpu id] for each cpu we wish to have
    4) Verify if guest has the additional CPUs showing up
    5) reboot the vm
    6) recheck that the guest sees the hot-plugged CPUs
    7) Try to bring them online by writing 1 to the 'online' file inside
       that dir (Linux guest only)
    8) Run the CPU Hotplug test suite shipped with autotest inside guest
       (Linux guest only)

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    error.context("boot the vm, with '-smp X,maxcpus=Y' option,"
                  "thus allow hotplug vcpu", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    n_cpus_add = int(params.get("n_cpus_add", 1))
    maxcpus = int(params.get("maxcpus", 160))
    current_cpus = int(params.get("smp", 1))
    onoff_iterations = int(params.get("onoff_iterations", 20))
    cpu_hotplug_cmd = params.get("cpu_hotplug_cmd", "")

    if n_cpus_add + current_cpus > maxcpus:
        logging.warn("CPU quantity more than maxcpus, set it to %s", maxcpus)
        total_cpus = maxcpus
    else:
        total_cpus = current_cpus + n_cpus_add

    error.context("check if CPUs in guest matches qemu cmd "
                  "before hot-plug", logging.info)
    if not utils_misc.check_if_vm_vcpu_match(current_cpus, vm):
        raise error.TestError("CPU quantity mismatch cmd before hotplug !")

    for cpu in range(current_cpus, total_cpus):
        error.context("hot-pluging vCPU %s" % cpu, logging.info)
        vm.hotplug_vcpu(cpu_id=cpu, plug_command=cpu_hotplug_cmd)

    output = vm.monitor.send_args_cmd("info cpus")
    logging.debug("Output of info CPUs:\n%s", output)

    cpu_regexp = re.compile("CPU #(\d+)")
    total_cpus_monitor = len(cpu_regexp.findall(output))
    if total_cpus_monitor != total_cpus:
        raise error.TestFail("Monitor reports %s CPUs, when VM should have"
                             " %s" % (total_cpus_monitor, total_cpus))
    # Windows is a bit slow and needs a few more seconds to recognize new CPUs.
    error.context("hotplugging finished, let's wait a few sec and"
                  " check CPUs quantity in guest.", logging.info)
    if not utils_misc.wait_for(lambda: utils_misc.check_if_vm_vcpu_match(
                               total_cpus, vm),
                               60 + total_cpus, first=10,
                               step=5.0, text="retry later"):
        raise error.TestFail("CPU quantity mismatch cmd after hotplug !")
    error.context("rebooting the vm and check CPU quantity !", logging.info)
    session = vm.reboot()
    if not utils_misc.check_if_vm_vcpu_match(total_cpus, vm):
        raise error.TestFail("CPU quantity mismatch cmd after hotplug "
                             "and reboot !")

    # Window guest doesn't support online/offline test
    if params['os_type'] == "windows":
        return

    error.context("locating online files for guest's new CPUs")
    r_cmd = 'find /sys/devices/system/cpu/cpu*/online -maxdepth 0 -type f'
    online_files = session.cmd(r_cmd)
    # Sometimes the return value include command line itself
    if "find" in online_files:
        online_files = " ".join(online_files.strip().split("\n")[1:])
    logging.debug("CPU online files detected: %s", online_files)
    online_files = online_files.split()
    online_files.sort()

    if not online_files:
        raise error.TestFail("Could not find CPUs that can be "
                             "enabled/disabled on guest")

    control_path = os.path.join(test.virtdir, "control",
                                "cpu_hotplug.control")

    timeout = int(params.get("cpu_hotplug_timeout", 300))
    error.context("running cpu_hotplug autotest after cpu addition")
    utils_test.run_autotest(vm, session, control_path, timeout,
                            test.outputdir, params)

    # Last, but not least, let's offline/online the CPUs in the guest
    # several times
    irq = 15
    irq_mask = "f0"
    for i in xrange(onoff_iterations):
        session.cmd("echo %s > /proc/irq/%s/smp_affinity" % (irq_mask, irq))
        for online_file in online_files:
            session.cmd("echo 0 > %s" % online_file)
        for online_file in online_files:
            session.cmd("echo 1 > %s" % online_file)
Example #16
def run(test, params, env):
    """
    Different cpu compat mode scenario tests

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    def check_feature(vm, feature="", vcpu=0):
        """
        Checks the given feature is present
        :param vm: VM Name
        :param feature: feature to be verified
        :param vcpu: vcpu number to pin guest test
        :return: true on success, test fail on failure
        """
        session = vm.wait_for_login()
        if 'power8' in feature:
            cmd = 'lscpu|grep -i "Model name:.*power8"'
        elif 'xive' in feature:
            # remove -v once guest xive support is available
            # right now power9 guest supports only xics
            cmd = "grep -v xive /sys/firmware/devicetree/base/interrupt-*/compatible"
        elif 'xics' in feature:
            cmd = "grep -v xive /sys/firmware/devicetree/base/interrupt-*/compatible"
        elif 'power9' in feature:
            cmd = 'lscpu|grep -i "Model name:.*power9"'
        elif 'hpt' in feature:
            cmd = 'grep "MMU.*: Hash" /proc/cpuinfo'
        elif 'rpt' in feature:
            cmd = 'grep "MMU.*: Radix" /proc/cpuinfo'
        elif 'isa' in feature:
            utils_package.package_install('gcc', session)
            cmd = "echo 'int main(){asm volatile (\".long 0x7c0005e6\");"
            cmd += "return 0;}' > ~/a.c;cc ~/a.c;taskset -c %s ./a.out" % vcpu
        status, output = session.cmd_status_output(cmd)
        logging.debug(output)
        session.close()
        if feature != "isa2.7":
            if status != 0:
                test.fail("Feature: %s check failed inside "
                          "%s guest on %s host" % (feature,
                                                   guest_version,
                                                   host_version))
        else:
            if status == 0:
                test.fail("isa3.0 instruction succeeds in "
                          "%s guest on %s host" % (guest_version,
                                                   host_version))
        return True

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pin_vcpu = 0
    host_version = params.get("host_version")
    guest_version = params.get("guest_version")
    max_vcpu = params.get("cpucompat_vcpu_max", "")
    cur_vcpu = int(params.get("cpucompat_vcpu_cur", "1"))
    cores = int(params.get("topology_cores", '1'))
    sockets = int(params.get("topology_sockets", '1'))
    threads = int(params.get("topology_threads", '1'))
    status_error = "yes" == params.get("status_error", "no")
    condn = params.get("condn", "")
    guest_features = params.get("guest_features", "")
    if guest_features:
        guest_features = guest_features.split(',')
        if guest_version:
            guest_features.append(guest_version)
    if host_version not in cpu.get_cpu_arch():
        test.cancel("Unsupported Host cpu version")

    vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    org_xml = vmxml.copy()
    # Destroy the vm
    vm.destroy()
    try:
        # Set cpu model
        if max_vcpu:
            pin_vcpu = int(max_vcpu) - 1
            libvirt_xml.VMXML.set_vm_vcpus(vm_name, int(max_vcpu), cur_vcpu,
                                           sockets=sockets, cores=cores,
                                           threads=threads, add_topology=True)
        libvirt_xml.VMXML.set_cpu_mode(vm_name, model=guest_version)
        logging.debug(virsh.dumpxml(vm_name))
        try:
            vm.start()
        except virt_vm.VMStartError as detail:
            if not status_error:
                test.fail("%s" % detail)
            else:
                pass
        if max_vcpu:
            virsh.setvcpus(vm_name, int(max_vcpu), "--live",
                           ignore_status=False, debug=True)
            if not utils_misc.check_if_vm_vcpu_match(int(max_vcpu), vm):
                test.fail("Vcpu hotplug failed")
        if not status_error:
            for feature in guest_features:
                check_feature(vm, feature, vcpu=pin_vcpu)
        if condn == "filetrans":
            utils_test.run_file_transfer(test, params, env)
        elif condn == "stress":
            bt = utils_test.run_avocado_bg(vm, params, test)
            if not bt:
                test.cancel("guest stress failed to start")
        elif condn == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
            result = virsh.save(vm_name, save_file, ignore_status=True,
                                debug=True)
            utils_test.libvirt.check_exit_status(result)
            # Just sleep few secs before guest recovery
            time.sleep(2)
            if os.path.exists(save_file):
                result = virsh.restore(save_file, ignore_status=True,
                                       debug=True)
                utils_test.libvirt.check_exit_status(result)
                os.remove(save_file)
        elif condn == "suspend":
            result = virsh.suspend(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result)
            # Just sleep few secs before guest recovery
            time.sleep(2)
            result = virsh.resume(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result)
        else:
            pass
    finally:
        org_xml.sync()
Example #17
def run(test, params, env):
    """
    Test vcpu hotpluggable item in xml

    1. Set the libvirtd log filter/level/file
    2. Restart libvirtd
    3. Start vm by xml with vcpu hotpluggable
    4. Check the qemu command line
    5. Check the libvirtd log
    6. Restart libvirtd
    7. Check the vm xml
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vcpus_placement = params.get("vcpus_placement", "static")
    vcpus_current = int(params.get("vcpus_current", "4"))
    vcpus_max = int(params.get("vcpus_max", "8"))
    vcpus_enabled = params.get("vcpus_enabled", "")
    vcpus_hotplug = params.get("vcpus_hotpluggable", "")
    vcpus_order = params.get("vcpus_order")
    err_msg = params.get("err_msg", "")
    config_libvirtd = params.get("config_libvirtd", "yes") == "yes"
    log_file = params.get("log_file", "libvirtd.log")
    set_live_vcpus = params.get("set_live_vcpus", "")
    set_config_vcpus = params.get("set_config_vcpus", "")

    # Backup domain XML
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        # Configure libvirtd log
        if config_libvirtd:
            config_path = os.path.join(data_dir.get_tmp_dir(), log_file)
            with open(config_path, 'a') as f:
                pass
            config = utils_config.LibvirtdConfig()
            log_outputs = "1:file:%s" % config_path
            config.log_outputs = log_outputs
            config.log_level = 1
            config.log_filters = "1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event"

            # Restart libvirtd to make the changes take effect in libvirt
            libvirtd.restart()

        # Set vcpu: placement,current,max vcpu
        vmxml.placement = vcpus_placement
        vmxml.vcpu = vcpus_max
        vmxml.current_vcpu = vcpus_current
        del vmxml.cpuset

        # Create vcpu xml with vcpu hotpluggable and order
        vcpu_list = []
        vcpu = {}
        en_list = vcpus_enabled.split(",")
        hotplug_list = vcpus_hotplug.split(",")
        order_dict = ast.literal_eval(vcpus_order)

        for vcpu_id in range(vcpus_max):
            vcpu['id'] = str(vcpu_id)
            if str(vcpu_id) in en_list:
                vcpu['enabled'] = 'yes'
                if str(vcpu_id) in order_dict:
                    vcpu['order'] = order_dict[str(vcpu_id)]
            else:
                vcpu['enabled'] = 'no'
            if str(vcpu_id) in hotplug_list:
                vcpu['hotpluggable'] = 'yes'
            else:
                vcpu['hotpluggable'] = 'no'
            vcpu_list.append(copy.copy(vcpu))
            vcpu = {}

        vcpus_xml = vm_xml.VMVCPUSXML()
        vcpus_xml.vcpu = vcpu_list

        vmxml.vcpus = vcpus_xml
        vmxml.sync()

        # Start VM
        logging.info("Start VM with vcpu hotpluggable and order...")
        ret = virsh.start(vm_name, ignore_status=True)

        if err_msg:
            libvirt.check_result(ret, err_msg)
        else:
            # Wait for domain
            vm.wait_for_login()

            if set_live_vcpus:
                ret = virsh.setvcpus(vm_name,
                                     set_live_vcpus,
                                     ignore_status=True,
                                     debug=True)
                vcpus_current = int(set_live_vcpus)
            if set_config_vcpus:
                ret = virsh.setvcpus(vm_name,
                                     set_config_vcpus,
                                     "--config",
                                     ignore_status=True,
                                     debug=True)

            # Check QEMU command line
            cmd = ("ps -ef| grep %s| grep 'maxcpus=%s'" % (vm_name, vcpus_max))
            ret = process.run(cmd, ignore_status=False, shell=True)
            if ret.exit_status != 0:
                logging.error("Maxcpus in QEMU command line is wrong!")

            # Check libvirtd log
            if config_libvirtd:
                for vcpu in vcpu_list:
                    if vcpu['enabled'] == 'yes' and vcpu[
                            'hotpluggable'] == 'yes':
                        cmd = (
                            "cat %s| grep device_add| grep qemuMonitorIOWrite"
                            "| grep 'vcpu%s'" % (config_path, vcpu['id']))
                        ret = process.run(cmd, ignore_status=False, shell=True)
                        if ret.exit_status != 0:
                            logging.error(
                                "Failed to find lines about enabled vcpu%s"
                                "in libvirtd log.", vcpu['id'])

            # Dumpxml
            dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            vcpu_items = re.findall(r"vcpu.*", dump_xml)

            # Check guest vcpu count
            ret = virsh.vcpucount(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            max_list = re.findall(r"maximum.*[config|live].*%s\n" % vcpus_max,
                                  output)
            if len(max_list) != 2:
                test.fail("vcpucount maximum info is not correct.")

            if set_live_vcpus:
                crt_live_list = re.findall(
                    r"current.*live.*%s" % set_live_vcpus, output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current live info is not correct.")
            elif set_config_vcpus:
                crt_cfg_list = re.findall(
                    r"current.*config.*%s" % set_config_vcpus, output)
                logging.info("vcpucount crt_cfg_list: \n %s", crt_cfg_list)
                if len(crt_cfg_list) != 1:
                    test.fail("vcpucount: current config info is not correct.")
            else:
                crt_list = re.findall(
                    r"current.*[config|live].*%s" % vcpus_current, output)
                logging.info("vcpucount crt_list: \n %s", crt_list)
                if len(crt_list) != 2:
                    test.fail("vcpucount: current info is not correct.")

            # Check guest vcpu info
            ret = virsh.vcpuinfo(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            vcpu_lines = re.findall(r"VCPU:.*\n", output)
            logging.info("vcpuinfo vcpu_lines: \n %s", vcpu_lines)
            if len(vcpu_lines) != vcpus_current:
                test.fail("vcpuinfo is not correct.")

            # Check cpu in guest
            if not utils_misc.check_if_vm_vcpu_match(vcpus_current, vm):
                test.fail(
                    "cpu number in VM is not correct, it should be %s cpus" %
                    vcpus_current)

            # Check VM xml change for cold-plug/cold-unplug
            if set_config_vcpus:
                inactive_xml = virsh.dumpxml(vm_name,
                                             "--inactive").stdout.strip()
                crt_vcpus_xml = re.findall(
                    r"vcpu.*current=.%s.*" % set_config_vcpus, inactive_xml)
                logging.info("dumpxml --inactive xml: \n %s", crt_vcpus_xml)
                if len(crt_vcpus_xml) != 1:
                    test.fail("Dumpxml with --inactive,"
                              "the vcpu current is not correct.")

            # Restart libvirtd
            libvirtd.restart()

            # Recheck VM xml
            re_dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            re_vcpu_items = re.findall(r"vcpu.*", re_dump_xml)
            if vcpu_items != re_vcpu_items:
                test.fail("After restarting libvirtd,"
                          "VM xml changed unexpectedly.")

            # Check cgroup info
            en_vcpu_list = re.findall(r"vcpu.*enabled=.yes.*", re_dump_xml)
            for vcpu_sn in range(len(en_vcpu_list)):
                vcpu_id = en_vcpu_list[vcpu_sn].split("=")[1].split()[0].strip(
                    '\'')
                cmd = ("lscgroup| grep cpuset| grep %s| grep vcpu%s" %
                       (vm_name[-3:], vcpu_id))
                ret = process.run(cmd, ignore_status=False, shell=True)
                if ret.exit_status != 0:
                    test.fail("Failed to find lines about enabled vcpu%s"
                              "in lscgroup info." % vcpu_id)
    finally:
        # Recover libvirtd configration
        if config_libvirtd:
            config.restore()
            if os.path.exists(config_path):
                os.remove(config_path)

        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
Example #18
def run(test, params, env):
    """
    Runs vCPU hotplug tests based on CPU device:
    """

    def hotplug(vm, current_cpus, total_cpus, vcpu_threads):
        for cpu in range(current_cpus, total_cpus):
            error_context.context("hot-pluging vCPU %s" % cpu, logging.info)
            vm.hotplug_vcpu(cpu_id=cpu, plug_command=hotplug_cmd)
            time.sleep(0.1)
        time.sleep(5)

    def hotunplug(vm, current_cpus, total_cpus, vcpu_threads):
        for cpu in range(current_cpus, total_cpus):
            error_context.context("hot-unpluging vCPU %s" % cpu, logging.info)
            vm.hotplug_vcpu(cpu_id=cpu, plug_command=unplug_cmd, unplug="yes")
            time.sleep(0.1)
        # Need more time to unplug, so sleeping more than hotplug.
        time.sleep(10)

    def verify(vm, total_cpus):
        output = vm.monitor.send_args_cmd("info cpus")
        logging.debug("Output of info CPUs:\n%s", output)

        cpu_regexp = re.compile(r"CPU #(\d+)")
        total_cpus_monitor = len(cpu_regexp.findall(output))
        if total_cpus_monitor != total_cpus:
            test.fail("Monitor reports %s CPUs, when VM should have"
                      " %s" % (total_cpus_monitor, total_cpus))
        error_context.context("hotplugging finished, let's wait a few sec and"
                              " check CPUs quantity in guest.", logging.info)
        if not utils_misc.wait_for(lambda: utils_misc.check_if_vm_vcpu_match(
                                   total_cpus, vm),
                                   60 + total_cpus, first=10,
                                   step=5.0, text="retry later"):
            test.fail("CPU quantity mismatch cmd after hotplug !")
        error_context.context("rebooting the vm and check CPU quantity !",
                              logging.info)
        session = vm.reboot()
        if not utils_misc.check_if_vm_vcpu_match(total_cpus, vm):
            test.fail("CPU quantity mismatch cmd after hotplug and reboot !")

    error_context.context("boot the vm, with '-smp X,maxcpus=Y' option,"
                          "thus allow hotplug vcpu", logging.info)

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    n_cpus_add = int(params.get("n_cpus_add", 1))
    n_cpus_remove = int(params.get("n_cpus_remove", 1))
    maxcpus = int(params.get("maxcpus", 240))
    current_cpus = int(params.get("smp", 2))
    onoff_iterations = int(params.get("onoff_iterations", 20))
    hotplug_cmd = params.get("cpu_hotplug_cmd", "")
    unplug_cmd = params.get("cpu_hotunplug_cmd", "")
    vcpu_cores = int(params.get("vcpu_cores", 1))
    vcpu_threads = int(params.get("vcpu_threads", 1))
    cpu_model = params.get("cpu_model", "host")
    unplug = params.get("unplug", "no")
    total_cpus = current_cpus

    if unplug == "yes":
        n_cpus_add = n_cpus_remove

    hotplug_cmd = hotplug_cmd.replace("CPU_MODEL", cpu_model)

    if (n_cpus_add * vcpu_threads) + current_cpus > maxcpus:
        logging.warn("CPU quantity more than maxcpus, set it to %s", maxcpus)
        total_cpus = maxcpus
    else:
        total_cpus = current_cpus + (n_cpus_add * vcpu_threads)

    logging.info("current_cpus=%s, total_cpus=%s", current_cpus, total_cpus)
    error_context.context("check if CPUs in guest matches qemu cmd "
                          "before hot-plug", logging.info)
    if not utils_misc.check_if_vm_vcpu_match(current_cpus, vm):
        test.error("CPU quantity mismatch cmd before hotplug !")
    hotplug(vm, current_cpus, total_cpus, vcpu_threads)
    verify(vm, total_cpus)

    if unplug == "yes":
        hotunplug(vm, current_cpus, total_cpus, vcpu_threads)

        total_cpus = total_cpus - (n_cpus_remove * vcpu_threads)
        if total_cpus <= 0:
            total_cpus = current_cpus
        verify(vm, total_cpus)
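The total_cpus arithmetic above caps the hotplug target at maxcpus when current_cpus + n_cpus_add * vcpu_threads would exceed it. A worked example with assumed values, adding 3 cores of 2 threads each to a 2-vCPU guest limited to maxcpus=6:

n_cpus_add, vcpu_threads, current_cpus, maxcpus = 3, 2, 2, 6
total_cpus = min(maxcpus, current_cpus + n_cpus_add * vcpu_threads)
print(total_cpus)  # 6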
Example #19
def run(test, params, env):
    """
    Test vcpu
    """
    vm_name = params.get('main_vm')
    check = params.get('check', '')
    status_error = 'yes' == params.get('status_error', 'no')
    err_msg = params.get('err_msg', '')
    guest_vcpu = params.get('guest_vcpu')
    boot_timeout = int(params.get('boot_timeout', 240))
    start_fail = 'yes' == params.get('start_fail', 'no')

    vm = env.get_vm(vm_name)
    vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()

    def set_iommu(vmxml, **dargs):
        """

        Add iommu device to vm.

        :param vmxml: xml of the vm to which the iommu device is added
        :param dargs: args of the iommu device
        :return:
        """
        logging.info('Add iommu device to vm.')
        iommu_device = Iommu()
        iommu_device.model = dargs.get('model', 'intel')
        iommu_device.driver = dargs.get('driver', {
            'intremap': 'on',
            'eim': 'on'
        })
        vmxml.add_device(iommu_device)

    try:
        # Configure a guest vcpu > 255 without iommu device
        if check == 'no_iommu':
            logging.info('Set vcpu to %s', guest_vcpu)
            vmxml.vcpu = int(guest_vcpu)
            result_need_check = virsh.define(vmxml.xml, debug=True)

        # Set iommu device but do not set ioapic in features
        if check == 'with_iommu':
            logging.info('Set vcpu to %s', guest_vcpu)
            vmxml.vcpu = int(guest_vcpu)
            set_iommu(vmxml)
            result_need_check = virsh.define(vmxml.xml, debug=True)

        # Add ioapic and iommu device in xml
        if check.startswith('ioapic_iommu'):
            logging.info('Modify features')
            vm_features = vmxml.features
            vm_features.add_feature('apic')
            vm_features.add_feature('ioapic', 'driver', 'qemu')
            vmxml.features = vm_features
            logging.debug(vmxml.features.get_feature_list())

            logging.info('Set vcpu to %s', guest_vcpu)
            set_iommu(vmxml)

            ori_vcpu = vmxml.vcpu
            vmxml.vcpu = int(guest_vcpu)
            vmxml.current_vcpu = ori_vcpu

            if 'hotplug' not in check:
                vmxml.current_vcpu = int(guest_vcpu)

            vmxml.sync()
            logging.debug(virsh.dumpxml(vm_name))

            if status_error:
                if start_fail:
                    result_need_check = virsh.start(vm_name, debug=True)

            else:
                # Login guest and check guest cpu number
                virsh.start(vm_name, debug=True)
                session = vm.wait_for_login(timeout=boot_timeout)
                logging.debug(session.cmd('lscpu -e'))

                # Hotplug vcpu to $guest_vcpu
                if 'hotplug' in check:
                    res = virsh.setvcpus(vm_name, guest_vcpu, debug=True)
                    libvirt.check_exit_status(res)

                # Check if vcpu(s) are online
                if not utils_misc.wait_for(
                        lambda: utils_misc.check_if_vm_vcpu_match(
                            int(guest_vcpu), vm),
                        timeout=60,
                        step=5,
                        text="wait for vcpu online"):
                    test.fail('Not all CPU(s) are online')

        # Check result if there's result to check
        if 'result_need_check' in locals():
            libvirt.check_result(result_need_check, err_msg)

    finally:
        bkxml.sync()
Example #20
def run(test, params, env):
    """
    Test vcpu
    """
    vm_name = params.get('main_vm')
    check = params.get('check', '')
    status_error = 'yes' == params.get('status_error', 'no')
    err_msg = params.get('err_msg', '')
    guest_vcpu = params.get('guest_vcpu')
    boot_timeout = int(params.get('boot_timeout', 240))
    start_fail = 'yes' == params.get('start_fail', 'no')

    vm = env.get_vm(vm_name)
    vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()

    def set_iommu(vmxml, **dargs):
        """

        Add iommu device to vm.

        :param vmxml: xml of the vm to which the iommu device is added
        :param dargs: args of the iommu device
        :return:
        """
        logging.info('Add iommu device to vm.')
        iommu_device = Iommu()
        iommu_device.model = dargs.get('model', 'intel')
        iommu_device.driver = dargs.get('driver', {'intremap': 'on', 'eim': 'on'})
        vmxml.add_device(iommu_device)

    try:
        # Configure a guest vcpu > 255 without iommu device
        if check == 'no_iommu':
            logging.info('Set vcpu to %s', guest_vcpu)
            vmxml.vcpu = int(guest_vcpu)
            result_need_check = virsh.define(vmxml.xml, debug=True)

        # Set iommu device but do not set ioapic in features
        if check == 'with_iommu':
            logging.info('Set vcpu to %s', guest_vcpu)
            vmxml.vcpu = int(guest_vcpu)
            set_iommu(vmxml)
            result_need_check = virsh.define(vmxml.xml, debug=True)

        # Add ioapic and iommu device in xml
        if check.startswith('ioapic_iommu'):
            logging.info('Modify features')
            vm_features = vmxml.features
            vm_features.add_feature('apic')
            vm_features.add_feature('ioapic', 'driver', 'qemu')
            vmxml.features = vm_features
            logging.debug(vmxml.features.get_feature_list())

            logging.info('Set vcpu to %s', guest_vcpu)
            set_iommu(vmxml)

            ori_vcpu = vmxml.vcpu
            vmxml.vcpu = int(guest_vcpu)
            vmxml.current_vcpu = ori_vcpu

            if 'hotplug' not in check:
                vmxml.current_vcpu = int(guest_vcpu)

            vmxml.sync()
            logging.debug(virsh.dumpxml(vm_name))

            if status_error:
                if start_fail:
                    result_need_check = virsh.start(vm_name, debug=True)

            else:
                # Login guest and check guest cpu number
                virsh.start(vm_name, debug=True)
                session = vm.wait_for_login(timeout=boot_timeout)
                logging.debug(session.cmd('lscpu -e'))

                # Hotplug vcpu to $guest_vcpu
                if 'hotplug' in check:
                    res = virsh.setvcpus(vm_name, guest_vcpu, debug=True)
                    libvirt.check_exit_status(res)

                # Check if vcpu(s) are online
                if not utils_misc.wait_for(
                        lambda: utils_misc.check_if_vm_vcpu_match(int(guest_vcpu), vm),
                        timeout=60, step=5, text="wait for vcpu online"):
                    test.fail('Not all CPU(s) are online')

        # Check result if there's result to check
        if 'result_need_check' in locals():
            libvirt.check_result(result_need_check, err_msg)

    finally:
        bkxml.sync()
def run(test, params, env):
    """
    Test: vcpu hotplug.

    The command can change the number of virtual CPUs for VM.
    1. Prepare test environment, destroy or suspend a VM.
    2. Perform virsh setvcpus operation.
    3. Recover test environment.
    4. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    min_count = int(params.get("setvcpus_min_count", "1"))
    max_count = int(params.get("setvcpus_max_count", "2"))
    test_times = int(params.get("setvcpus_test_times", "1"))
    stress_type = params.get("stress_type", "")
    stress_param = params.get("stress_param", "")
    add_by_virsh = ("yes" == params.get("add_by_virsh"))
    del_by_virsh = ("yes" == params.get("del_by_virsh"))
    hotplug_timeout = int(params.get("hotplug_timeout", 30))
    test_set_max = max_count * 2

    # Save original configuration
    orig_config_xml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Set min/max of vcpu
    libvirt_xml.VMXML.set_vm_vcpus(vm_name, test_set_max, min_count,
                                   topology_correction=True)

    # prepare VM instance
    vm = libvirt_vm.VM(vm_name, params, test.bindir, env.get("address_cache"))

    # prepare guest-agent service
    vm.prepare_guest_agent()

    # Increase the workload
    load_vms = []
    if stress_type in ['cpu', 'memory', 'io']:
        params["stress_args"] = stress_param
    load_vms.append(vm)
    if stress_type in ['cpu', 'memory']:
        utils_test.load_stress("stress_in_vms", params, vms=load_vms)
    else:
        utils_test.load_stress("iozone_in_vms", params, vms=load_vms)

    session = vm.wait_for_login()
    try:
        # Clear dmesg before set vcpu
        session.cmd("dmesg -c")
        for i in range(test_times):
            # 1. Add vcpu
            add_result = libvirt.hotplug_domain_vcpu(vm,
                                                     max_count,
                                                     add_by_virsh)
            add_status = add_result.exit_status
            # 1.1 check add status
            if add_status:
                if add_result.stderr.count("support"):
                    test.cancel("vcpu hotplug not supported, "
                                "no need to test any more:\n %s"
                                % add_result.stderr.strip())
                test.fail("Test failed for:\n %s"
                          % add_result.stderr.strip())
            if not utils_misc.wait_for(lambda: utils_misc.check_if_vm_vcpu_match(max_count, vm),
                                       hotplug_timeout,
                                       text="wait for vcpu online"):
                test.fail("vcpu hotplug failed")

            if 'ppc' not in platform.machine():
                # 1.2 check dmesg
                domain_add_dmesg = session.cmd_output("dmesg -c")
                dmesg1 = "CPU%d has been hot-added" % (max_count - 1)
                dmesg2 = "CPU %d got hotplugged" % (max_count - 1)
                if (not domain_add_dmesg.count(dmesg1) and
                        not domain_add_dmesg.count(dmesg2)):
                    test.fail("Cannot find hotplug info in dmesg: %s"
                              % domain_add_dmesg)
            # 1.3 check cpu related file
            online_cmd = "cat /sys/devices/system/cpu/cpu%d/online" \
                         % (max_count - 1)
            st, ot = session.cmd_status_output(online_cmd)
            if st:
                test.fail("Cannot find CPU%d after hotplug"
                          % (max_count - 1))
            # 1.4 check online
            if not ot.strip().count("1"):
                test.fail("CPU%d is not online after hotplug: %s"
                          % ((max_count - 1), ot))
            # 1.5 check online interrupts info
            inter_on_output = session.cmd_output("cat /proc/interrupts")
            if not inter_on_output.count("CPU%d" % (int(max_count) - 1)):
                test.fail("CPU%d can not be found in "
                          "/proc/interrupts when it's online:%s"
                          % ((int(max_count) - 1), inter_on_output))
            # 1.6 offline vcpu
            off_st = session.cmd_status("echo 0 > "
                                        "/sys/devices/system/cpu/cpu%d/online"
                                        % (max_count - 1))
            if off_st:
                test.fail("Set cpu%d offline failed!"
                          % (max_count - 1))
            # 1.7 check offline interrupts info
            inter_off_output = session.cmd_output("cat /proc/interrupts")
            if inter_off_output.count("CPU%d" % (int(max_count) - 1)):
                test.fail("CPU%d can be found in /proc/interrupts"
                          " when it's offline"
                          % (int(max_count) - 1))
            # 1.8 online vcpu
            on_st = session.cmd_status("echo 1 > "
                                       "/sys/devices/system/cpu/cpu%d/online"
                                       % (max_count - 1))
            if on_st:
                test.fail("Set cpu%d online failed!"
                          % (max_count - 1))
            # 2. Del vcpu
            del_result = libvirt.hotplug_domain_vcpu(vm,
                                                     min_count,
                                                     del_by_virsh,
                                                     hotplug=False)
            del_status = del_result.exit_status
            if del_status:
                logging.info("del_result: %s" % del_result.stderr.strip())
                # A qemu older than 1.5 or an unplug for 1.6 will result in
                # the following failure.
                # TODO: when the CPU-hotplug feature becomes stable and robust,
                #       remove this code that handles the various exceptions
                if re.search("The command cpu-del has not been found",
                             del_result.stderr):
                    test.cancel("vcpu hotunplug not supported")
                if re.search("cannot change vcpu count", del_result.stderr):
                    test.cancel("unhotplug failed")
                if re.search("got wrong number of vCPU pids from QEMU monitor",
                             del_result.stderr):
                    test.cancel("unhotplug failed")
                # Process any message that contains the keyword 'support',
                # for example "unsupported"/"hasn't been supported" and so on
                if re.search("support", del_result.stderr):
                    test.cancel("vcpu hotunplug not supported")

                # Anything else is regarded as a real failure
                test.fail("Test fail for:\n %s"
                          % del_result.stderr.strip())
            if not utils_misc.wait_for(lambda: utils_misc.check_if_vm_vcpu_match(min_count, vm),
                                       hotplug_timeout,
                                       text="wait for vcpu offline"):
                test.fail("vcpu hotunplug failed")
            if 'ppc' not in platform.machine():
                domain_del_dmesg = session.cmd_output("dmesg -c")
                if not domain_del_dmesg.count("CPU %d is now offline"
                                              % (max_count - 1)):
                    test.fail("Cannot find hot-unplug info in dmesg: %s"
                              % domain_del_dmesg)
    except exceptions.TestCancel:
        # So far, QEMU doesn't support vcpu unplug; the unplug
        # operation may run into various errors.
        pass
    finally:
        utils_test.unload_stress("stress_in_vms", params, load_vms)
        if session:
            session.close()
        # Cleanup
        orig_config_xml.sync()
def run(test, params, env):
    """
    Test vcpu hotpluggable item in xml

    1. Set the libvirtd log filter/level/file
    2. Restart libvirtd
    3. Start vm by xml with vcpu hotpluggable
    4. Check the qemu command line
    5. Check the libvirtd log
    6. Restart libvirtd
    7. Check the vm xml
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vcpus_placement = params.get("vcpus_placement", "static")
    vcpus_crt = int(params.get("vcpus_current", "4"))
    vcpus_max = int(params.get("vcpus_max", "8"))
    vcpus_enabled = params.get("vcpus_enabled", "")
    vcpus_hotplug = params.get("vcpus_hotpluggable", "")
    vcpus_order = params.get("vcpus_order")
    err_msg = params.get("err_msg", "")
    config_libvirtd = params.get("config_libvirtd", "yes") == "yes"
    log_file = params.get("log_file", "libvirtd.log")
    live_vcpus = params.get("set_live_vcpus", "")
    config_vcpus = params.get("set_config_vcpus", "")
    enable_vcpu = params.get("set_enable_vcpu", "")
    disable_vcpu = params.get("set_disable_vcpu", "")
    # Install cgroup utils
    cgutils = "libcgroup-tools"
    if "ubuntu" in platform.dist()[0].lower():
        cgutils = "cgroup-tools"
    sm = SoftwareManager()
    if not sm.check_installed(cgutils) and not sm.install(cgutils):
        test.cancel("cgroup utils package install failed")
    # Backup domain XML
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        # Configure libvirtd log
        if config_libvirtd:
            config_path = os.path.join(data_dir.get_tmp_dir(), log_file)
            # Make sure the log file exists before pointing libvirtd at it
            with open(config_path, 'a'):
                pass
            config = utils_config.LibvirtdConfig()
            log_outputs = "1:file:%s" % config_path
            config.log_outputs = log_outputs
            config.log_level = 1
            config.log_filters = "1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event"
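            # Illustrative only: the resulting libvirtd.conf settings would
            # look roughly like this (the log file is a per-run temp file):
            #   log_level = 1
            #   log_filters = "1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event"
            #   log_outputs = "1:file:<tmp_dir>/libvirtd.log"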

            # Restart libvirtd to make the changes take effect in libvirt
            libvirtd.restart()

        # Set vcpu: placement,current,max vcpu
        vmxml.placement = vcpus_placement
        vmxml.vcpu = vcpus_max
        vmxml.current_vcpu = vcpus_crt
        del vmxml.cpuset

        # Create vcpu xml with vcpu hotpluggable and order
        vcpu_list = []
        vcpu = {}
        en_list = vcpus_enabled.split(",")
        hotplug_list = vcpus_hotplug.split(",")
        order_dict = ast.literal_eval(vcpus_order)

        for vcpu_id in range(vcpus_max):
            vcpu['id'] = str(vcpu_id)
            if str(vcpu_id) in en_list:
                vcpu['enabled'] = 'yes'
                if str(vcpu_id) in order_dict:
                    vcpu['order'] = order_dict[str(vcpu_id)]
            else:
                vcpu['enabled'] = 'no'
            if str(vcpu_id) in hotplug_list:
                vcpu['hotpluggable'] = 'yes'
            else:
                vcpu['hotpluggable'] = 'no'
            vcpu_list.append(copy.copy(vcpu))
            vcpu = {}

        vcpus_xml = vm_xml.VMVCPUSXML()
        vcpus_xml.vcpu = vcpu_list

        vmxml.vcpus = vcpus_xml
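        # Illustrative only: with hypothetical params such as vcpus_max=8,
        # vcpus_enabled="0" and vcpus_hotpluggable="4", the generated <vcpus>
        # element would look roughly like:
        #   <vcpus>
        #     <vcpu id='0' enabled='yes' hotpluggable='no' order='1'/>
        #     ...
        #     <vcpu id='4' enabled='no' hotpluggable='yes'/>
        #     ...
        #   </vcpus>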

        # Remove influence from topology setting
        try:
            logging.info('Remove influence from topology setting')
            cpuxml = vmxml.cpu
            del cpuxml.topology
            vmxml.cpu = cpuxml
        except Exception as e:
            logging.debug("Ignoring failure to remove cpu topology: %s", e)

        vmxml.sync()

        # Start VM
        logging.info("Start VM with vcpu hotpluggable and order...")
        ret = virsh.start(vm_name, ignore_status=True)

        if err_msg:
            libvirt.check_result(ret, err_msg)
        else:
            # Wait for domain
            vm.wait_for_login()

            if enable_vcpu:
                ret = virsh.setvcpu(vm_name, enable_vcpu, "--enable",
                                    ignore_status=False, debug=True)
                vcpus_crt += 1
            if disable_vcpu:
                ret = virsh.setvcpu(vm_name, disable_vcpu, "--disable",
                                    ignore_status=False, debug=True)
                vcpus_crt -= 1
            if live_vcpus:
                ret = virsh.setvcpus(vm_name, live_vcpus, ignore_status=False,
                                     debug=True)
                vcpus_crt = int(live_vcpus)
            if config_vcpus:
                ret = virsh.setvcpus(vm_name, config_vcpus, "--config",
                                     ignore_status=False, debug=True)

            # Check QEMU command line
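            # The qemu process is expected to contain something like
            # "-smp 4,maxcpus=8" (actual values depend on vcpus_current/vcpus_max)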
            cmd = ("ps -ef| grep %s| grep 'maxcpus=%s'" % (vm_name, vcpus_max))
            ret = process.run(cmd, ignore_status=True, shell=True)
            if ret.exit_status != 0:
                logging.error("Maxcpus in QEMU command line is wrong!")

            # Check libvirtd log
            if config_libvirtd:
                for vcpu in vcpu_list:
                    if vcpu['enabled'] == 'yes' and vcpu['hotpluggable'] == 'yes':
                        cmd = ("cat %s| grep device_add| grep qemuMonitorIOWrite"
                               "| grep 'vcpu%s'" % (config_path, vcpu['id']))
                        ret = process.run(cmd, ignore_status=True, shell=True)
                        if ret.exit_status != 0:
                            logging.error("Failed to find lines about enabled vcpu%s "
                                          "in libvirtd log.", vcpu['id'])

            # Dumpxml
            dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            vcpu_items = re.findall(r"vcpu.*", dump_xml)

            # Check guest vcpu count
            ret = virsh.vcpucount(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            max_list = re.findall(r"maximum.*(?:config|live).*%s\n" % vcpus_max, output)
            if len(max_list) != 2:
                test.fail("vcpucount maximum info is not correct.")

            if live_vcpus:
                crt_live_list = re.findall(r"current.*live.*%s" % live_vcpus, output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current live info is not correct.")
            elif config_vcpus:
                crt_cfg_list = re.findall(r"current.*config.*%s" % config_vcpus, output)
                logging.info("vcpucount crt_cfg_list: \n %s", crt_cfg_list)
                if len(crt_cfg_list) != 1:
                    test.fail("vcpucount: current config info is not correct.")
            else:
                crt_live_list = re.findall(r"current.*live.*%s" % vcpus_crt, output)
                logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
                if len(crt_live_list) != 1:
                    test.fail("vcpucount: current info is not correct.")

            # Check guest vcpu info
            ret = virsh.vcpuinfo(vm_name, ignore_status=True, debug=True)
            output = ret.stdout.strip()
            vcpu_lines = re.findall(r"VCPU:.*\n", output)
            logging.info("vcpuinfo vcpu_lines: \n %s", vcpu_lines)
            if len(vcpu_lines) != vcpus_crt:
                test.fail("vcpuinfo is not correct.")

            # Check cpu in guest
            if not utils_misc.check_if_vm_vcpu_match(vcpus_crt, vm):
                test.fail("cpu number in VM is not correct, it should be %s cpus" % vcpus_crt)

            # Check VM xml change for cold-plug/cold-unplug
            if config_vcpus:
                inactive_xml = virsh.dumpxml(vm_name, "--inactive").stdout.strip()
                crt_vcpus_xml = re.findall(r"vcpu.*current=.%s.*" %
                                           config_vcpus, inactive_xml)
                logging.info("dumpxml --inactive xml: \n %s", crt_vcpus_xml)
                if len(crt_vcpus_xml) != 1:
                    test.fail("Dumpxml with --inactive,"
                              "the vcpu current is not correct.")

            # Restart libvirtd
            libvirtd.restart()

            # Recheck VM xml
            re_dump_xml = virsh.dumpxml(vm_name).stdout.strip()
            re_vcpu_items = re.findall(r"vcpu.*", re_dump_xml)
            if vcpu_items != re_vcpu_items:
                test.fail("After restarting libvirtd,"
                          "VM xml changed unexpectedly.")

            # Check cgroup info
            en_vcpu_list = re.findall(r"vcpu.*enabled=.yes.*", re_dump_xml)
            for vcpu_sn in range(len(en_vcpu_list)):
                vcpu_id = en_vcpu_list[vcpu_sn].split("=")[1].split()[0].strip('\'')
                cmd = ("lscgroup| grep cpuset| grep %s| grep vcpu%s" %
                       (vm_name[-3:], vcpu_id))
                ret = process.run(cmd, ignore_status=True, shell=True)
                if ret.exit_status != 0:
                    test.fail("Failed to find lines about enabled vcpu%s "
                              "in lscgroup info." % vcpu_id)
    finally:
        # Recover libvirtd configration
        if config_libvirtd:
            config.restore()
            if os.path.exists(config_path):
                os.remove(config_path)

        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
Example #23
0
def run(test, params, env):
    """
    Test: vcpu hotplug.

    The command can change the number of virtual CPUs for a VM.
    1. Prepare the test environment, destroy or suspend a VM.
    2. Perform the virsh setvcpus operation.
    3. Recover the test environment.
    4. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    min_count = int(params.get("setvcpus_min_count", "1"))
    max_count = int(params.get("setvcpus_max_count", "2"))
    test_times = int(params.get("setvcpus_test_times", "1"))
    stress_type = params.get("stress_type", "")
    stress_param = params.get("stress_param", "")
    add_by_virsh = ("yes" == params.get("add_by_virsh"))
    del_by_virsh = ("yes" == params.get("del_by_virsh"))
    hotplug_timeout = int(params.get("hotplug_timeout", 30))
    test_set_max = max_count * 2

    # Save original configuration
    orig_config_xml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Set min/max of vcpu
    libvirt_xml.VMXML.set_vm_vcpus(vm_name,
                                   test_set_max,
                                   min_count,
                                   topology_correction=True)

    # prepare VM instance
    vm = libvirt_vm.VM(vm_name, params, test.bindir, env.get("address_cache"))

    # prepare guest-agent service
    vm.prepare_guest_agent()

    # Increase the workload
    load_vms = []
    if stress_type in ['cpu', 'memory', 'io']:
        params["stress_args"] = stress_param
    load_vms.append(vm)
    if stress_type in ['cpu', 'memory']:
        utils_test.load_stress("stress_in_vms", params, vms=load_vms)
    else:
        utils_test.load_stress("iozone_in_vms", params, vms=load_vms)

    session = vm.wait_for_login()
    try:
        # Clear dmesg before set vcpu
        session.cmd("dmesg -c")
        for i in range(test_times):
            # 1. Add vcpu
            add_result = libvirt.hotplug_domain_vcpu(vm, max_count,
                                                     add_by_virsh)
            add_status = add_result.exit_status
            # 1.1 check add status
            if add_status:
                if add_result.stderr.count("support"):
                    test.cancel("vcpu hotplug not supported, "
                                "no need to test any more:\n %s" %
                                add_result.stderr.strip())
                test.fail("Test failed for:\n %s" % add_result.stderr.strip())
            if not utils_misc.wait_for(
                    lambda: utils_misc.check_if_vm_vcpu_match(max_count, vm),
                    hotplug_timeout,
                    text="wait for vcpu online"):
                test.fail("vcpu hotplug failed")

            if 'ppc' not in platform.machine():
                # 1.2 check dmesg
                domain_add_dmesg = session.cmd_output("dmesg -c")
                dmesg1 = "CPU%d has been hot-added" % (max_count - 1)
                dmesg2 = "CPU %d got hotplugged" % (max_count - 1)
                if (not domain_add_dmesg.count(dmesg1)
                        and not domain_add_dmesg.count(dmesg2)):
                    test.fail("Cannot find hotplug info in dmesg: %s" %
                              domain_add_dmesg)
            # 1.3 check cpu related file
            online_cmd = "cat /sys/devices/system/cpu/cpu%d/online" \
                         % (max_count - 1)
            st, ot = session.cmd_status_output(online_cmd)
            if st:
                test.fail("Cannot find CPU%d after hotplug" % (max_count - 1))
            # 1.4 check online
            if not ot.strip().count("1"):
                test.fail("CPU%d is not online after hotplug: %s" %
                          ((max_count - 1), ot))
            # 1.5 check online interrupts info
            inter_on_output = session.cmd_output("cat /proc/interrupts")
            if not inter_on_output.count("CPU%d" % (int(max_count) - 1)):
                test.fail("CPU%d can not be found in "
                          "/proc/interrupts when it's online:%s" %
                          ((int(max_count) - 1), inter_on_output))
            # 1.6 offline vcpu
            off_st = session.cmd_status(
                "echo 0 > "
                "/sys/devices/system/cpu/cpu%d/online" % (max_count - 1))
            if off_st:
                test.fail("Set cpu%d offline failed!" % (max_count - 1))
            # 1.7 check offline interrupts info
            inter_off_output = session.cmd_output("cat /proc/interrupts")
            if inter_off_output.count("CPU%d" % (int(max_count) - 1)):
                test.fail("CPU%d can be found in /proc/interrupts"
                          " when it's offline" % (int(max_count) - 1))
            # 1.8 online vcpu
            on_st = session.cmd_status("echo 1 > "
                                       "/sys/devices/system/cpu/cpu%d/online" %
                                       (max_count - 1))
            if on_st:
                test.fail("Set cpu%d online failed!" % (max_count - 1))
            # 2. Del vcpu
            del_result = libvirt.hotplug_domain_vcpu(vm,
                                                     min_count,
                                                     del_by_virsh,
                                                     hotplug=False)
            del_status = del_result.exit_status
            if del_status:
                logging.info("del_result: %s" % del_result.stderr.strip())
                # A qemu older than 1.5 or an unplug for 1.6 will result in
                # the following failure.
                # TODO: when the CPU-hotplug feature becomes stable and robust,
                #       remove this code that handles the various exceptions
                if re.search("The command cpu-del has not been found",
                             del_result.stderr):
                    test.cancel("vcpu hotunplug not supported")
                if re.search("cannot change vcpu count", del_result.stderr):
                    test.cancel("unhotplug failed")
                if re.search("got wrong number of vCPU pids from QEMU monitor",
                             del_result.stderr):
                    test.cancel("unhotplug failed")
                # Process any message that contains the keyword 'support',
                # for example "unsupported"/"hasn't been supported" and so on
                if re.search("support", del_result.stderr):
                    test.cancel("vcpu hotunplug not supported")

                # Anything else is regarded as a real failure
                test.fail("Test fail for:\n %s" % del_result.stderr.strip())
            if not utils_misc.wait_for(
                    lambda: utils_misc.check_if_vm_vcpu_match(min_count, vm),
                    hotplug_timeout,
                    text="wait for vcpu offline"):
                test.fail("vcpu hotunplug failed")
            if 'ppc' not in platform.machine():
                domain_del_dmesg = session.cmd_output("dmesg -c")
                if not domain_del_dmesg.count("CPU %d is now offline" %
                                              (max_count - 1)):
                    test.fail("Cannot find hot-unplug info in dmesg: %s" %
                              domain_del_dmesg)
    except exceptions.TestCancel:
        # So far, QEMU doesn't support vcpu unplug; the unplug
        # operation may run into various errors.
        pass
    finally:
        utils_test.unload_stress("stress_in_vms", params, load_vms)
        if session:
            session.close()
        # Cleanup
        orig_config_xml.sync()
def run(test, params, env):
    """
    Domain CPU management testing.

    1. Prepare a domain for testing, install qemu-guest-agent if needed.
    2. Check vcpu numbers in vcpucount, vcpuinfo, domain xml,
       vcpupin and inside the domain.
    3. Plug vcpu for the domain.
    4. Repeat step 2 to check again.
    5. Control the domain (save, managedsave, s3, s4, etc.).
    6. Repeat step 2 to check again.
    7. Recover the domain (restore, wakeup, etc.).
    8. Repeat step 2 to check again.
    9. Unplug vcpu for the domain.
    10. Repeat step 2 to check again.
    11. Repeat step 5 to control the domain (as BZ#1088216 is not fixed, skip
        save/managedsave related actions).
    12. Repeat step 2 to check again.
    13. Repeat step 7 to recover domain.
    14. Repeat step 2 to check again.
    15. Recover test environment.
    """

    def manipulate_domain(vm_name, vm_operation, recover=False):
        """
        Operate domain to given state or recover it.

        :params vm_name: Name of the VM domain
        :params vm_operation: Operation to be performed on VM domain
                              like save, managedsave, suspend
        :params recover: flag to inform whether to set or reset
                         vm_operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
        if not recover:
            if vm_operation == "save":
                save_option = ""
                result = virsh.save(vm_name, save_file, save_option,
                                    ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "managedsave":
                managedsave_option = ""
                result = virsh.managedsave(vm_name, managedsave_option,
                                           ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmsuspend(vm_name, suspend_target,
                                            ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s4":
                suspend_target = "disk"
                result = virsh.dompmsuspend(vm_name, suspend_target,
                                            ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                # Wait domain state change: 'in shutdown' -> 'shut off'
                utils_misc.wait_for(lambda: virsh.is_dead(vm_name), 5)
            elif vm_operation == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                vm.reboot()
            else:
                logging.debug("No operation for the domain")

        else:
            if vm_operation == "save":
                if os.path.exists(save_file):
                    result = virsh.restore(save_file, ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif vm_operation in ["managedsave", "s4"]:
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmwakeup(vm_name, ignore_status=True,
                                           debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                pass
            else:
                logging.debug("No need recover the domain")

    def online_new_vcpu(vm, vcpu_plug_num):
        """
        For Fedora/RHEL7 guests, udev cannot online hot-added CPUs
        automatically (refer to BZ#968811 for details), so enable them manually.

        :params vm: VM object
        :params vcpu_plug_num: Hotplugged vcpu count
        """
        cpu_is_online = []
        session = vm.wait_for_login()
        for i in range(1, int(vcpu_plug_num)):
            cpu_is_online.append(False)
            cpu = "/sys/devices/system/cpu/cpu%s/online" % i
            cmd_s, cmd_o = session.cmd_status_output("cat %s" % cpu)
            logging.debug("cmd exist status: %s, cmd output %s", cmd_s, cmd_o)
            if cmd_s != 0:
                logging.error("Can not find cpu %s in domain", i)
            else:
                if cmd_o.strip() == "0":
                    if session.cmd_status("echo 1 > %s" % cpu) == 0:
                        cpu_is_online[i-1] = True
                    else:
                        logging.error("Fail to enable cpu %s online", i)
                else:
                    cpu_is_online[i-1] = True
        session.close()
        return False not in cpu_is_online

    def check_setvcpus_result(cmd_result, expect_error):
        """
        Check command result.

        For setvcpus, pass unsupported commands (plug or unplug vcpus) by
        checking command stderr.

        :params cmd_result: Command result
        :params expect_error: Whether to expect error True or False
        """
        if cmd_result.exit_status != 0:
            if expect_error:
                logging.debug("Expect fail: %s", cmd_result.stderr)
                return
            # setvcpu/hotplug is only available as of qemu 1.5 and it's still
            # evolving. In general the addition of vcpu's may use the QMP
            # "cpu_set" (qemu 1.5) or "cpu-add" (qemu 1.6 and later) commands.
            # The removal of vcpu's may work in qemu 1.5 due to how cpu_set
            # can set vcpus online or offline; however, there doesn't appear
            # to be a complementary cpu-del feature yet, so we can add, but
            # not delete in 1.6.

            # A 1.6 qemu will not allow the cpu-add command to be run on
            # a configuration using <os> machine property 1.4 or earlier.
            # That is the XML <os> element with the <type> property having
            # an attribute 'machine' which is a tuple of 3 elements separated
            # by a dash, such as "pc-i440fx-1.5" or "pc-q35-1.5".
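            # For reference (illustrative only), the monitor commands look
            # roughly like:
            #   HMP:  cpu_set 2 online                                   (qemu 1.5)
            #   QMP:  { "execute": "cpu-add", "arguments": { "id": 2 } } (qemu 1.6+)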
            if re.search("unable to execute QEMU command 'cpu-add'",
                         cmd_result.stderr):
                test.cancel("guest <os> machine property may be too"
                            "  old to allow hotplug")

            # A qemu older than 1.5 or an unplug for 1.6 will result in
            # the following failure.  In general, any time libvirt determines
            # it cannot support adding or removing a vCPU...
            if re.search("cannot change vcpu count of this domain",
                         cmd_result.stderr):
                test.cancel("Unsupport virsh setvcpu hotplug")

            # Maybe QEMU doesn't support unplug vcpu
            if re.search("Operation not supported: qemu didn't unplug the vCPUs",
                         cmd_result.stderr):
                test.cancel("Your qemu unsupport unplug vcpu")

            # Qemu guest agent version could be too low
            if re.search("The command guest-get-vcpus has not been found",
                         cmd_result.stderr):
                err_msg = "Your agent version is too low: %s" % cmd_result.stderr
                logging.warning(err_msg)
                test.cancel(err_msg)

            # Attempting to enable more vCPUs in the guest than is currently
            # enabled in the guest but less than the maximum count for the VM
            if re.search("requested vcpu count is greater than the count of "
                         "enabled vcpus in the domain",
                         cmd_result.stderr):
                logging.debug("Expect fail: %s", cmd_result.stderr)
                return

            # Otherwise, it seems we have a real error
            test.fail("Run failed with right command: %s"
                      % cmd_result.stderr)
        else:
            if expect_error:
                test.fail("Expect fail but run successfully")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_operation = params.get("vm_operation", "null")
    vcpu_max_num = int(params.get("vcpu_max_num"))
    vcpu_current_num = int(params.get("vcpu_current_num"))
    vcpu_plug = "yes" == params.get("vcpu_plug", "no")
    vcpu_plug_num = int(params.get("vcpu_plug_num"))
    vcpu_unplug = "yes" == params.get("vcpu_unplug", "no")
    vcpu_unplug_num = int(params.get("vcpu_unplug_num"))
    vcpu_max_timeout = int(params.get("vcpu_max_timeout", "480"))
    setvcpu_option = params.get("setvcpu_option", "")
    agent_channel = "yes" == params.get("agent_channel", "yes")
    install_qemuga = "yes" == params.get("install_qemuga", "no")
    start_qemuga = "yes" == params.get("start_qemuga", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    setvcpu_readonly = "yes" == params.get("setvcpu_readonly", "no")
    status_error = "yes" == params.get("status_error", "no")
    pin_before_plug = "yes" == params.get("pin_before_plug", "no")
    pin_after_plug = "yes" == params.get("pin_after_plug", "no")
    pin_before_unplug = "yes" == params.get("pin_before_unplug", "no")
    pin_after_unplug = "yes" == params.get("pin_after_unplug", "no")
    pin_vcpu = params.get("pin_vcpu")
    pin_cpu_list = params.get("pin_cpu_list", "x")
    check_after_plug_fail = "yes" == params.get("check_after_plug_fail", "no")
    with_stress = "yes" == params.get("run_stress", "no")
    iterations = int(params.get("test_itr", 1))
    topology_correction = "yes" == params.get("topology_correction", "no")
    # Init expect vcpu count values
    expect_vcpu_num = {'max_config': vcpu_max_num, 'max_live': vcpu_max_num,
                       'cur_config': vcpu_current_num,
                       'cur_live': vcpu_current_num,
                       'guest_live': vcpu_current_num}
    if check_after_plug_fail:
        expect_vcpu_num_bk = expect_vcpu_num.copy()
    # Init expect vcpu pin values
    expect_vcpupin = {}
    result_vcpu = True

    # Init cpu-list for vcpupin
    host_cpu_count = os.sysconf('SC_NPROCESSORS_CONF')
    if (int(host_cpu_count) < 2) and (not pin_cpu_list == "x"):
        test.cancel("We need more cpus on host in this case for the cpu-list"
                    "=%s. But current number of cpu on host is %s."
                    % (pin_cpu_list, host_cpu_count))

    cpus_list = cpu_util.cpu_online_list()
    logging.debug("Active cpus in host are %s", cpus_list)

    cpu_seq_str = ""
    for i in range(len(cpus_list) - 1):
        if int(cpus_list[i]) + 1 == int(cpus_list[i + 1]):
            cpu_seq_str = "%s-%s" % (cpus_list[i], cpus_list[i + 1])
            break

    if pin_cpu_list == "x":
        pin_cpu_list = cpus_list[-1]
    if pin_cpu_list == "x-y":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str
        else:
            pin_cpu_list = "%s-%s" % (cpus_list[0], cpus_list[0])
    elif pin_cpu_list == "x,y":
        pin_cpu_list = "%s,%s" % (cpus_list[0], cpus_list[1])
    elif pin_cpu_list == "x-y,^z":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str + ",^%s" % cpu_seq_str.split('-')[1]
        else:
            pin_cpu_list = "%s,%s,^%s" % (cpus_list[0], cpus_list[1],
                                          cpus_list[0])
    else:
        # Just use the value get from cfg
        pass

    need_mkswap = False
    # Back up domain XML
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    try:
        # Customize domain vcpu number
        if vm.is_alive():
            vm.destroy()
        if agent_channel:
            vmxml.set_agent_channel()
        else:
            vmxml.remove_agent_channels()
        vmxml.sync()

        vmxml.set_vm_vcpus(vm_name, vcpu_max_num, vcpu_current_num,
                           topology_correction=topology_correction)
        # Do not apply S3/S4 on power
        cpu_arch = platform.machine()
        if cpu_arch in ('x86_64', 'i386', 'i686'):
            vmxml.set_pm_suspend(vm_name, "yes", "yes")
        vm.start()
        if with_stress:
            bt = utils_test.run_avocado_bg(vm, params, test)
            if not bt:
                test.cancel("guest stress failed to start")
        # Create swap partition/file if necessary
        if vm_operation == "s4":
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition")
            vm.create_swap_partition()

        # Prepare qemu guest agent
        if install_qemuga:
            vm.prepare_guest_agent(prepare_xml=False, start=start_qemuga)
            vm.setenforce(0)
        else:
            # Remove qemu-guest-agent for negative test
            vm.remove_package('qemu-guest-agent')

        # Run test
        for _ in range(iterations):
            result_vcpu = utils_hotplug.check_vcpu_value(vm, expect_vcpu_num)
            # plug vcpu
            if vcpu_plug:
                # Pin vcpu
                if pin_before_plug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    expect_vcpupin = {pin_vcpu: pin_cpu_list}

                result = virsh.setvcpus(vm_name, vcpu_plug_num, setvcpu_option,
                                        readonly=setvcpu_readonly,
                                        ignore_status=True, debug=True)
                check_setvcpus_result(result, status_error)

                if setvcpu_option == "--config":
                    expect_vcpu_num['cur_config'] = vcpu_plug_num
                elif setvcpu_option == "--guest":
                    # setvcpus '--guest' only affects the vcpu number in the guest
                    expect_vcpu_num['guest_live'] = vcpu_plug_num
                else:
                    expect_vcpu_num['cur_live'] = vcpu_plug_num
                    expect_vcpu_num['guest_live'] = vcpu_plug_num
                    if not status_error:
                        if not utils_misc.wait_for(lambda: utils_misc.check_if_vm_vcpu_match(vcpu_plug_num, vm),
                                                   vcpu_max_timeout, text="wait for vcpu online") or not online_new_vcpu(vm, vcpu_plug_num):
                            test.fail("Fail to enable new added cpu")

                # Pin vcpu
                if pin_after_plug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    expect_vcpupin = {pin_vcpu: pin_cpu_list}

                if status_error and check_after_plug_fail:
                    result_vcpu = utils_hotplug.check_vcpu_value(vm,
                                                                 expect_vcpu_num_bk,
                                                                 {},
                                                                 setvcpu_option)

                if not status_error:
                    if restart_libvirtd:
                        utils_libvirtd.libvirtd_restart()

                    # Check vcpu number and related commands
                    result_vcpu = utils_hotplug.check_vcpu_value(vm,
                                                                 expect_vcpu_num,
                                                                 expect_vcpupin,
                                                                 setvcpu_option)

                    # Control domain
                    manipulate_domain(vm_name, vm_operation)

                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        result_vcpu = utils_hotplug.check_vcpu_value(vm,
                                                                     expect_vcpu_num,
                                                                     expect_vcpupin,
                                                                     setvcpu_option)

                    # Recover domain
                    manipulate_domain(vm_name, vm_operation, recover=True)

                    # Resuming the domain from S4 status may take a long time
                    # (QEMU bug), so wait for up to 10 mins and skip the remaining
                    # test steps if the domain does not resume successfully
                    try:
                        vm.wait_for_login(timeout=600)
                    except Exception as e:
                        test.warn("Skip remaining test steps as domain"
                                  " not resume in 10 mins: %s" % e)
                    # For vcpu hotplug/unplug without the '--config' flag, after
                    # suspending the domain to disk (shut off) and restarting it,
                    # the current live vcpu number recovers to its original value
                    if vm_operation == 's4':
                        if setvcpu_option.count("--config"):
                            expect_vcpu_num['cur_live'] = vcpu_plug_num
                            expect_vcpu_num['guest_live'] = vcpu_plug_num
                        elif setvcpu_option.count("--guest"):
                            expect_vcpu_num['guest_live'] = vcpu_plug_num
                        else:
                            expect_vcpu_num['cur_live'] = vcpu_current_num
                            expect_vcpu_num['guest_live'] = vcpu_current_num
                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        result_vcpu = utils_hotplug.check_vcpu_value(vm,
                                                                     expect_vcpu_num,
                                                                     expect_vcpupin,
                                                                     setvcpu_option)

            # Unplug vcpu
            # Since QEMU 2.2.0, by default all current vcpus are non-hotpluggable
            # when the VM starts, and vcpu 0 (id=1) is always required to be
            # present and non-hotpluggable, which means we can't hot-unplug these
            # vcpus directly. So we can either hotplug more vcpus before doing the
            # hot-unplug, or change the 'hotpluggable' attribute to 'yes' for all
            # vcpus except vcpu 0, so that libvirt can find enough hotpluggable
            # vcpus to reach the desired target vcpu count. As a simple prepare
            # step, here we choose to hotplug more vcpus.
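            # The alternative (not used here, shown for illustration) would be
            # to mark the vcpus as hotpluggable up front in the domain XML, e.g.:
            #   <vcpu id='1' enabled='yes' hotpluggable='yes'/>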
            if vcpu_unplug:
                if setvcpu_option == "--live":
                    logging.info("Hotplug vcpu to the maximum count to make"
                                 "sure all these new plugged vcpus are "
                                 "hotunpluggable")
                    result = virsh.setvcpus(vm_name, vcpu_max_num, '--live',
                                            debug=True)
                    libvirt.check_exit_status(result)
                # Pin vcpu
                if pin_before_unplug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    # As the vcpus will be unplugged later, set
                    # expect_vcpupin to empty
                    expect_vcpupin = {}

                # The setvcpus operation is asynchronous: even when it returns,
                # the operation may not be complete, so the guest vcpu number
                # needs to be polled.
                # For the case of unplugging vcpus from the maximum vcpu number
                # down to 1, keep observing after setvcpus returns until the
                # vcpu number stops decreasing.
                result = virsh.setvcpus(vm_name, vcpu_unplug_num,
                                        setvcpu_option,
                                        readonly=setvcpu_readonly,
                                        ignore_status=True, debug=True)
                unsupport_str = utils_hotplug.vcpuhotunplug_unsupport_str()
                if unsupport_str and (unsupport_str in result.stderr):
                    test.cancel("Vcpu hotunplug is not supported in this host:"
                                "\n%s" % result.stderr)
                try:
                    session = vm.wait_for_login()
                    cmd = "lscpu | grep \"^CPU(s):\""
                    operation = "setvcpus"
                    prev_output = -1
                    while True:
                        ret, output = session.cmd_status_output(cmd)
                        if ret:
                            test.error("Run lscpu failed, output: %s" % output)
                        output = output.split(":")[-1].strip()

                        if int(prev_output) == int(output):
                            break
                        prev_output = output
                        time.sleep(5)
                    logging.debug("CPUs available from inside guest after %s - %s",
                                  operation, output)
                    if int(output) != vcpu_unplug_num:
                        test.fail("CPU %s failed as cpus are not "
                                  "reflected from inside guest" % operation)
                finally:
                    if session:
                        session.close()

                check_setvcpus_result(result, status_error)
                if setvcpu_option == "--config":
                    expect_vcpu_num['cur_config'] = vcpu_unplug_num
                elif setvcpu_option == "--guest":
                    # setvcpus '--guest' only affects the vcpu number in the guest
                    expect_vcpu_num['guest_live'] = vcpu_unplug_num
                else:
                    expect_vcpu_num['cur_live'] = vcpu_unplug_num
                    expect_vcpu_num['guest_live'] = vcpu_unplug_num

                # Pin vcpu
                if pin_after_unplug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    expect_vcpupin = {pin_vcpu: pin_cpu_list}

                if not status_error:
                    if restart_libvirtd:
                        utils_libvirtd.libvirtd_restart()

                    # Check vcpu number and related commands
                    result_vcpu = utils_hotplug.check_vcpu_value(vm,
                                                                 expect_vcpu_num,
                                                                 expect_vcpupin,
                                                                 setvcpu_option)

                    # Control domain
                    manipulate_domain(vm_name, vm_operation)

                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        result_vcpu = utils_hotplug.check_vcpu_value(vm,
                                                                     expect_vcpu_num,
                                                                     expect_vcpupin,
                                                                     setvcpu_option)

                    # Recover domain
                    manipulate_domain(vm_name, vm_operation, recover=True)

                    # Resuming the domain from S4 status may take a long time
                    # (QEMU bug), so wait for up to 10 mins and skip the
                    # remaining test steps if the domain does not resume successfully
                    try:
                        vm.wait_for_login(timeout=600)
                    except Exception as e:
                        test.warn("Skip remaining test steps as domain"
                                  " not resume in 10 mins: %s" % e)
                    # For vcpu hotplug/unplug without the '--config' flag, after
                    # suspending the domain to disk (shut off) and restarting it,
                    # the current live vcpu number recovers to its original value
                    if vm_operation == 's4':
                        if setvcpu_option.count("--config"):
                            expect_vcpu_num['cur_live'] = vcpu_unplug_num
                            expect_vcpu_num['guest_live'] = vcpu_unplug_num
                        elif setvcpu_option.count("--guest"):
                            expect_vcpu_num['guest_live'] = vcpu_unplug_num
                        else:
                            expect_vcpu_num['cur_live'] = vcpu_current_num
                            expect_vcpu_num['guest_live'] = vcpu_current_num
                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        result_vcpu = utils_hotplug.check_vcpu_value(vm,
                                                                     expect_vcpu_num,
                                                                     expect_vcpupin,
                                                                     setvcpu_option)
    # Recover env
    finally:
        if need_mkswap:
            vm.cleanup_swap()
        if with_stress:
            bt.join(ignore_status=True)
        vm.destroy()
        backup_xml.sync()

    if not status_error:
        if not result_vcpu:
            test.fail("Test Failed")
def run(test, params, env):
    """
    Domain CPU management testing.

    1. Prepare a domain for testing, install qemu-guest-agent if needed.
    2. Check vcpu numbers in vcpucount, vcpuinfo, domain xml,
       vcpupin and inside the domain.
    3. Plug vcpu for the domain.
    4. Repeat step 2 to check again.
    5. Control the domain (save, managedsave, s3, s4, etc.).
    6. Repeat step 2 to check again.
    7. Recover the domain (restore, wakeup, etc.).
    8. Repeat step 2 to check again.
    9. Unplug vcpu for the domain.
    10. Repeat step 2 to check again.
    11. Repeat step 5 to control the domain (as BZ#1088216 is not fixed, skip
        save/managedsave related actions).
    12. Repeat step 2 to check again.
    13. Repeat step 7 to recover domain.
    14. Repeat step 2 to check again.
    15. Recover test environment.
    """

    def manipulate_domain(vm_name, vm_operation, recover=False):
        """
        Operate domain to given state or recover it.

        :params vm_name: Name of the VM domain
        :params vm_operation: Operation to be performed on VM domain
                              like save, managedsave, suspend
        :params recover: flag to inform whether to set or reset
                         vm_operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
        if not recover:
            if vm_operation == "save":
                save_option = ""
                result = virsh.save(vm_name, save_file, save_option,
                                    ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "managedsave":
                managedsave_option = ""
                result = virsh.managedsave(vm_name, managedsave_option,
                                           ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmsuspend(vm_name, suspend_target,
                                            ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s4":
                suspend_target = "disk"
                result = virsh.dompmsuspend(vm_name, suspend_target,
                                            ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                # Wait domain state change: 'in shutdown' -> 'shut off'
                utils_misc.wait_for(lambda: virsh.is_dead(vm_name), 5)
            elif vm_operation == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                vm.reboot()
                vm_uptime_init = vm.uptime()
            else:
                logging.debug("No operation for the domain")

        else:
            if vm_operation == "save":
                if os.path.exists(save_file):
                    result = virsh.restore(save_file, ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif vm_operation in ["managedsave", "s4"]:
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmwakeup(vm_name, ignore_status=True,
                                           debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                pass
            else:
                logging.debug("No need recover the domain")

    def online_new_vcpu(vm, vcpu_plug_num):
        """
        For Fedora/RHEL7 guests, udev cannot online hot-added CPUs
        automatically (refer to BZ#968811 for details), so enable them manually.

        :params vm: VM object
        :params vcpu_plug_num: Hotplugged vcpu count
        """
        cpu_is_online = []
        session = vm.wait_for_login()
        for i in range(1, int(vcpu_plug_num)):
            cpu_is_online.append(False)
            cpu = "/sys/devices/system/cpu/cpu%s/online" % i
            cmd_s, cmd_o = session.cmd_status_output("cat %s" % cpu)
            logging.debug("cmd exist status: %s, cmd output %s", cmd_s, cmd_o)
            if cmd_s != 0:
                logging.error("Can not find cpu %s in domain", i)
            else:
                if cmd_o.strip() == "0":
                    if session.cmd_status("echo 1 > %s" % cpu) == 0:
                        cpu_is_online[i-1] = True
                    else:
                        logging.error("Fail to enable cpu %s online", i)
                else:
                    cpu_is_online[i-1] = True
        session.close()
        return False not in cpu_is_online

    def check_setvcpus_result(cmd_result, expect_error):
        """
        Check command result.

        For setvcpus, pass unsupported commands (plug or unplug vcpus) by
        checking command stderr.

        :params cmd_result: Command result
        :params expect_error: Whether to expect error True or False
        """
        if cmd_result.exit_status != 0:
            if expect_error:
                logging.debug("Expect fail: %s", cmd_result.stderr)
                return
            # setvcpu/hotplug is only available as of qemu 1.5 and it's still
            # evolving. In general the addition of vcpu's may use the QMP
            # "cpu_set" (qemu 1.5) or "cpu-add" (qemu 1.6 and later) commands.
            # The removal of vcpu's may work in qemu 1.5 due to how cpu_set
            # can set vcpus online or offline; however, there doesn't appear
            # to be a complementary cpu-del feature yet, so we can add, but
            # not delete in 1.6.

            # A 1.6 qemu will not allow the cpu-add command to be run on
            # a configuration using <os> machine property 1.4 or earlier.
            # That is the XML <os> element with the <type> property having
            # an attribute 'machine' which is a tuple of 3 elements separated
            # by a dash, such as "pc-i440fx-1.5" or "pc-q35-1.5".
            if re.search("unable to execute QEMU command 'cpu-add'",
                         cmd_result.stderr):
                test.cancel("guest <os> machine property may be too"
                            "  old to allow hotplug")

            # A qemu older than 1.5 or an unplug for 1.6 will result in
            # the following failure.  In general, any time libvirt determines
            # it cannot support adding or removing a vCPU...
            if re.search("cannot change vcpu count of this domain",
                         cmd_result.stderr):
                test.cancel("Unsupport virsh setvcpu hotplug")

            # Maybe QEMU doesn't support unplug vcpu
            if re.search("Operation not supported: qemu didn't unplug the vCPUs",
                         cmd_result.stderr):
                test.cancel("Your qemu unsupport unplug vcpu")

            # Qemu guest agent version could be too low
            if re.search("The command guest-get-vcpus has not been found",
                         cmd_result.stderr):
                err_msg = "Your agent version is too low: %s" % cmd_result.stderr
                logging.warning(err_msg)
                test.cancel(err_msg)

            # Attempting to enable more vCPUs in the guest than is currently
            # enabled in the guest but less than the maximum count for the VM
            if re.search("requested vcpu count is greater than the count of "
                         "enabled vcpus in the domain",
                         cmd_result.stderr):
                logging.debug("Expect fail: %s", cmd_result.stderr)
                return

            # Otherwise, it seems we have a real error
            test.fail("Run failed with right command: %s"
                      % cmd_result.stderr)
        else:
            if expect_error:
                test.fail("Expect fail but run successfully")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_uptime_init = 0
    vm_operation = params.get("vm_operation", "null")
    vcpu_max_num = int(params.get("vcpu_max_num"))
    vcpu_current_num = int(params.get("vcpu_current_num"))
    vcpu_plug = "yes" == params.get("vcpu_plug", "no")
    vcpu_plug_num = int(params.get("vcpu_plug_num"))
    vcpu_unplug = "yes" == params.get("vcpu_unplug", "no")
    vcpu_unplug_num = int(params.get("vcpu_unplug_num"))
    vcpu_max_timeout = int(params.get("vcpu_max_timeout", "480"))
    setvcpu_option = params.get("setvcpu_option", "")
    agent_channel = "yes" == params.get("agent_channel", "yes")
    install_qemuga = "yes" == params.get("install_qemuga", "no")
    start_qemuga = "yes" == params.get("start_qemuga", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    setvcpu_readonly = "yes" == params.get("setvcpu_readonly", "no")
    status_error = "yes" == params.get("status_error", "no")
    pin_before_plug = "yes" == params.get("pin_before_plug", "no")
    pin_after_plug = "yes" == params.get("pin_after_plug", "no")
    pin_before_unplug = "yes" == params.get("pin_before_unplug", "no")
    pin_after_unplug = "yes" == params.get("pin_after_unplug", "no")
    pin_vcpu = params.get("pin_vcpu")
    pin_cpu_list = params.get("pin_cpu_list", "x")
    check_after_plug_fail = "yes" == params.get("check_after_plug_fail", "no")
    with_stress = "yes" == params.get("run_stress", "no")
    iterations = int(params.get("test_itr", 1))
    topology_correction = "yes" == params.get("topology_correction", "no")
    # Init expect vcpu count values
    expect_vcpu_num = {'max_config': vcpu_max_num, 'max_live': vcpu_max_num,
                       'cur_config': vcpu_current_num,
                       'cur_live': vcpu_current_num,
                       'guest_live': vcpu_current_num}
    if check_after_plug_fail:
        expect_vcpu_num_bk = expect_vcpu_num.copy()
    # Init expect vcpu pin values
    expect_vcpupin = {}
    result_failed = 0

    # Init cpu-list for vcpupin
    host_cpu_count = os.sysconf('SC_NPROCESSORS_CONF')
    if (int(host_cpu_count) < 2) and (not pin_cpu_list == "x"):
        test.cancel("We need more cpus on host in this case for the cpu-list"
                    "=%s. But current number of cpu on host is %s."
                    % (pin_cpu_list, host_cpu_count))

    cpus_list = cpu_util.cpu_online_list()
    logging.debug("Active cpus in host are %s", cpus_list)

    cpu_seq_str = ""
    for i in range(len(cpus_list) - 1):
        if int(cpus_list[i]) + 1 == int(cpus_list[i + 1]):
            cpu_seq_str = "%s-%s" % (cpus_list[i], cpus_list[i + 1])
            break

    if pin_cpu_list == "x":
        pin_cpu_list = cpus_list[-1]
    if pin_cpu_list == "x-y":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str
        else:
            pin_cpu_list = "%s-%s" % (cpus_list[0], cpus_list[0])
    elif pin_cpu_list == "x,y":
        pin_cpu_list = "%s,%s" % (cpus_list[0], cpus_list[1])
    elif pin_cpu_list == "x-y,^z":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str + ",^%s" % cpu_seq_str.split('-')[1]
        else:
            pin_cpu_list = "%s,%s,^%s" % (cpus_list[0], cpus_list[1],
                                          cpus_list[0])
    else:
        # Just use the value get from cfg
        pass

    need_mkswap = False
    # Back up domain XML
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    try:
        # Customize domain vcpu number
        if vm.is_alive():
            vm.destroy()
        if agent_channel:
            vmxml.set_agent_channel()
        else:
            vmxml.remove_agent_channels()
        vmxml.sync()

        vmxml.set_vm_vcpus(vm_name, vcpu_max_num, vcpu_current_num,
                           topology_correction=topology_correction)
        # Do not apply S3/S4 on power
        cpu_arch = platform.machine()
        if cpu_arch in ('x86_64', 'i386', 'i686'):
            vmxml.set_pm_suspend(vm_name, "yes", "yes")
        vm.start()
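        # Record the initial guest uptime so an unexpected reboot can be
        # detected at the end of the test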
        vm_uptime_init = vm.uptime()
        if with_stress:
            bt = utils_test.run_avocado_bg(vm, params, test)
            if not bt:
                test.cancel("guest stress failed to start")
        # Create swap partition/file if necessary
        if vm_operation == "s4":
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition")
            vm.create_swap_partition()

        # Prepare qemu guest agent
        if install_qemuga:
            vm.prepare_guest_agent(prepare_xml=False, start=start_qemuga)
            vm.setenforce(0)
        else:
            # Remove qemu-guest-agent for negative test
            vm.remove_package('qemu-guest-agent')

        # Run test
        for _ in range(iterations):
            if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num):
                logging.error("Expected vcpu check failed")
                result_failed += 1
            # plug vcpu
            if vcpu_plug:
                # Pin vcpu
                if pin_before_plug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    expect_vcpupin = {pin_vcpu: pin_cpu_list}

                result = virsh.setvcpus(vm_name, vcpu_plug_num, setvcpu_option,
                                        readonly=setvcpu_readonly,
                                        ignore_status=True, debug=True)
                check_setvcpus_result(result, status_error)

                if setvcpu_option == "--config":
                    expect_vcpu_num['cur_config'] = vcpu_plug_num
                elif setvcpu_option == "--guest":
                    # vcpuset '--guest' only affect vcpu number in guest
                    expect_vcpu_num['guest_live'] = vcpu_plug_num
                else:
                    expect_vcpu_num['cur_live'] = vcpu_plug_num
                    expect_vcpu_num['guest_live'] = vcpu_plug_num
                    if not status_error:
                        if not utils_misc.wait_for(lambda: utils_misc.check_if_vm_vcpu_match(vcpu_plug_num, vm),
                                                   vcpu_max_timeout, text="wait for vcpu online") or not online_new_vcpu(vm, vcpu_plug_num):
                            test.fail("Fail to enable new added cpu")

                # Pin vcpu
                if pin_after_plug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    expect_vcpupin = {pin_vcpu: pin_cpu_list}

                if status_error and check_after_plug_fail:
                    if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num_bk, {}, setvcpu_option):
                        logging.error("Expected vcpu check failed")
                        result_failed += 1

                if not status_error:
                    if restart_libvirtd:
                        utils_libvirtd.libvirtd_restart()

                    # Check vcpu number and related commands
                    if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option):
                        logging.error("Expected vcpu check failed")
                        result_failed += 1

                    # Control domain
                    manipulate_domain(vm_name, vm_operation)

                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option):
                            logging.error("Expected vcpu check failed")
                            result_failed += 1

                    # Recover domain
                    manipulate_domain(vm_name, vm_operation, recover=True)

                    # Resuming the domain from S4 may take a long time (QEMU
                    # bug), so wait up to 10 minutes and skip the remaining
                    # test steps if the domain does not resume successfully
                    try:
                        vm.wait_for_login(timeout=600)
                    except Exception as e:
                        test.warn("Skip remaining test steps as domain"
                                  " not resume in 10 mins: %s" % e)
                    # For vcpu hotplug/unplug without the '--config' flag, after
                    # suspending the domain to disk (shut off) and restarting it,
                    # the current live vcpu number recovers to its original value
                    if vm_operation == 's4':
                        if setvcpu_option.count("--config"):
                            expect_vcpu_num['cur_live'] = vcpu_plug_num
                            expect_vcpu_num['guest_live'] = vcpu_plug_num
                        elif setvcpu_option.count("--guest"):
                            expect_vcpu_num['guest_live'] = vcpu_plug_num
                        else:
                            expect_vcpu_num['cur_live'] = vcpu_current_num
                            expect_vcpu_num['guest_live'] = vcpu_current_num
                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option):
                            logging.error("Expected vcpu check failed")
                            result_failed += 1

            # Unplug vcpu
            # Since QEMU 2.2.0, all vcpus present when the VM starts are
            # non-hotpluggable by default, and vcpu 0 (id=1) must always be
            # present and non-hotpluggable, so these vcpus cannot be
            # hot-unplugged directly. We can either hotplug more vcpus before
            # hot-unplugging, or set the 'hotpluggable' attribute to 'yes' on
            # every vcpu except vcpu 0, so that libvirt can find enough
            # hotpluggable vcpus to reach the desired target vcpu count. To
            # keep the preparation step simple, we hotplug more vcpus here.
            if vcpu_unplug:
                if setvcpu_option == "--live":
                    logging.info("Hotplug vcpu to the maximum count to make"
                                 "sure all these new plugged vcpus are "
                                 "hotunpluggable")
                    result = virsh.setvcpus(vm_name, vcpu_max_num, '--live',
                                            debug=True)
                    libvirt.check_exit_status(result)
                # Pin vcpu
                if pin_before_unplug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    # As the vcpu will be unplugged later, set
                    # expect_vcpupin to empty
                    expect_vcpupin = {}

                # The setvcpus operation is asynchronous: its return does not
                # guarantee completion, so the guest vcpu count must be polled.
                # When unplugging vcpus from the maximum count down to 1, keep
                # observing after setvcpus returns until the vcpu number has
                # gradually dropped to 1.
                result = virsh.setvcpus(vm_name, vcpu_unplug_num,
                                        setvcpu_option,
                                        readonly=setvcpu_readonly,
                                        ignore_status=True, debug=True)
                unsupport_str = utils_hotplug.vcpuhotunplug_unsupport_str()
                if unsupport_str and (unsupport_str in result.stderr):
                    test.cancel("Vcpu hotunplug is not supported in this host:"
                                "\n%s" % result.stderr)
                session = None
                try:
                    session = vm.wait_for_login()
                    cmd = "lscpu | grep \"^CPU(s):\""
                    operation = "setvcpus"
                    prev_output = -1
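                    # Poll lscpu until the reported CPU count stops changing,
                    # i.e. the asynchronous unplug has settled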
                    while True:
                        ret, output = session.cmd_status_output(cmd)
                        if ret:
                            test.error("Run lscpu failed, output: %s" % output)
                        output = output.split(":")[-1].strip()

                        if int(prev_output) == int(output):
                            break
                        prev_output = output
                        time.sleep(5)
                    logging.debug("CPUs available from inside guest after %s - %s",
                                  operation, output)
                    if int(output) != vcpu_unplug_num:
                        test.fail("CPU %s failed as cpus are not "
                                  "reflected from inside guest" % operation)
                finally:
                    if session:
                        session.close()

                check_setvcpus_result(result, status_error)
                if setvcpu_option == "--config":
                    expect_vcpu_num['cur_config'] = vcpu_unplug_num
                elif setvcpu_option == "--guest":
                    # vcpuset '--guest' only affect vcpu number in guest
                    expect_vcpu_num['guest_live'] = vcpu_unplug_num
                else:
                    expect_vcpu_num['cur_live'] = vcpu_unplug_num
                    expect_vcpu_num['guest_live'] = vcpu_unplug_num

                # Pin vcpu
                if pin_after_unplug:
                    result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list,
                                           ignore_status=True, debug=True)
                    libvirt.check_exit_status(result)
                    expect_vcpupin = {pin_vcpu: pin_cpu_list}

                if not status_error:
                    if restart_libvirtd:
                        utils_libvirtd.libvirtd_restart()

                    # Check vcpu number and related commands
                    if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option):
                        logging.error("Expected vcpu check failed")
                        result_failed += 1

                    # Control domain
                    manipulate_domain(vm_name, vm_operation)

                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option):
                            logging.error("Expected vcpu check failed")
                            result_failed += 1

                    # Recover domain
                    manipulate_domain(vm_name, vm_operation, recover=True)

                    # Resuming the domain from S4 may take a long time (QEMU
                    # bug), so wait up to 10 minutes and skip the remaining
                    # test steps if the domain does not resume successfully
                    try:
                        vm.wait_for_login(timeout=600)
                    except Exception as e:
                        test.warn("Skip remaining test steps as domain"
                                  " not resume in 10 mins: %s" % e)
                    # For vcpu hotplug/unplug without the '--config' flag, after
                    # suspending the domain to disk (shut off) and restarting it,
                    # the current live vcpu number recovers to its original value
                    if vm_operation == 's4':
                        if setvcpu_option.count("--config"):
                            expect_vcpu_num['cur_live'] = vcpu_unplug_num
                            expect_vcpu_num['guest_live'] = vcpu_unplug_num
                        elif setvcpu_option.count("--guest"):
                            expect_vcpu_num['guest_live'] = vcpu_unplug_num
                        else:
                            expect_vcpu_num['cur_live'] = vcpu_current_num
                            expect_vcpu_num['guest_live'] = vcpu_current_num
                    if vm_operation != "null":
                        # Check vcpu number and related commands
                        if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option):
                            logging.error("Expected vcpu check failed")
                            result_failed += 1
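        # An uptime lower than the initial value means the guest rebooted at
        # some point during the test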
        if vm.uptime() < vm_uptime_init:
            test.fail("Unexpected VM reboot detected in between test")
    # Recover env
    finally:
        if need_mkswap:
            vm.cleanup_swap()
        if with_stress and bt:
            bt.join(ignore_status=True)
        vm.destroy()
        backup_xml.sync()

    if not status_error and result_failed > 0:
        test.fail("%d vcpu check(s) failed during the test" % result_failed)