def run(test, params, env):
    """
    Test to change the kernel param based on user input.

    1. Prepare test environment, boot the guest
    2. Change the kernel parameter as per user input
    3. Reboot the guest and check whether /proc/cmdline reflects the change
    4. Check the boot log in guest dmesg and validate
    5. Perform additional test operations, if any, based on the kernel
       parameter change
    6. Recover test environment
    """
    vms = params.get("vms").split()
    kernel_param = params.get("kernel_param", "quiet")
    kernel_param_remove = params.get("kernel_param_remove", "")
    if not kernel_param:
        kernel_param = None
    if not kernel_param_remove:
        kernel_param_remove = None
    cpu_check = params.get("hardware", "").upper()
    boot_log = params.get("boot_log", None)
    check_cmdline_only = "yes" == params.get("check_cmdline_only", "no")
    status_error = params.get("status_error", "no") == "yes"
    vm_dict = {}
    vm_list = env.get_all_vms()
    # Ensure hosts that don't support Radix MMU get skipped
    if cpu_check:
        cpu_model = cpu.get_cpu_info()['Model name'].upper()
        if cpu_check not in cpu_model:
            logging.info("This test will work for %s", cpu_check)
            test.skip("Test is not applicable for %s" % cpu_model)
    # back up vmxml
    for vm_name in vms:
        vm_dict[vm_name] = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    session = None
    try:
        for vm in vm_list:
            session = vm.wait_for_login()
            if check_cmdline_only:
                check_cmdline(
                    session,
                    params.get(
                        "expect_in_cmdline",
                        "expect_in_cmdline not defined in test configuration"))
            else:
                utils_test.update_boot_option(vm,
                                              args_added=kernel_param,
                                              args_removed=kernel_param_remove,
                                              need_reboot=True)
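                # Note (added comment, not in the original snippet):
                # utils_test.update_boot_option() is expected to edit the
                # guest kernel command line and reboot the guest, so the
                # change becomes visible in /proc/cmdline (docstring
                # steps 2-3).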
            if boot_log:
                session = vm.wait_for_login()
                # Ensure guests that don't support Radix MMU get skipped
                if cpu_check:
                    cmd = "grep cpu /proc/cpuinfo | awk '{print $3}' | "
                    cmd += "head -n 1"
                    status, output = session.cmd_status_output(cmd)
                    if status:
                        test.error("couldn't get cpu information from guest "
                                   "%s" % vm.name)
                    if cpu_check not in output.upper() and "radix" in boot_log:
                        test.skip("radix MMU not supported in %s" % output)
                status, output = session.cmd_status_output("dmesg")
                if status:
                    logging.error(output)
                    test.error("unable to get dmesg from guest: %s" % vm.name)
                if status_error:
                    if boot_log in output:
                        test.fail("Able to find %s in dmesg of guest: "
                                  "%s" % (boot_log, vm.name))
                    logging.info("unable to find %s in dmesg of guest: %s",
                                 boot_log, vm.name)
                else:
                    if boot_log not in output:
                        test.fail("unable to find %s in dmesg of guest: "
                                  "%s" % (boot_log, vm.name))
                    logging.info("Able to find %s in dmesg of guest: %s",
                                 boot_log, vm.name)
            if session:
                session.close()
    finally:
        # close the session and recover the vms
        if session:
            session.close()
        for vm in vm_list:
            vm.destroy()
            vm_dict[vm.name].sync()
Example #2
    def output_check(nodeinfo_output):
        # Check CPU model
        cpu_model_nodeinfo = _check_nodeinfo(nodeinfo_output, "CPU model", 3)
        cpu_arch = platform.machine()
        if not re.match(cpu_model_nodeinfo, cpu_arch):
            test.fail(
                "Virsh nodeinfo output didn't match CPU model")

        # Check number of CPUs; nodeinfo CPU(s) reports the online threads in
        # the system, so compare it against the online CPUs found in sysfs
        cpus_nodeinfo = _check_nodeinfo(nodeinfo_output, "CPU(s)", 2)
        cmd = "cat /sys/devices/system/cpu/cpu*/online | grep 1 | wc -l"
        cpus_online = process.run(cmd, ignore_status=True,
                                  shell=True).stdout_text.strip()
        cmd = "cat /sys/devices/system/cpu/cpu*/online | wc -l"
        cpus_total = process.run(cmd, ignore_status=True,
                                 shell=True).stdout_text.strip()
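        # Note (added comment): cpu0 usually cannot be offlined and therefore
        # often has no 'online' sysfs node, so it is missed by the commands
        # above; the block below adds it back in that case.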
        if not os.path.exists('/sys/devices/system/cpu/cpu0/online'):
            cpus_online = str(int(cpus_online) + 1)
            cpus_total = str(int(cpus_total) + 1)

        logging.debug("host online cpus are %s", cpus_online)
        logging.debug("host total cpus are %s", cpus_total)

        if cpus_nodeinfo != cpus_online:
            if 'ppc' in cpu_arch:
                if cpus_nodeinfo != cpus_total:
                    test.fail("Virsh nodeinfo output of CPU(s) on"
                              " ppc did not match all threads in "
                              "the system")
            else:
                test.fail("Virsh nodeinfo output didn't match "
                          "number of CPU(s)")

        # Check CPU frequency, frequency is under clock for ppc
        cpu_frequency_nodeinfo = _check_nodeinfo(
            nodeinfo_output, 'CPU frequency', 3)
        cmd = ("cat /proc/cpuinfo | grep -E 'cpu MHz|clock|BogoMIPS' | "
               "head -n1 | awk -F: '{print $2}' | awk -F. '{print $1}'")
        cmd_result = process.run(cmd, ignore_status=True, shell=True)
        cpu_frequency_os = cmd_result.stdout_text.strip()
        logging.debug("cpu_frequency_nodeinfo=%s cpu_frequency_os=%s",
                      cpu_frequency_nodeinfo, cpu_frequency_os)
        #
        # Matching CPU frequency is not an exact science on today's modern
        # processors and OSes. CPUs can vary their execution speed based on
        # the current workload in order to save energy and keep cool.
        # Thus, since we're getting the values at disparate points in time,
        # we cannot necessarily do a pure comparison.
        # So, let's get the absolute value of the difference and ensure
        # that it's within 20 percent of each value to give us enough of
        # a "fudge" factor to declare "close enough". Don't return a failure;
        # just print a debug message and move on.
        diffval = abs(int(cpu_frequency_nodeinfo) - int(cpu_frequency_os))
        if (float(diffval) / float(cpu_frequency_nodeinfo) > 0.20 or
                float(diffval) / float(cpu_frequency_os) > 0.20):
            logging.debug("Virsh nodeinfo output didn't match CPU "
                          "frequency within 20 percent")

        # Get CPU topology from virsh capabilities xml
        cpu_topology = capability_xml.CapabilityXML()['cpu_topology']
        logging.debug("Cpu topology in virsh capabilities output: %s",
                      cpu_topology)

        # Check CPU socket(s)
        cpu_sockets_nodeinfo = int(
            _check_nodeinfo(nodeinfo_output, 'CPU socket(s)', 3))
        # CPU socket(s) in virsh nodeinfo is Total sockets in each node, not
        # total sockets in the system, so get total sockets in one node and
        # check with it
        node_info = utils_misc.NumaInfo()
        node_online_list = node_info.get_online_nodes()
        cmd = "cat /sys/devices/system/node/node%s" % node_online_list[0]
        cmd += "/cpu*/topology/physical_package_id | uniq |wc -l"
        cmd_result = process.run(cmd, ignore_status=True, shell=True)
        total_sockets_in_node = int(cmd_result.stdout_text.strip())
        if total_sockets_in_node != cpu_sockets_nodeinfo:
            test.fail("Virsh nodeinfo output didn't match CPU "
                      "socket(s) of host OS")
        if cpu_sockets_nodeinfo != int(cpu_topology['sockets']):
            test.fail("Virsh nodeinfo output didn't match CPU "
                      "socket(s) of virsh capabilities output")

        # Check Core(s) per socket
        cores_per_socket_nodeinfo = _check_nodeinfo(
            nodeinfo_output, 'Core(s) per socket', 4)
        cmd = "lscpu | grep 'Core(s) per socket' | head -n1 | awk '{print $4}'"
        cmd_result = process.run(cmd, ignore_status=True, shell=True)
        cores_per_socket_os = cmd_result.stdout_text.strip()
        spec_numa = False
        if not re.match(cores_per_socket_nodeinfo, cores_per_socket_os):
            # for spec NUMA arch, the output of nodeinfo is in a spec format
            cpus_os = cpu.get_cpu_info().get("CPU(s)")
            numa_cells_nodeinfo = _check_nodeinfo(
                nodeinfo_output, 'NUMA cell(s)', 3)
            if (re.match(cores_per_socket_nodeinfo, cpus_os) and
                    re.match(numa_cells_nodeinfo, "1")):
                spec_numa = True
            else:
                test.fail("Virsh nodeinfo output didn't match "
                          "CPU(s) or Core(s) per socket of host OS")
        if cores_per_socket_nodeinfo != cpu_topology['cores']:
            test.fail("Virsh nodeinfo output didn't match Core(s) "
                      "per socket of virsh capabilities output")
        # Check Thread(s) per core
        threads_per_core_nodeinfo = _check_nodeinfo(nodeinfo_output,
                                                    'Thread(s) per core', 4)
        if not spec_numa:
            if threads_per_core_nodeinfo != cpu_topology['threads']:
                test.fail("Virsh nodeinfo output didn't match"
                          "Thread(s) per core of virsh"
                          "capabilities output")
        else:
            if threads_per_core_nodeinfo != "1":
                test.fail("Virsh nodeinfo output didn't match"
                          "Thread(s) per core of virsh"
                          "capabilities output")
        # Check Memory size
        memory_size_nodeinfo = int(
            _check_nodeinfo(nodeinfo_output, 'Memory size', 3))
        memory_size_os = 0
        if libvirt_version.version_compare(2, 0, 0):
            for i in node_online_list:
                node_memory = node_info.read_from_node_meminfo(i, 'MemTotal')
                memory_size_os += int(node_memory)
        else:
            memory_size_os = utils_memory.memtotal()
        logging.debug('The host total memory from nodes is %s', memory_size_os)

        if memory_size_nodeinfo != memory_size_os:
            test.fail("Virsh nodeinfo output didn't match "
                      "Memory size")
Example #3
def run(test, params, env):
    """
    Test guest NUMA settings: numatune memory/memnode, guest <numa> cells,
    vCPU topology and hugepage backing, verified against the qemu command
    line and from inside the guest.
    """
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes
    arch = platform.machine()
    if 'ppc64' in arch:
        try:
            ppc_memory_nodeset = ""
            nodes = params['memory_nodeset']
            if '-' in nodes:
                for n in range(int(nodes.split('-')[0]),
                               int(nodes.split('-')[1])):
                    ppc_memory_nodeset += str(node_list[n]) + ','
                ppc_memory_nodeset += str(node_list[int(nodes.split('-')[1])])
            else:
                node_lst = nodes.split(',')
                for n in range(len(node_lst) - 1):
                    ppc_memory_nodeset += str(node_list[int(
                        node_lst[n])]) + ','
                ppc_memory_nodeset += str(node_list[int(node_lst[-1])])
            params['memory_nodeset'] = ppc_memory_nodeset
        except IndexError:
            test.cancel("No of numas in config does not match with no of "
                        "online numas in system")
        except utils_params.ParamNotFound:
            pass
        pkeys = ('memnode_nodeset', 'page_nodenum')
        for pkey in pkeys:
            for key in params.keys():
                if pkey in key:
                    params[key] = str(node_list[int(params[key])])
        # Modify qemu command line
        try:
            if params['qemu_cmdline_mem_backend_1']:
                memory_nodeset = sorted(params['memory_nodeset'].split(','))
                if len(memory_nodeset) > 1:
                    if int(memory_nodeset[1]) - int(memory_nodeset[0]) == 1:
                        qemu_cmdline = "memory-backend-ram,.*?id=ram-node1," \
                                       ".*?host-nodes=%s-%s,policy=bind" % \
                                       (memory_nodeset[0], memory_nodeset[1])
                    else:
                        qemu_cmdline = "memory-backend-ram,.*?id=ram-node1," \
                                       ".*?host-nodes=%s,.*?host-nodes=%s,policy=bind" % \
                                       (memory_nodeset[0], memory_nodeset[1])
                    params['qemu_cmdline_mem_backend_1'] = qemu_cmdline
        except utils_params.ParamNotFound:
            pass
        try:
            if params['qemu_cmdline_mem_backend_0']:
                qemu_cmdline = params['qemu_cmdline_mem_backend_0']
                params['qemu_cmdline_mem_backend_0'] = qemu_cmdline.replace(
                    ".*?host-nodes=1",
                    ".*?host-nodes=%s" % params['memnode_nodeset_0'])
        except utils_params.ParamNotFound:
            pass
    vcpu_num = int(params.get("vcpu_num", 2))
    max_mem = int(params.get("max_mem", 1048576))
    max_mem_unit = params.get("max_mem_unit", 'KiB')
    vcpu_placement = params.get("vcpu_placement", 'static')
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
    mode_dict = {
        'strict': 'bind',
        'preferred': 'prefer',
        'interleave': 'interleave'
    }

    # Prepare numatune memory parameter dict and list
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    memnode_tuple = ('memnode_cellid', 'memnode_mode', 'memnode_nodeset')
    numa_memnode = handle_param(memnode_tuple, params)
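    # Note (added comment): handle_param() is a helper defined elsewhere in
    # the original test module; it gathers indexed params (e.g.
    # memnode_mode_0, memnode_nodeset_0) into a list of dicts, so
    # numa_memnode ends up looking roughly like
    #   [{'cellid': '0', 'mode': 'strict', 'nodeset': '0'}]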

    if numa_memnode:
        if not libvirt_version.version_compare(1, 2, 7):
            test.cancel("Setting hugepages more specifically per "
                        "numa node not supported on current "
                        "version")

    # Prepare cpu numa cell parameter
    topology = {}
    topo_tuple = ('sockets', 'cores', 'threads')
    for key in topo_tuple:
        if params.get(key):
            topology[key] = params.get(key)

    cell_tuple = ('cell_id', 'cell_cpus', 'cell_memory')
    numa_cell = handle_param(cell_tuple, params)

    # Prepare qemu cmdline check parameter
    cmdline_tuple = ("qemu_cmdline", )
    cmdline_list = handle_param(cmdline_tuple, params)

    # Prepare hugepages parameter
    backup_list = []
    page_tuple = ('vmpage_size', 'vmpage_unit', 'vmpage_nodeset')
    page_list = handle_param(page_tuple, params)
    nr_pagesize_total = params.get("nr_pagesize_total")
    deallocate = False

    if page_list:
        if not libvirt_version.version_compare(1, 2, 5):
            test.cancel("Setting hugepages more specifically per "
                        "numa node not supported on current "
                        "version")

    hp_cl = test_setup.HugePageConfig(params)
    supported_hp_size = hp_cl.get_multi_supported_hugepage_size()
    mount_path = []
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    qemu_conf_restore = False

    def _update_qemu_conf():
        """
        Mount hugepage path, update qemu conf then restart libvirtd
        """
        size_dict = {'2048': '2M', '1048576': '1G', '16384': '16M'}
        for page in page_list:
            if page['size'] not in supported_hp_size:
                test.cancel("Hugepage size [%s] isn't supported, "
                            "please verify kernel cmdline configuration." %
                            page['size'])
            m_path = "/dev/hugepages%s" % size_dict[page['size']]
            hp_cl.hugepage_size = int(page['size'])
            hp_cl.hugepage_path = m_path
            hp_cl.mount_hugepage_fs()
            mount_path.append(m_path)
        if mount_path:
            qemu_conf.hugetlbfs_mount = mount_path
            libvirtd.restart()

    try:
        # Get host numa node list
        logging.debug("host node list is %s", node_list)
        used_node = []
        if numa_memory.get('nodeset'):
            used_node += cpu.cpus_parser(numa_memory['nodeset'])
        if numa_memnode:
            for i in numa_memnode:
                used_node += cpu.cpus_parser(i['nodeset'])
        if page_list:
            host_page_tuple = ("hugepage_size", "page_num", "page_nodenum")
            h_list = handle_param(host_page_tuple, params)
            h_nodenum = [
                h_list[p_size]['nodenum'] for p_size in range(len(h_list))
            ]
            for i in h_nodenum:
                used_node += cpu.cpus_parser(i)
        if used_node and not status_error:
            logging.debug("set node list is %s", used_node)
            used_node = list(set(used_node))
            for i in used_node:
                if i not in node_list:
                    test.cancel("%s in nodeset out of range" % i)
                mem_size = host_numa_node.read_from_node_meminfo(i, 'MemTotal')
                logging.debug("the memory total in the node %s is %s", i,
                              mem_size)
                if not int(mem_size):
                    test.cancel("node %s memory is empty" % i)

        # set hugepage with qemu.conf and mount path
        _update_qemu_conf()
        qemu_conf_restore = True

        # set hugepage with total number or per-node number
        if nr_pagesize_total:
            # Only set the total number of 2M hugepages, since runtime updates
            # of the total 1G hugepage count are not supported.
            deallocate = True
            hp_cl.target_hugepages = int(nr_pagesize_total)
            hp_cl.set_hugepages()
        if page_list:
            hp_size = [h_list[p_size]['size'] for p_size in range(len(h_list))]
            multi_hp_size = hp_cl.get_multi_supported_hugepage_size()
            for size in hp_size:
                if size not in multi_hp_size:
                    test.cancel("The hugepage size %s not "
                                "supported or not configured under"
                                " current running kernel." % size)
            # backup node page setting and set new value
            for i in h_list:
                node_val = hp_cl.get_node_num_huge_pages(
                    i['nodenum'], i['size'])
                # set hugepages per node if the current value is not enough;
                # per-node runtime updates of the 1G hugepage count are
                # supported by the kernel
                if int(i['num']) > node_val:
                    node_dict = i.copy()
                    node_dict['num'] = node_val
                    backup_list.append(node_dict)
                    hp_cl.set_node_num_huge_pages(i['num'], i['nodenum'],
                                                  i['size'])
                    node_val_after_set = hp_cl.get_node_num_huge_pages(
                        i['nodenum'], i['size'])
                    if node_val_after_set < int(i['num']):
                        test.cancel("There is not enough memory to allocate.")

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.vcpu = vcpu_num
        vmxml.max_mem = max_mem
        vmxml.max_mem_unit = max_mem_unit
        vmxml.current_mem = max_mem
        vmxml.current_mem_unit = max_mem_unit

        # numatune setting
        if numa_memnode:
            vmxml.numa_memory = numa_memory
            vmxml.numa_memnode = numa_memnode
            del vmxml.numa_memory
        if numa_memory:
            vmxml.numa_memory = numa_memory

        # vcpu placement setting
        vmxml.placement = vcpu_placement

        # guest numa cpu setting
        vmcpuxml = libvirt_xml.vm_xml.VMCPUXML()
        vmcpuxml.xml = "<cpu><numa/></cpu>"
        if topology:
            vmcpuxml.topology = topology
        logging.debug(vmcpuxml.numa_cell)
        vmcpuxml.numa_cell = numa_cell
        logging.debug(vmcpuxml.numa_cell)
        vmxml.cpu = vmcpuxml
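        # Illustrative only (added comment): with a topology and two cells the
        # resulting element is roughly
        #   <cpu>
        #     <topology sockets='2' cores='2' threads='1'/>
        #     <numa>
        #       <cell id='0' cpus='0-1' memory='524288' unit='KiB'/>
        #       <cell id='1' cpus='2-3' memory='524288' unit='KiB'/>
        #     </numa>
        #   </cpu>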

        # hugepages setting
        if page_list:
            membacking = libvirt_xml.vm_xml.VMMemBackingXML()
            hugepages = libvirt_xml.vm_xml.VMHugepagesXML()
            pagexml_list = []
            for i in range(len(page_list)):
                pagexml = hugepages.PageXML()
                pagexml.update(page_list[i])
                pagexml_list.append(pagexml)
            hugepages.pages = pagexml_list
            membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            session = vm.wait_for_login()
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug("vm xml after start is %s", vmxml_new)

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if status_error:
                return
            else:
                test.fail("Test failed in positive case.\n error:"
                          " %s\n%s" % (e, bug_url))

        vm_pid = vm.get_pid()
        # numa hugepage check
        if page_list:
            with open("/proc/%s/numa_maps" % vm_pid) as numa_maps:
                numa_map_info = numa_maps.read()
            hugepage_info = re.findall(r".*file=\S*hugepages.*", numa_map_info)
            if not hugepage_info:
                test.fail("Can't find hugepages usage info in vm " "numa maps")
            else:
                logging.debug("The hugepage info in numa_maps is %s" %
                              hugepage_info)
                map_dict = {}
                usage_dict = {}
                node_pattern = r"\s(\S+):(\S+)\s.*ram-node(\d+).*\s"
                node_pattern += "N(\d+)=(\d+)"
                for map_info in hugepage_info:
                    for (mem_mode, mem_num, cell_num, host_node_num,
                         vm_page_num) in re.findall(node_pattern, map_info):
                        usage_dict[mem_mode] = cpu.cpus_parser(mem_num)
                        usage_dict[host_node_num] = vm_page_num
                        map_dict[cell_num] = usage_dict.copy()
                logging.debug("huagepage info in vm numa maps is %s", map_dict)
                memnode_dict = {}
                usage_dict = {}
                if numa_memnode:
                    for i in numa_memnode:
                        node = cpu.cpus_parser(i['nodeset'])
                        mode = mode_dict[i['mode']]
                        usage_dict[mode] = node
                        memnode_dict[i['cellid']] = usage_dict.copy()
                    logging.debug("memnode setting dict is %s", memnode_dict)
                    for k in list(memnode_dict.keys()):
                        for mk in list(memnode_dict[k].keys()):
                            if memnode_dict[k][mk] != map_dict[k][mk]:
                                test.fail("vm pid numa map dict %s"
                                          " not expected" % map_dict)

        # qemu command line check
        with open("/proc/%s/cmdline" % vm_pid) as f_cmdline:
            q_cmdline_list = f_cmdline.read().split("\x00")
        logging.debug("vm qemu cmdline list is %s" % q_cmdline_list)
        for cmd in cmdline_list:
            logging.debug("checking '%s' in qemu cmdline", cmd['cmdline'])
            p_found = False
            for q_cmd in q_cmdline_list:
                if re.search(cmd['cmdline'], q_cmd):
                    p_found = True
                    break
                else:
                    continue
            if not p_found:
                test.fail("%s not found in vm qemu cmdline" % cmd['cmdline'])

        # vm inside check
        vm_cpu_info = cpu.get_cpu_info(session)
        logging.debug("lscpu output dict in vm is %s", vm_cpu_info)
        session.close()
        node_num = int(vm_cpu_info["NUMA node(s)"])
        if node_num != len(numa_cell):
            test.fail("node number %s in vm is not expected" % node_num)
        for i in range(len(numa_cell)):
            cpu_str = vm_cpu_info["NUMA node%s CPU(s)" % i]
            vm_cpu_list = cpu.cpus_parser(cpu_str)
            cpu_list = cpu.cpus_parser(numa_cell[i]["cpus"])
            if vm_cpu_list != cpu_list:
                test.fail("vm node %s cpu list %s not expected" %
                          (i, vm_cpu_list))
        if topology:
            vm_topo_tuple = ("Socket(s)", "Core(s) per socket",
                             "Thread(s) per core")
            for i in range(len(topo_tuple)):
                topo_info = vm_cpu_info[vm_topo_tuple[i]]
                if topo_info != topology[topo_tuple[i]]:
                    test.fail("%s in vm topology not expected." %
                              topo_tuple[i])
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if page_list:
            for i in backup_list:
                hp_cl.set_node_num_huge_pages(i['num'], i['nodenum'],
                                              i['size'])
        if deallocate:
            hp_cl.deallocate = deallocate
            hp_cl.cleanup()
        if qemu_conf_restore:
            qemu_conf.restore()
            libvirtd.restart()
            for mt_path in mount_path:
                try:
                    process.run("umount %s" % mt_path, shell=True)
                except process.CmdError:
                    logging.warning("umount %s failed" % mt_path)
Example #4
def run(test, params, env):
    """
    Test command: virsh guestvcpus

    The command queries or modifies the state of vCPUs in the VM
    1. Prepare test environment, start vm with guest agent
    2. Perform virsh guestvcpus query/enable/disable operation
    3. Check the cpus in the vm
    4. Recover test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vcpus_num = int(params.get("vcpus_num", "20"))
    vcpus_placement = params.get("vcpus_placement", "static")
    option = params.get("option", "")
    combine = params.get("combine", "")
    invalid_domain = params.get("invalid_domain", "")
    domain_name = params.get("domain_name", "")
    invalid_cpulist = params.get("invalid_cpulist", "")
    status_error = params.get("status_error", "no")
    error_msg = params.get("error_msg", "no")
    vcpus_list = ""
    offline_vcpus = ""

    # Back up domain XML
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    try:
        # Modify vm with static vcpus
        if vm.is_alive():
            vm.destroy()
        vmxml.placement = vcpus_placement
        vmxml.set_vm_vcpus(vm_name,
                           vcpus_num,
                           vcpus_num,
                           topology_correction=True)
        logging.debug("Define guest with '%s' vcpus" % str(vcpus_num))

        # Start guest agent in vm
        vm.prepare_guest_agent()

        # Normal test: disable/ enable guest vcpus
        if option and status_error == "no":
            for cpu in range(1, vcpus_num):
                virsh.guestvcpus(vm_name, str(cpu), option, debug=True)

        # Normal test: combine: --disable 1-max then --enable 1
        if combine == "yes" and status_error == "no":
            vcpus_list = '1' + '-' + str(vcpus_num - 1)
            option = "--disable"
            virsh.guestvcpus(vm_name, vcpus_list, option, debug=True)
            vcpus_list = '1'
            option = "--enable"
            virsh.guestvcpus(vm_name, vcpus_list, option, debug=True)

        # Error test: invalid_domain
        if invalid_domain == "yes":
            vm_name = domain_name
        # Error test: invalid_cpulist
        if invalid_cpulist == "yes":
            if option == "--enable":
                vcpus_list = str(vcpus_num)
            else:
                vcpus_list = '0' + '-' + str(vcpus_num - 1)
            ret = virsh.guestvcpus(vm_name, vcpus_list, option)
        else:
            # Query guest vcpus
            ret = virsh.guestvcpus(vm_name)
            output = ret.stdout.strip()
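            # Note (added comment): "virsh guestvcpus <domain>" prints
            # "key : value" lines, roughly:
            #   vcpus     : 0-19
            #   online    : 0-19
            #   offlinable: 1-19
            # The dict built under "Check test results" below relies on that
            # layout.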

        # Check test results
        if status_error == "yes":
            libvirt.check_result(ret, error_msg)
        else:
            # Check the test result of query
            ret_output = dict([item.strip() for item in line.split(":")]
                              for line in output.split("\n"))
            if combine == "yes":
                online_vcpus = '0-1'
            elif option == "--disable":
                online_vcpus = '0'
            else:
                online_vcpus = '0' + '-' + str(vcpus_num - 1)

            if ret_output["online"] != online_vcpus:
                test.fail("Query result is different from"
                          " the '%s' command." % option)

            # Check the cpu in guest
            session = vm.wait_for_login()
            vm_cpu_info = cpuutil.get_cpu_info(session)
            session.close()

            if combine == "yes":
                online_vcpus = '0,1'
            elif option == "--disable":
                online_vcpus = '0'
                offline_vcpus = '1' + '-' + str(vcpus_num - 1)
            else:
                online_vcpus = '0' + '-' + str(vcpus_num - 1)

            if offline_vcpus:
                if (vm_cpu_info["Off-line CPU(s) list"] != offline_vcpus
                        or vm_cpu_info["On-line CPU(s) list"] != online_vcpus):
                    test.fail("CPUs in vm is different from"
                              " the %s command." % option)
            elif vm_cpu_info["On-line CPU(s) list"] != online_vcpus:
                test.fail("On-line CPUs in vm is different"
                          " from the %s command." % option)
            else:
                logging.debug("lscpu in vm '%s' is: \n '%s'" %
                              (vm_name, vm_cpu_info))

    finally:
        # Recover VM
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()