Example #1
def check_emulatorpin(params):
    """
    Check emulator affinity
    :params: the parameter dictionary
    """
    dicts = {}
    vm = params.get("vm")
    vm_name = params.get("main_vm")
    cpu_list = params.get("cpu_list")
    cgconfig = params.get("cgconfig", "on")
    options = params.get("emulatorpin_options")

    result = virsh.emulatorpin(vm_name)
    cmd_output = result.stdout.strip().splitlines()
    logging.debug(cmd_output)
    # Parse the command output into a Python dictionary.
    for l in cmd_output[2:]:
        k, v = l.split(':')
        dicts[k.strip()] = v.strip()

    logging.debug(dicts)

    emulator_from_cmd = dicts['*']
    emulatorpin_from_xml = ""

    # The 'config' option on a running guest affects the next boot; if the
    # guest has not been shut down, virsh dumpxml needs the '--inactive'
    # option to show those pending XML changes.
    if options == "config" and vm and not vm.is_alive():
        emulatorpin_from_xml = \
            vm_xml.VMXML().new_from_dumpxml(vm_name, "--inactive").cputune.emulatorpin
    else:
        emulatorpin_from_xml = \
            vm_xml.VMXML().new_from_dumpxml(vm_name).cputune.emulatorpin

    # Get the guest's corresponding emulator/cpuset.cpus value from the
    # cpuset controller of the cgroup.
    if cgconfig == "on" and vm and vm.is_alive():
        emulatorpin_from_cgroup = get_emulatorpin_from_cgroup(params)
        logging.debug("The emulatorpin value from "
                      "cgroup: %s", emulatorpin_from_cgroup)

    # Check the specified cpulist against the virsh command output and/or
    # cpuset.cpus from the cpuset controller of the cgroup.
    if cpu_list:
        if vm and vm.is_alive() and options != "config":
            if (cpu_list != cpus_parser(emulator_from_cmd)) or \
                    (cpu_list != cpus_parser(emulatorpin_from_cgroup)):
                logging.error("To expect emulatorpin %s: %s",
                              cpu_list, emulator_from_cmd)
                return False
        else:
            if cpu_list != cpus_parser(emulatorpin_from_xml):
                logging.error("To expect emulatorpin %s: %s",
                              cpu_list, emulatorpin_from_xml)
                return False

        return True
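
The parsing loop above turns the tail of the `virsh emulatorpin` output into a key/value dictionary. A minimal, self-contained sketch of that step, using an assumed (illustrative) output layout of a two-line header followed by "key: value" pairs:

sample_output = """emulator: CPU Affinity
----------------------------------
       *: 0-3"""

affinity = {}
for line in sample_output.splitlines()[2:]:
    key, value = line.split(':')
    affinity[key.strip()] = value.strip()
print(affinity)  # {'*': '0-3'}
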
Example #2
def check_numatune_xml(params):
    """
    Compare mode and nodeset value with guest XML configuration
    :params: the parameter dictionary
    """
    # vm_name = params.get("vms")
    vm_name = params.get("main_vm")
    mode = params.get("numa_mode", "")
    nodeset = params.get("numa_nodeset", "")
    options = params.get("options", "")
    # The --config option takes effect only after the VM is shut down.
    if options == "config":
        virsh.shutdown(vm_name)
    # The verification of the numa params should
    # be done when vm is running.
    if not virsh.is_alive(vm_name):
        virsh.start(vm_name)

    try:
        numa_params = libvirt_xml.VMXML.get_numa_memory_params(vm_name)
    # The VM XML omits the numa entry when placement is auto and mode is
    # strict, so set numa_params manually when that exception is raised.
    except LibvirtXMLAccessorError:
        numa_params = {"placement": "auto", "mode": "strict"}

    if not numa_params:
        logging.error("Could not get numa parameters for %s", vm_name)
        return False

    mode_from_xml = numa_params["mode"]
    # If placement is auto, there is no nodeset in the numa params.
    try:
        nodeset_from_xml = numa_params["nodeset"]
    except KeyError:
        nodeset_from_xml = ""

    if mode and mode != mode_from_xml:
        logging.error("To expect %s: %s", mode, mode_from_xml)
        return False

    # The nodeset may be written differently in the guest XML than in the
    # test configuration (for example, '0,1,2' becomes '0-2'), so normalize
    # both values with cpus_parser() before comparing them.
    nodeset = cpus_parser(nodeset)
    nodeset_from_xml = cpus_parser(nodeset_from_xml)

    if nodeset and nodeset != nodeset_from_xml:
        logging.error("To expect %s: %s", nodeset, nodeset_from_xml)
        return False

    return True
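
check_numatune_xml() relies on cpus_parser() to normalize nodeset strings before comparing them, since the same set of nodes can be written differently in the test config and in the guest XML. A simplified stand-in for that helper (its assumed behaviour: expand strings such as "0,1,2", "0-2" or "0-3,^2" into a sorted list of ints) is sketched below:

def parse_cpuset(cpuset_str):
    """Expand a libvirt cpuset/nodeset string into a sorted list of ints."""
    included, excluded = set(), set()
    for part in cpuset_str.split(','):
        part = part.strip()
        target = included
        if part.startswith('^'):
            target = excluded
            part = part[1:]
        if '-' in part:
            start, end = part.split('-')
            target.update(range(int(start), int(end) + 1))
        else:
            target.add(int(part))
    return sorted(included - excluded)

assert parse_cpuset("0,1,2") == parse_cpuset("0-2") == [0, 1, 2]
assert parse_cpuset("0-3,^2") == [0, 1, 3]
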
Example #3
def set_numa_parameter(params, cgstop):
    """
    Set the numa parameters
    :params: the parameter dictionary
    :cgstop: whether the cgconfig service was stopped before the test
    """
    vm_name = params.get("vms")
    mode = params.get("numa_mode")
    nodeset = params.get("numa_nodeset")
    options = params.get("options", None)
    start_vm = params.get("start_vm", "yes")

    # Don't use libvirt_xml here because the virsh numatune command itself is under test
    result = virsh.numatune(vm_name, mode, nodeset, options, debug=True)
    status = result.exit_status

    # Check status_error
    status_error = params.get("status_error", "no")

    # For a running domain, the mode can't be changed, and the nodeset can
    # be changed only if the domain was started with a mode of 'strict'
    if mode == "strict" and start_vm == "yes":
        status_error = "no"

    # TODO: the '--config' option affects the next boot, and for a shutoff
    # guest '--current' is equivalent to '--config'. If a nodeset range larger
    # than the host's NUMA nodes is given with '--config' or '--current' on a
    # shutoff guest, virsh numatune returns 0 rather than 1, because libvirt
    # only validates the nodeset when the guest is started; the current
    # virsh.start() wrapper can't cover this case.

    if status_error == "yes":
        if status:
            logging.info("It's an expected error")
        else:
            # If we stopped control groups, then we expect a different
            # result in this failure case; however, if there were no
            # control groups to stop, then don't error needlessly
            if cgstop:
                raise error.TestFail("Unexpected return code %d" % status)
            else:
                logging.info("Control groups stopped, thus expected success")
    elif status_error == "no":
        if status:
            if (cpus_parser(nodeset)[-1] + 1) > num_numa_nodes():
                raise error.TestNAError("Host does not support requested"
                                        " nodeset")
            else:
                raise error.TestFail(result.stderr)
        else:
            if check_numatune_xml(params):
                logging.info(result.stdout)
            else:
                raise error.TestFail("The 'mode' or/and 'nodeset' are"
                                     " inconsistent with numatune XML")
Example #4
def run(test, params, env):
    """
    Test guest numa setting
    """
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes
    arch = platform.machine()
    if 'ppc64' in arch:
        try:
            ppc_memory_nodeset = ""
            nodes = params['memory_nodeset']
            if '-' in nodes:
                for n in range(int(nodes.split('-')[0]), int(nodes.split('-')[1])):
                    ppc_memory_nodeset += str(node_list[n]) + ','
                ppc_memory_nodeset += str(node_list[int(nodes.split('-')[1])])
            else:
                node_lst = nodes.split(',')
                for n in range(len(node_lst) - 1):
                    ppc_memory_nodeset += str(node_list[int(node_lst[n])]) + ','
                ppc_memory_nodeset += str(node_list[int(node_lst[-1])])
            params['memory_nodeset'] = ppc_memory_nodeset
        except IndexError:
            test.cancel("No of numas in config does not match with no of "
                        "online numas in system")
        except utils_params.ParamNotFound:
            pass
        pkeys = ('memnode_nodeset', 'page_nodenum')
        for pkey in pkeys:
            for key in params.keys():
                if pkey in key:
                    params[key] = str(node_list[int(params[key])])
        # Modify qemu command line
        try:
            if params['qemu_cmdline_mem_backend_1']:
                memory_nodeset = sorted(params['memory_nodeset'].split(','))
                if len(memory_nodeset) > 1:
                    if int(memory_nodeset[1]) - int(memory_nodeset[0]) == 1:
                        qemu_cmdline = "memory-backend-ram,.*?id=ram-node1," \
                                       ".*?host-nodes=%s-%s,policy=bind" % \
                                       (memory_nodeset[0], memory_nodeset[1])
                    else:
                        qemu_cmdline = "memory-backend-ram,.*?id=ram-node1," \
                                       ".*?host-nodes=%s,.*?host-nodes=%s,policy=bind" % \
                                       (memory_nodeset[0], memory_nodeset[1])
                    params['qemu_cmdline_mem_backend_1'] = qemu_cmdline
        except utils_params.ParamNotFound:
            pass
        try:
            if params['qemu_cmdline_mem_backend_0']:
                qemu_cmdline = params['qemu_cmdline_mem_backend_0']
                params['qemu_cmdline_mem_backend_0'] = qemu_cmdline.replace(
                    ".*?host-nodes=1", ".*?host-nodes=%s" % params['memnode_nodeset_0'])
        except utils_params.ParamNotFound:
            pass
    vcpu_num = int(params.get("vcpu_num", 2))
    max_mem = int(params.get("max_mem", 1048576))
    max_mem_unit = params.get("max_mem_unit", 'KiB')
    vcpu_placement = params.get("vcpu_placement", 'static')
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
    mode_dict = {'strict': 'bind', 'preferred': 'prefer',
                 'interleave': 'interleave'}

    # Prepare numatune memory parameter dict and list
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    memnode_tuple = ('memnode_cellid', 'memnode_mode', 'memnode_nodeset')
    numa_memnode = handle_param(memnode_tuple, params)

    if numa_memnode:
        if not libvirt_version.version_compare(1, 2, 7):
            test.cancel("Setting hugepages more specifically per "
                        "numa node not supported on current "
                        "version")

    # Prepare cpu numa cell parameter
    topology = {}
    topo_tuple = ('sockets', 'cores', 'threads')
    for key in topo_tuple:
        if params.get(key):
            topology[key] = params.get(key)

    cell_tuple = ('cell_id', 'cell_cpus', 'cell_memory')
    numa_cell = handle_param(cell_tuple, params)

    # Prepare qemu cmdline check parameter
    cmdline_tuple = ("qemu_cmdline",)
    cmdline_list = handle_param(cmdline_tuple, params)

    # Prepare hugepages parameter
    backup_list = []
    page_tuple = ('vmpage_size', 'vmpage_unit', 'vmpage_nodeset')
    page_list = handle_param(page_tuple, params)
    nr_pagesize_total = params.get("nr_pagesize_total")
    deallocate = False
    default_nr_hugepages_path = "/sys/kernel/mm/hugepages/hugepages-2048kB/"
    default_nr_hugepages_path += "nr_hugepages"

    if page_list:
        if not libvirt_version.version_compare(1, 2, 5):
            test.cancel("Setting hugepages more specifically per "
                        "numa node not supported on current "
                        "version")

    hp_cl = test_setup.HugePageConfig(params)
    supported_hp_size = hp_cl.get_multi_supported_hugepage_size()
    mount_path = []
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    qemu_conf_restore = False

    def _update_qemu_conf():
        """
        Mount hugepage path, update qemu conf then restart libvirtd
        """
        size_dict = {'2048': '2M', '1048576': '1G', '16384': '16M'}
        for page in page_list:
            if page['size'] not in supported_hp_size:
                test.cancel("Hugepage size [%s] isn't supported, "
                            "please verify kernel cmdline configuration."
                            % page['size'])
            m_path = "/dev/hugepages%s" % size_dict[page['size']]
            hp_cl.hugepage_size = int(page['size'])
            hp_cl.hugepage_path = m_path
            hp_cl.mount_hugepage_fs()
            mount_path.append(m_path)
        if mount_path:
            qemu_conf.hugetlbfs_mount = mount_path
            libvirtd.restart()

    try:
        # Get host numa node list
        logging.debug("host node list is %s", node_list)
        used_node = []
        if numa_memory.get('nodeset'):
            used_node += utlv.cpus_parser(numa_memory['nodeset'])
        if numa_memnode:
            for i in numa_memnode:
                used_node += utlv.cpus_parser(i['nodeset'])
        if page_list:
            host_page_tuple = ("hugepage_size", "page_num", "page_nodenum")
            h_list = handle_param(host_page_tuple, params)
            h_nodenum = [h_list[p_size]['nodenum']
                         for p_size in range(len(h_list))]
            for i in h_nodenum:
                used_node += utlv.cpus_parser(i)
        if used_node and not status_error:
            logging.debug("set node list is %s", used_node)
            used_node = list(set(used_node))
            for i in used_node:
                if i not in node_list:
                    test.cancel("%s in nodeset out of range" % i)
                mem_size = host_numa_node.read_from_node_meminfo(i, 'MemTotal')
                logging.debug("the memory total in the node %s is %s", i, mem_size)
                if not int(mem_size):
                    test.cancel("node %s memory is empty" % i)

        # set hugepage with qemu.conf and mount path
        _update_qemu_conf()
        qemu_conf_restore = True

        # set hugepage with total number or per-node number
        if nr_pagesize_total:
            # Only set the total number of 2M hugepages; runtime updates of
            # the total 1G hugepage count are not supported yet.
            deallocate = True
            hp_cl.kernel_hp_file = default_nr_hugepages_path
            hp_cl.target_hugepages = int(nr_pagesize_total)
            hp_cl.set_hugepages()
        if page_list:
            hp_size = [h_list[p_size]['size'] for p_size in range(len(h_list))]
            multi_hp_size = hp_cl.get_multi_supported_hugepage_size()
            for size in hp_size:
                if size not in multi_hp_size:
                    test.cancel("The hugepage size %s not "
                                "supported or not configured under"
                                " current running kernel." % size)
            # backup node page setting and set new value
            for i in h_list:
                node_val = hp_cl.get_node_num_huge_pages(i['nodenum'],
                                                         i['size'])
                # Set hugepages per node if the current value is not enough;
                # runtime updates of the per-node 1G hugepage count are
                # supported now.
                if int(i['num']) > node_val:
                    node_dict = i.copy()
                    node_dict['num'] = node_val
                    backup_list.append(node_dict)
                    hp_cl.set_node_num_huge_pages(i['num'],
                                                  i['nodenum'],
                                                  i['size'])
                    node_val_after_set = hp_cl.get_node_num_huge_pages(i['nodenum'],
                                                                       i['size'])
                    if node_val_after_set < int(i['num']):
                        test.cancel("There is not enough memory to allocate.")

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.vcpu = vcpu_num
        vmxml.max_mem = max_mem
        vmxml.max_mem_unit = max_mem_unit
        vmxml.current_mem = max_mem
        vmxml.current_mem_unit = max_mem_unit

        # numatune setting
        if numa_memnode:
            vmxml.numa_memory = numa_memory
            vmxml.numa_memnode = numa_memnode
            del vmxml.numa_memory
        if numa_memory:
            vmxml.numa_memory = numa_memory

        # vcpu placement setting
        vmxml.placement = vcpu_placement

        # guest numa cpu setting
        vmcpuxml = libvirt_xml.vm_xml.VMCPUXML()
        vmcpuxml.xml = "<cpu><numa/></cpu>"
        if topology:
            vmcpuxml.topology = topology
        logging.debug(vmcpuxml.numa_cell)
        vmcpuxml.numa_cell = numa_cell
        logging.debug(vmcpuxml.numa_cell)
        vmxml.cpu = vmcpuxml

        # hugepages setting
        if page_list:
            membacking = libvirt_xml.vm_xml.VMMemBackingXML()
            hugepages = libvirt_xml.vm_xml.VMHugepagesXML()
            pagexml_list = []
            for i in range(len(page_list)):
                pagexml = hugepages.PageXML()
                pagexml.update(page_list[i])
                pagexml_list.append(pagexml)
            hugepages.pages = pagexml_list
            membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            session = vm.wait_for_login()
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug("vm xml after start is %s", vmxml_new)

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if status_error:
                return
            else:
                test.fail("Test failed in positive case.\n error:"
                          " %s\n%s" % (e, bug_url))

        vm_pid = vm.get_pid()
        # numa hugepage check
        if page_list:
            with open("/proc/%s/numa_maps" % vm_pid) as numa_maps:
                numa_map_info = numa_maps.read()
            hugepage_info = re.findall(".*file=\S*hugepages.*", numa_map_info)
            if not hugepage_info:
                test.fail("Can't find hugepages usage info in vm "
                          "numa maps")
            else:
                logging.debug("The hugepage info in numa_maps is %s" %
                              hugepage_info)
                map_dict = {}
                usage_dict = {}
                node_pattern = r"\s(\S+):(\S+)\s.*ram-node(\d+).*\s"
                node_pattern += "N(\d+)=(\d+)"
                for map_info in hugepage_info:
                    for (mem_mode, mem_num, cell_num, host_node_num,
                         vm_page_num) in re.findall(node_pattern, map_info):
                        usage_dict[mem_mode] = utlv.cpus_parser(mem_num)
                        usage_dict[host_node_num] = vm_page_num
                        map_dict[cell_num] = usage_dict.copy()
                logging.debug("huagepage info in vm numa maps is %s",
                              map_dict)
                memnode_dict = {}
                usage_dict = {}
                if numa_memnode:
                    for i in numa_memnode:
                        node = utlv.cpus_parser(i['nodeset'])
                        mode = mode_dict[i['mode']]
                        usage_dict[mode] = node
                        memnode_dict[i['cellid']] = usage_dict.copy()
                    logging.debug("memnode setting dict is %s", memnode_dict)
                    for k in list(memnode_dict.keys()):
                        for mk in list(memnode_dict[k].keys()):
                            if memnode_dict[k][mk] != map_dict[k][mk]:
                                test.fail("vm pid numa map dict %s"
                                          " not expected" % map_dict)

        # qemu command line check
        with open("/proc/%s/cmdline" % vm_pid) as f_cmdline:
            q_cmdline_list = f_cmdline.read().split("\x00")
        logging.debug("vm qemu cmdline list is %s" % q_cmdline_list)
        for cmd in cmdline_list:
            logging.debug("checking '%s' in qemu cmdline", cmd['cmdline'])
            p_found = False
            for q_cmd in q_cmdline_list:
                if re.search(cmd['cmdline'], q_cmd):
                    p_found = True
                    break
                else:
                    continue
            if not p_found:
                test.fail("%s not found in vm qemu cmdline" % cmd['cmdline'])

        # vm inside check
        vm_cpu_info = utils_misc.get_cpu_info(session)
        logging.debug("lscpu output dict in vm is %s", vm_cpu_info)
        session.close()
        node_num = int(vm_cpu_info["NUMA node(s)"])
        if node_num != len(numa_cell):
            test.fail("node number %s in vm is not expected" % node_num)
        for i in range(len(numa_cell)):
            cpu_str = vm_cpu_info["NUMA node%s CPU(s)" % i]
            vm_cpu_list = utlv.cpus_parser(cpu_str)
            cpu_list = utlv.cpus_parser(numa_cell[i]["cpus"])
            if vm_cpu_list != cpu_list:
                test.fail("vm node %s cpu list %s not expected"
                          % (i, vm_cpu_list))
        if topology:
            vm_topo_tuple = ("Socket(s)", "Core(s) per socket",
                             "Thread(s) per core")
            for i in range(len(topo_tuple)):
                topo_info = vm_cpu_info[vm_topo_tuple[i]]
                if topo_info != topology[topo_tuple[i]]:
                    test.fail("%s in vm topology not expected." % topo_tuple[i])
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if page_list:
            for i in backup_list:
                hp_cl.set_node_num_huge_pages(i['num'],
                                              i['nodenum'], i['size'])
        if deallocate:
            hp_cl.deallocate = deallocate
            hp_cl.cleanup()
        if qemu_conf_restore:
            qemu_conf.restore()
            libvirtd.restart()
            for mt_path in mount_path:
                try:
                    process.run("umount %s" % mt_path, shell=True)
                except process.CmdError:
                    logging.warning("umount %s failed" % mt_path)
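
The hugepage check above extracts per-node usage from /proc/<pid>/numa_maps with a regular expression. A standalone sketch of that extraction, run against a made-up numa_maps line (real lines vary by kernel version and qemu memory-backend naming):

import re

sample_line = ("7f1c40000000 bind:1 file=/dev/hugepages2M/libvirt/qemu/"
               "3-guest/ram-node1 huge dirty=512 N1=512 kernelpagesize_kB=2048")
node_pattern = r"\s(\S+):(\S+)\s.*ram-node(\d+).*\sN(\d+)=(\d+)"
for mem_mode, mem_num, cell_num, host_node_num, vm_page_num in \
        re.findall(node_pattern, sample_line):
    print(mem_mode, mem_num, cell_num, host_node_num, vm_page_num)
# -> bind 1 1 1 512
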
Example #5
def run(test, params, env):
    """
    Test guest numa setting
    """
    vcpu_num = int(params.get("vcpu_num", 2))
    max_mem = int(params.get("max_mem", 1048576))
    max_mem_unit = params.get("max_mem_unit", 'KiB')
    vcpu_placement = params.get("vcpu_placement", 'static')
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
    mode_dict = {'strict': 'bind', 'preferred': 'prefer',
                 'interleave': 'interleave'}

    # Prepare numatune memory parameter dict and list
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    memnode_tuple = ('memnode_cellid', 'memnode_mode', 'memnode_nodeset')
    numa_memnode = handle_param(memnode_tuple, params)

    if numa_memnode:
        if not libvirt_version.version_compare(1, 2, 7):
            raise error.TestNAError("Setting hugepages more specifically per "
                                    "numa node not supported on current "
                                    "version")

    # Prepare cpu numa cell parameter
    topology = {}
    topo_tuple = ('sockets', 'cores', 'threads')
    for key in topo_tuple:
        if params.get(key):
            topology[key] = params.get(key)

    cell_tuple = ('cell_id', 'cell_cpus', 'cell_memory')
    numa_cell = handle_param(cell_tuple, params)

    # Prepare qemu cmdline check parameter
    cmdline_tuple = ("qemu_cmdline",)
    cmdline_list = handle_param(cmdline_tuple, params)

    # Prepare hugepages parameter
    backup_list = []
    page_tuple = ('vmpage_size', 'vmpage_unit', 'vmpage_nodeset')
    page_list = handle_param(page_tuple, params)
    nr_pagesize_total = params.get("nr_pagesize_total")
    deallocate = False
    default_nr_hugepages_path = "/sys/kernel/mm/hugepages/hugepages-2048kB/"
    default_nr_hugepages_path += "nr_hugepages"

    if page_list:
        if not libvirt_version.version_compare(1, 2, 5):
            raise error.TestNAError("Setting hugepages more specifically per "
                                    "numa node not supported on current "
                                    "version")

    hp_cl = test_setup.HugePageConfig(params)
    default_hp_size = hp_cl.get_hugepage_size()
    supported_hp_size = hp_cl.get_multi_supported_hugepage_size()
    mount_path = []
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    qemu_conf_restore = False

    def _update_qemu_conf():
        """
        Mount hugepage path, update qemu conf then restart libvirtd
        """
        size_dict = {'2048': '2M', '1048576': '1G', '16384': '16M'}
        for page in page_list:
            if page['size'] not in supported_hp_size:
                raise error.TestError("Hugepage size [%s] isn't supported, "
                                      "please verify kernel cmdline configuration."
                                      % page['size'])
            m_path = "/dev/hugepages%s" % size_dict[page['size']]
            hp_cl.hugepage_size = int(page['size'])
            hp_cl.hugepage_path = m_path
            hp_cl.mount_hugepage_fs()
            mount_path.append(m_path)
        qemu_conf.hugetlbfs_mount = mount_path
        libvirtd.restart()

    try:
        # Get host numa node list
        host_numa_node = utils_misc.NumaInfo()
        node_list = host_numa_node.online_nodes
        logging.debug("host node list is %s", node_list)
        used_node = []
        if numa_memory.get('nodeset'):
            used_node += utlv.cpus_parser(numa_memory['nodeset'])
        if numa_memnode:
            for i in numa_memnode:
                used_node += utlv.cpus_parser(i['nodeset'])
        if page_list:
            host_page_tuple = ("hugepage_size", "page_num", "page_nodenum")
            h_list = handle_param(host_page_tuple, params)
            h_nodenum = [h_list[p_size]['nodenum']
                         for p_size in range(len(h_list))]
            for i in h_nodenum:
                used_node += utlv.cpus_parser(i)
        if used_node and not status_error:
            logging.debug("set node list is %s", used_node)
            for i in used_node:
                if i > max(node_list):
                    raise error.TestNAError("%s in nodeset out of range" % i)

        # set hugepage with qemu.conf and mount path
        if default_hp_size == 2048:
            hp_cl.setup()
            deallocate = True
        else:
            _update_qemu_conf()
            qemu_conf_restore = True

        # set hugepage with total number or per-node number
        if nr_pagesize_total:
            # Only set the total number of 2M hugepages; runtime updates of
            # the total 1G hugepage count are not supported yet.
            deallocate = True
            hp_cl.kernel_hp_file = default_nr_hugepages_path
            hp_cl.target_hugepages = int(nr_pagesize_total)
            hp_cl.set_hugepages()
        if page_list:
            hp_size = [h_list[p_size]['size'] for p_size in range(len(h_list))]
            multi_hp_size = hp_cl.get_multi_supported_hugepage_size()
            for size in hp_size:
                if size not in multi_hp_size:
                    raise error.TestNAError("The hugepage size %s not "
                                            "supported or not configured under"
                                            " current running kernel." % size)
            # backup node page setting and set new value
            for i in h_list:
                node_val = hp_cl.get_node_num_huge_pages(i['nodenum'],
                                                         i['size'])
                if i['size'] != "1048576":
                    node_dict = i.copy()
                    node_dict['num'] = node_val
                    backup_list.append(node_dict)
                    hp_cl.set_node_num_huge_pages(i['num'],
                                                  i['nodenum'],
                                                  i['size'])
                else:
                    # Runtime updates of the 1G hugepage count are not
                    # supported yet, so check whether the current host
                    # setting satisfies the requirement.
                    if i['num'] < node_val:
                        raise error.TestNAError("%s size hugepage number %s of"
                                                " node %s not satisfy for "
                                                "testing" % (i['size'],
                                                             node_val,
                                                             i['nodenum']))

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.vcpu = vcpu_num
        vmxml.max_mem = max_mem
        vmxml.max_mem_unit = max_mem_unit
        vmxml.current_mem = max_mem
        vmxml.current_mem_unit = max_mem_unit

        # numatune setting
        if numa_memnode:
            vmxml.numa_memory = numa_memory
            vmxml.numa_memnode = numa_memnode
            del vmxml.numa_memory
        if numa_memory:
            vmxml.numa_memory = numa_memory

        # vcpu placement setting
        vmxml.placement = vcpu_placement

        # guest numa cpu setting
        vmcpuxml = libvirt_xml.vm_xml.VMCPUXML()
        vmcpuxml.xml = "<cpu><numa/></cpu>"
        if topology:
            vmcpuxml.topology = topology
        logging.debug(vmcpuxml.numa_cell)
        vmcpuxml.numa_cell = numa_cell
        logging.debug(vmcpuxml.numa_cell)
        vmxml.cpu = vmcpuxml

        # hugepages setting
        if page_list:
            membacking = libvirt_xml.vm_xml.VMMemBackingXML()
            hugepages = libvirt_xml.vm_xml.VMHugepagesXML()
            pagexml_list = []
            for i in range(len(page_list)):
                pagexml = hugepages.PageXML()
                pagexml.update(page_list[i])
                pagexml_list.append(pagexml)
            hugepages.pages = pagexml_list
            membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            session = vm.wait_for_login()
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug("vm xml after start is %s", vmxml_new)

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if status_error:
                return
            else:
                raise error.TestFail("Test failed in positive case.\n error:"
                                     " %s\n%s" % (e, bug_url))

        vm_pid = vm.get_pid()
        # numa hugepage check
        if page_list:
            numa_maps = open("/proc/%s/numa_maps" % vm_pid)
            numa_map_info = numa_maps.read()
            numa_maps.close()
            hugepage_info = re.findall(".*file=\S*hugepages.*", numa_map_info)
            if not hugepage_info:
                raise error.TestFail("Can't find hugepages usage info in vm "
                                     "numa maps")
            else:
                logging.debug("The hugepage info in numa_maps is %s" %
                              hugepage_info)
                map_dict = {}
                usage_dict = {}
                node_pattern = r"\s(\S+):(\S+)\s.*ram-node(\d+).*\s"
                node_pattern += "N(\d+)=(\d+)"
                for map_info in hugepage_info:
                    for (mem_mode, mem_num, cell_num, host_node_num,
                         vm_page_num) in re.findall(node_pattern, map_info):
                        usage_dict[mem_mode] = utlv.cpus_parser(mem_num)
                        usage_dict[host_node_num] = vm_page_num
                        map_dict[cell_num] = usage_dict.copy()
                logging.debug("huagepage info in vm numa maps is %s",
                              map_dict)
                memnode_dict = {}
                usage_dict = {}
                if numa_memnode:
                    for i in numa_memnode:
                        node = utlv.cpus_parser(i['nodeset'])
                        mode = mode_dict[i['mode']]
                        usage_dict[mode] = node
                        memnode_dict[i['cellid']] = usage_dict.copy()
                    logging.debug("memnode setting dict is %s", memnode_dict)
                    for k in memnode_dict.keys():
                        for mk in memnode_dict[k].keys():
                            if memnode_dict[k][mk] != map_dict[k][mk]:
                                raise error.TestFail("vm pid numa map dict %s"
                                                     " not expected" %
                                                     map_dict)

        # qemu command line check
        f_cmdline = open("/proc/%s/cmdline" % vm_pid)
        q_cmdline_list = f_cmdline.read().split("\x00")
        f_cmdline.close()
        logging.debug("vm qemu cmdline list is %s" % q_cmdline_list)
        for cmd in cmdline_list:
            logging.debug("checking '%s' in qemu cmdline", cmd['cmdline'])
            p_found = False
            for q_cmd in q_cmdline_list:
                if re.search(cmd['cmdline'], q_cmd):
                    p_found = True
                    break
                else:
                    continue
            if not p_found:
                raise error.TestFail("%s not found in vm qemu cmdline" %
                                     cmd['cmdline'])

        # vm inside check
        vm_cpu_info = utils_misc.get_cpu_info(session)
        logging.debug("lscpu output dict in vm is %s", vm_cpu_info)
        session.close()
        node_num = int(vm_cpu_info["NUMA node(s)"])
        if node_num != len(numa_cell):
            raise error.TestFail("node number %s in vm is not expected" %
                                 node_num)
        for i in range(len(numa_cell)):
            cpu_str = vm_cpu_info["NUMA node%s CPU(s)" % i]
            vm_cpu_list = utlv.cpus_parser(cpu_str)
            cpu_list = utlv.cpus_parser(numa_cell[i]["cpus"])
            if vm_cpu_list != cpu_list:
                raise error.TestFail("vm node %s cpu list %s not expected" %
                                     (i, vm_cpu_list))
        if topology:
            vm_topo_tuple = ("Socket(s)", "Core(s) per socket",
                             "Thread(s) per core")
            for i in range(len(topo_tuple)):
                topo_info = vm_cpu_info[vm_topo_tuple[i]]
                if topo_info != topology[topo_tuple[i]]:
                    raise error.TestFail("%s in vm topology not expected." %
                                         topo_tuple[i])
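
The qemu command-line check in this and the previous example reads /proc/<pid>/cmdline, which stores the argument vector NUL-separated, and matches each expected pattern with re.search(). A minimal sketch of that check; the pid and pattern below are illustrative placeholders, not values taken from the test:

import re

def qemu_cmdline_matches(pid, patterns):
    """Return True if every regex in patterns matches some qemu argument."""
    with open("/proc/%s/cmdline" % pid) as f_cmdline:
        args = f_cmdline.read().split("\x00")
    return all(any(re.search(pattern, arg) for arg in args)
               for pattern in patterns)

# Hypothetical usage: check a bound memory backend for guest numa cell 1
# qemu_cmdline_matches(12345, [r"memory-backend-ram,.*id=ram-node1,.*policy=bind"])
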
Example #6
            except error.CmdError as e:
                raise error.TestError("Default hugepage size is not 2M, "
                                      "please make sure all sizes hugepage "
                                      "configuration is in kernel cmdline."
                                      "\n%s" % e)
        qemu_conf.hugetlbfs_mount = mount_path
        libvirtd.restart()

    try:
        # Get host numa node list
        host_numa_node = utils_misc.NumaInfo()
        node_list = host_numa_node.online_nodes
        logging.debug("host node list is %s", node_list)
        used_node = []
        if numa_memory.get('nodeset'):
            used_node += utlv.cpus_parser(numa_memory['nodeset'])
        if numa_memnode:
            for i in numa_memnode:
                used_node += utlv.cpus_parser(i['nodeset'])
        if page_list:
            host_page_tuple = ("hugepage_size", "page_num", "page_nodenum")
            h_list = handle_param(host_page_tuple, params)
            h_nodenum = [h_list[p_size]['nodenum']
                         for p_size in range(len(h_list))]
            for i in h_nodenum:
                used_node += utlv.cpus_parser(i)
        if used_node and not status_error:
            logging.debug("set node list is %s", used_node)
            for i in used_node:
                if i > max(node_list):
                    raise error.TestNAError("%s in nodeset out of range" % i)
Example #7
            else:
                cpulist = "0-%s" % (cpu_max - 1)
        elif cpulist == "x,y":
            cpulist = ','.join(random.sample(utils.cpu_online_map(), 2))
        elif cpulist == "x-y,^z":
            cpulist = "0-%s,^%s" % (cpu_max, cpu_max)
        elif cpulist == "-1":
            cpulist = "-1"
        elif cpulist == "out_of_max":
            cpulist = str(cpu_max + 1)
        else:
            raise error.TestNAError("CPU-list=%s is not recognized."
                                    % cpulist)
    test_dicts['emulatorpin_cpulist'] = cpulist
    if cpulist:
        cpu_list = cpus_parser(cpulist)
        test_dicts['cpu_list'] = cpu_list
        logging.debug("CPU list is %s", cpu_list)

    cg = utils_cgroup.CgconfigService()

    if cgconfig == "off":
        if cg.cgconfig_is_running():
            cg.cgconfig_stop()

    # positive and negative testing #########
    try:
        if status_error == "no":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts)
            else:
Example #8
def run(test, params, env):
    """
    Test setvcpu feature as follows:
    positive test:
        1. run virsh setvcpu with option --enable and --disable on inactive vm
           and check xml
        2. run virsh setvcpu with option --enable and --disable on active vm and
           check xml and number of online vcpu
        3. run virsh setvcpu with option --enable, --disable and --config on
           active vm and check inactive xml
        4. check the vcpu order when hot plug/unplug specific vcpu
    negative test:
        1. run virsh setvcpu with more than one vcpu on active vm and check error
        2. run virsh setvcpu to hotplug/unplug invalid vcpu and check error
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vcpu_placement = params.get("vcpu_placement", "static")
    maxvcpu = int(params.get("maxvcpu", "8"))
    vcpu_current = params.get("vcpu_current", "1")
    vcpus_enabled = ast.literal_eval(params.get("vcpus_enabled", "{0}"))
    vcpus_hotplug = ast.literal_eval(params.get("vcpus_hotpluggable", "{0}"))
    setvcpu_option = ast.literal_eval(params.get("setvcpu_option", "{}"))
    start_timeout = int(params.get("start_timeout", "60"))
    check = params.get("check", "")
    err_msg = params.get("err_msg", "")
    status_error = "yes" == params.get("status_error", "no")

    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    def check_vcpu_status(cpulist, cpu_option, cpus_online_pre=1):
        """
        Fail the test if the vcpu status in the XML or the number of online
        vcpus in the VM is not as expected.

        :param cpulist: a vcpu list set by setvcpu
        :param cpu_option: a string used by setvcpu
        :param cpus_online_pre: number of online vcpus before running setvcpu
        """
        if check.endswith("config"):
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        else:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(vmxml)

        # check the vcpu status in xml
        cpu_count = 0
        for cpu_id in cpulist:
            if "enable" in cpu_option:
                cpu_count += 1
                if (vmxml.vcpus.vcpu[cpu_id].get('enabled') != "yes"):
                    test.fail("vcpu status check fail")
            elif "disable" in cpu_option:
                cpu_count -= 1
                if (vmxml.vcpus.vcpu[cpu_id].get('enabled') != "no"):
                    test.fail("vcpu status check fail")
            else:
                test.fail("wrong vcpu status in xml")

        # login vm and check the number of online vcpu
        if check == "hotplug":
            if not utils_misc.check_if_vm_vcpu_match(cpu_count + cpus_online_pre, vm):
                test.fail("vcpu status check fail")

    def get_vcpu_order(vmxml):
        """
        return a {vcpu:order} dict based on vcpus in xml

        :param vmxml: the instance of VMXML class
        """
        vcpu_order = {}
        # get vcpu order based on the online vcpu
        for cpu_id in range(maxvcpu):
            if vmxml.vcpus.vcpu[cpu_id].get('enabled') == "yes":
                vcpu_order[cpu_id] = int(vmxml.vcpus.vcpu[cpu_id].get('order'))

        logging.debug("vcpu order based on vcpus in xml {}".format(vcpu_order))
        return vcpu_order.copy()

    def check_vcpu_order(cpulist, cpu_option, vmxml_pre):
        """
        check the value of vcpu order in xml. when the online vcpu changes,
        the order should be redefined.

        :param cpulist: a vcpu list set by setvcpu
        :param cpu_option: a string used by setvcpu such as config, enable and live
        :param vmxml_pre: the instance of VMXML class before run setvcpu
        """
        # Only one vcpu at a time is valid for a live setvcpu operation.
        if len(cpulist) == 1:
            vcpu = cpulist[0]
        else:
            test.fail("wrong vcpu value from cfg file")

        vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # get vcpus order dict from previous xml
        order_pre = get_vcpu_order(vmxml_pre)
        # get vcpus order dict from updated xml
        order_new = get_vcpu_order(vmxml_new)

        # calculate the right dict of vcpu order based on the previous one
        if "enable" in cpu_option:
            order_expect = order_pre.copy()
            order_expect[vcpu] = len(order_pre) + 1
        elif "disable" in cpu_option:
            for vcpuid, order in order_pre.items():
                if order > order_pre[vcpu]:
                    order_pre[vcpuid] = order - 1
            order_pre.pop(vcpu)
            order_expect = order_pre.copy()

        if order_expect != order_new:
            test.fail("vcpu order check fail")

    try:
        # define vcpu in xml
        vmxml.placement = vcpu_placement
        vmxml.vcpu = maxvcpu
        vmxml.current_vcpu = vcpu_current
        del vmxml.cpuset

        # define vcpus in xml
        vcpu_list = []
        vcpu = {}

        for vcpu_id in range(maxvcpu):
            vcpu['id'] = str(vcpu_id)

            if vcpu_id in vcpus_enabled:
                vcpu['enabled'] = 'yes'
            else:
                vcpu['enabled'] = 'no'

            if vcpu_id in vcpus_hotplug:
                vcpu['hotpluggable'] = 'yes'
            else:
                vcpu['hotpluggable'] = 'no'
            vcpu_list.append(vcpu.copy())

        vcpus_xml = vm_xml.VMVCPUSXML()
        vcpus_xml.vcpu = vcpu_list
        vmxml.vcpus = vcpus_xml
        vmxml.sync()
        logging.debug(vmxml)

        # run virsh setvcpu and check vcpus in xml
        if check == "coldplug":
            for cpus, option in setvcpu_option.items():
                result_to_check = virsh.setvcpu(vm_name, cpus, option, debug=True)
                cpulist = libvirt.cpus_parser(cpus)
                check_vcpu_status(cpulist, option)

        # start vm
        virsh.start(vm_name, debug=True, ignore_status=False)
        vm.wait_for_login(timeout=start_timeout)

        # turn setvcpu_option into an ordered dict
        if isinstance(setvcpu_option, tuple):
            d = collections.OrderedDict()
            length = len(setvcpu_option)
            if (length % 2):
                test.fail("test config fail")
            for i in range(length):
                if not (i % 2):
                    d[setvcpu_option[i]] = setvcpu_option[i+1]
            setvcpu_option = collections.OrderedDict()
            setvcpu_option = d.copy()

        if check.startswith("hotplug"):
            for cpus, option in setvcpu_option.items():
                vmxml_pre = vm_xml.VMXML.new_from_dumpxml(vm_name)
                cpus_online_pre = vm.get_cpu_count()
                result_to_check = virsh.setvcpu(vm_name, cpus, option, debug=True)
                if not status_error:
                    cpulist = libvirt.cpus_parser(cpus)
                    check_vcpu_status(cpulist, option, cpus_online_pre)
                    # check vcpu order only when live status of vcpu is changed
                    if 'config' not in option:
                        check_vcpu_order(cpulist, option, vmxml_pre)

        if 'result_to_check' in locals():
            if err_msg:
                err_msg = err_msg.split(";")
            libvirt.check_result(result_to_check, err_msg)

    finally:
        vmxml_backup.sync()
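
check_vcpu_order() verifies that libvirt renumbers the vcpu order when a hotpluggable vcpu is enabled or disabled: an enabled vcpu is appended with the next order value, and disabling one removes it and shifts every later order down by one. A small, self-contained illustration of that bookkeeping (the dicts are made-up examples, not values read from a real domain):

def expected_order_after_enable(order_pre, vcpu):
    order = order_pre.copy()
    order[vcpu] = len(order_pre) + 1
    return order

def expected_order_after_disable(order_pre, vcpu):
    order = {}
    for vcpu_id, pos in order_pre.items():
        if vcpu_id == vcpu:
            continue
        order[vcpu_id] = pos - 1 if pos > order_pre[vcpu] else pos
    return order

assert expected_order_after_enable({0: 1, 1: 2}, 3) == {0: 1, 1: 2, 3: 3}
assert expected_order_after_disable({0: 1, 1: 2, 3: 3}, 1) == {0: 1, 3: 2}
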
Example #9
def run(test, params, env):
    """
    Test emulatorpin tuning

    1) Positive testing
       1.1) get the current emulatorpin parameters for a running/shutoff guest
       1.2) set the current emulatorpin parameters for a running/shutoff guest
    2) Negative testing
       2.1) get emulatorpin parameters for a running/shutoff guest
       2.2) set emulatorpin parameters for a running/shutoff guest
    """

    # Run test case
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cgconfig = params.get("cgconfig", "on")
    cpulist = params.get("emulatorpin_cpulist")
    status_error = params.get("status_error", "no")
    change_parameters = params.get("change_parameters", "no")

    # Backup original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    test_dicts = dict(params)
    test_dicts['vm'] = vm

    host_cpus = int(open('/proc/cpuinfo').read().count('processor'))
    test_dicts['host_cpus'] = host_cpus

    cpu_list = None

    if cpulist:
        cpu_list = cpus_parser(cpulist)
        test_dicts['cpu_list'] = cpu_list
        logging.debug("CPU list is %s", cpu_list)

    # If the physical CPU N doesn't exist, it's an expected error
    if cpu_list and max(cpu_list) > host_cpus - 1:
        test_dicts["status_error"] = "yes"

    cg = utils_cgroup.CgconfigService()

    if cgconfig == "off":
        if cg.cgconfig_is_running():
            cg.cgconfig_stop()

    # positive and negative testing #########
    try:
        if status_error == "no":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts)
            else:
                set_emulatorpin_parameter(test_dicts)

        if status_error == "yes":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts)
            else:
                set_emulatorpin_parameter(test_dicts)
    finally:
        # Recover cgconfig and libvirtd service
        if not cg.cgconfig_is_running():
            cg.cgconfig_start()
            utils_libvirtd.libvirtd_restart()
        # Recover vm.
        vmxml_backup.sync()
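
The host CPU count above is obtained by counting "processor" entries in /proc/cpuinfo; a pinning request that names a CPU beyond the last existing one is then reclassified as an expected error. A short sketch of the same idea using the standard library instead (an alternative, not what the test itself uses):

import os

host_cpus = len(os.sched_getaffinity(0))       # CPUs usable by this process (Linux)
cpu_list = [0, 1, host_cpus]                   # illustrative emulatorpin request
expect_error = max(cpu_list) > host_cpus - 1   # True: that CPU does not exist
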
Example #10
def run(test, params, env):
    """
    Test guest numa setting
    """
    vcpu_num = int(params.get("vcpu_num", 2))
    max_mem = int(params.get("max_mem", 1048576))
    max_mem_unit = params.get("max_mem_unit", 'KiB')
    vcpu_placement = params.get("vcpu_placement", 'static')
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
    mode_dict = {'strict': 'bind', 'preferred': 'prefer',
                 'interleave': 'interleave'}

    # Prepare numatune memory parameter dict and list
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    memnode_tuple = ('memnode_cellid', 'memnode_mode', 'memnode_nodeset')
    numa_memnode = handle_param(memnode_tuple, params)

    if numa_memnode:
        if not libvirt_version.version_compare(1, 2, 7):
            test.cancel("Setting hugepages more specifically per "
                        "numa node not supported on current "
                        "version")

    # Prepare cpu numa cell parameter
    topology = {}
    topo_tuple = ('sockets', 'cores', 'threads')
    for key in topo_tuple:
        if params.get(key):
            topology[key] = params.get(key)

    cell_tuple = ('cell_id', 'cell_cpus', 'cell_memory')
    numa_cell = handle_param(cell_tuple, params)

    # Prepare qemu cmdline check parameter
    cmdline_tuple = ("qemu_cmdline",)
    cmdline_list = handle_param(cmdline_tuple, params)

    # Prepare hugepages parameter
    backup_list = []
    page_tuple = ('vmpage_size', 'vmpage_unit', 'vmpage_nodeset')
    page_list = handle_param(page_tuple, params)
    nr_pagesize_total = params.get("nr_pagesize_total")
    deallocate = False
    default_nr_hugepages_path = "/sys/kernel/mm/hugepages/hugepages-2048kB/"
    default_nr_hugepages_path += "nr_hugepages"

    if page_list:
        if not libvirt_version.version_compare(1, 2, 5):
            test.cancel("Setting hugepages more specifically per "
                        "numa node not supported on current "
                        "version")

    hp_cl = test_setup.HugePageConfig(params)
    default_hp_size = hp_cl.get_hugepage_size()
    supported_hp_size = hp_cl.get_multi_supported_hugepage_size()
    mount_path = []
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    qemu_conf_restore = False

    def _update_qemu_conf():
        """
        Mount hugepage path, update qemu conf then restart libvirtd
        """
        size_dict = {'2048': '2M', '1048576': '1G', '16384': '16M'}
        for page in page_list:
            if page['size'] not in supported_hp_size:
                test.cancel("Hugepage size [%s] isn't supported, "
                            "please verify kernel cmdline configuration."
                            % page['size'])
            m_path = "/dev/hugepages%s" % size_dict[page['size']]
            hp_cl.hugepage_size = int(page['size'])
            hp_cl.hugepage_path = m_path
            hp_cl.mount_hugepage_fs()
            mount_path.append(m_path)
        if mount_path:
            qemu_conf.hugetlbfs_mount = mount_path
            libvirtd.restart()

    try:
        # Get host numa node list
        host_numa_node = utils_misc.NumaInfo()
        node_list = host_numa_node.online_nodes
        logging.debug("host node list is %s", node_list)
        used_node = []
        if numa_memory.get('nodeset'):
            used_node += utlv.cpus_parser(numa_memory['nodeset'])
        if numa_memnode:
            for i in numa_memnode:
                used_node += utlv.cpus_parser(i['nodeset'])
        if page_list:
            host_page_tuple = ("hugepage_size", "page_num", "page_nodenum")
            h_list = handle_param(host_page_tuple, params)
            h_nodenum = [h_list[p_size]['nodenum']
                         for p_size in range(len(h_list))]
            for i in h_nodenum:
                used_node += utlv.cpus_parser(i)
        if used_node and not status_error:
            logging.debug("set node list is %s", used_node)
            used_node = list(set(used_node))
            for i in used_node:
                if i not in node_list:
                    test.cancel("%s in nodeset out of range" % i)
                mem_size = host_numa_node.read_from_node_meminfo(i, 'MemTotal')
                logging.debug("the memory total in the node %s is %s", i, mem_size)
                if not int(mem_size):
                    test.cancel("node %s memory is empty" % i)

        # set hugepage with qemu.conf and mount path
        arch = platform.machine()
        if default_hp_size == 2048 and 'ppc64' not in arch:
            hp_cl.setup()
            deallocate = True
        else:
            _update_qemu_conf()
            qemu_conf_restore = True

        # set hugepage with total number or per-node number
        if nr_pagesize_total:
            # Only set the total number of 2M hugepages; runtime updates of
            # the total 1G hugepage count are not supported yet.
            deallocate = True
            hp_cl.kernel_hp_file = default_nr_hugepages_path
            hp_cl.target_hugepages = int(nr_pagesize_total)
            hp_cl.set_hugepages()
        if page_list:
            hp_size = [h_list[p_size]['size'] for p_size in range(len(h_list))]
            multi_hp_size = hp_cl.get_multi_supported_hugepage_size()
            for size in hp_size:
                if size not in multi_hp_size:
                    test.cancel("The hugepage size %s not "
                                "supported or not configured under"
                                " current running kernel." % size)
            # backup node page setting and set new value
            for i in h_list:
                node_val = hp_cl.get_node_num_huge_pages(i['nodenum'],
                                                         i['size'])
                # Set hugepages per node if the current value is not enough;
                # runtime updates of the per-node 1G hugepage count are
                # supported now.
                if int(i['num']) > node_val:
                    node_dict = i.copy()
                    node_dict['num'] = node_val
                    backup_list.append(node_dict)
                    hp_cl.set_node_num_huge_pages(i['num'],
                                                  i['nodenum'],
                                                  i['size'])
                    node_val_after_set = hp_cl.get_node_num_huge_pages(i['nodenum'],
                                                                       i['size'])
                    if node_val_after_set < int(i['num']):
                        test.cancel("There is not enough memory to allocate.")

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.vcpu = vcpu_num
        vmxml.max_mem = max_mem
        vmxml.max_mem_unit = max_mem_unit
        vmxml.current_mem = max_mem
        vmxml.current_mem_unit = max_mem_unit

        # numatune setting
        if numa_memnode:
            vmxml.numa_memory = numa_memory
            vmxml.numa_memnode = numa_memnode
            del vmxml.numa_memory
        if numa_memory:
            vmxml.numa_memory = numa_memory

        # vcpu placement setting
        vmxml.placement = vcpu_placement

        # guest numa cpu setting
        vmcpuxml = libvirt_xml.vm_xml.VMCPUXML()
        vmcpuxml.xml = "<cpu><numa/></cpu>"
        if topology:
            vmcpuxml.topology = topology
        logging.debug(vmcpuxml.numa_cell)
        vmcpuxml.numa_cell = numa_cell
        logging.debug(vmcpuxml.numa_cell)
        vmxml.cpu = vmcpuxml

        # hugepages setting
        if page_list:
            membacking = libvirt_xml.vm_xml.VMMemBackingXML()
            hugepages = libvirt_xml.vm_xml.VMHugepagesXML()
            pagexml_list = []
            for i in range(len(page_list)):
                pagexml = hugepages.PageXML()
                pagexml.update(page_list[i])
                pagexml_list.append(pagexml)
            hugepages.pages = pagexml_list
            membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            session = vm.wait_for_login()
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug("vm xml after start is %s", vmxml_new)

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if status_error:
                return
            else:
                test.fail("Test failed in positive case.\n error:"
                          " %s\n%s" % (e, bug_url))

        vm_pid = vm.get_pid()
        # numa hugepage check
        if page_list:
            with open("/proc/%s/numa_maps" % vm_pid) as numa_maps:
                numa_map_info = numa_maps.read()
            hugepage_info = re.findall(".*file=\S*hugepages.*", numa_map_info)
            if not hugepage_info:
                test.fail("Can't find hugepages usage info in vm "
                          "numa maps")
            else:
                logging.debug("The hugepage info in numa_maps is %s" %
                              hugepage_info)
                map_dict = {}
                usage_dict = {}
                node_pattern = r"\s(\S+):(\S+)\s.*ram-node(\d+).*\s"
                node_pattern += "N(\d+)=(\d+)"
                for map_info in hugepage_info:
                    for (mem_mode, mem_num, cell_num, host_node_num,
                         vm_page_num) in re.findall(node_pattern, map_info):
                        usage_dict[mem_mode] = utlv.cpus_parser(mem_num)
                        usage_dict[host_node_num] = vm_page_num
                        map_dict[cell_num] = usage_dict.copy()
                logging.debug("huagepage info in vm numa maps is %s",
                              map_dict)
                memnode_dict = {}
                usage_dict = {}
                if numa_memnode:
                    for i in numa_memnode:
                        node = utlv.cpus_parser(i['nodeset'])
                        mode = mode_dict[i['mode']]
                        usage_dict[mode] = node
                        memnode_dict[i['cellid']] = usage_dict.copy()
                    logging.debug("memnode setting dict is %s", memnode_dict)
                    for k in list(memnode_dict.keys()):
                        for mk in list(memnode_dict[k].keys()):
                            if memnode_dict[k][mk] != map_dict[k][mk]:
                                test.fail("vm pid numa map dict %s"
                                          " not expected" % map_dict)

        # qemu command line check
        with open("/proc/%s/cmdline" % vm_pid) as f_cmdline:
            q_cmdline_list = f_cmdline.read().split("\x00")
        logging.debug("vm qemu cmdline list is %s" % q_cmdline_list)
        for cmd in cmdline_list:
            logging.debug("checking '%s' in qemu cmdline", cmd['cmdline'])
            p_found = False
            for q_cmd in q_cmdline_list:
                if re.search(cmd['cmdline'], q_cmd):
                    p_found = True
                    break
                else:
                    continue
            if not p_found:
                test.fail("%s not found in vm qemu cmdline" % cmd['cmdline'])

        # vm inside check
        vm_cpu_info = utils_misc.get_cpu_info(session)
        logging.debug("lscpu output dict in vm is %s", vm_cpu_info)
        session.close()
        node_num = int(vm_cpu_info["NUMA node(s)"])
        if node_num != len(numa_cell):
            test.fail("node number %s in vm is not expected" % node_num)
        for i in range(len(numa_cell)):
            cpu_str = vm_cpu_info["NUMA node%s CPU(s)" % i]
            vm_cpu_list = utlv.cpus_parser(cpu_str)
            cpu_list = utlv.cpus_parser(numa_cell[i]["cpus"])
            if vm_cpu_list != cpu_list:
                test.fail("vm node %s cpu list %s not expected"
                          % (i, vm_cpu_list))
        if topology:
            vm_topo_tuple = ("Socket(s)", "Core(s) per socket",
                             "Thread(s) per core")
            for i in range(len(topo_tuple)):
                topo_info = vm_cpu_info[vm_topo_tuple[i]]
                if topo_info != topology[topo_tuple[i]]:
                    test.fail("%s in vm topology not expected." % topo_tuple[i])
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if page_list:
            for i in backup_list:
                hp_cl.set_node_num_huge_pages(i['num'],
                                              i['nodenum'], i['size'])
        if deallocate:
            hp_cl.deallocate = deallocate
            hp_cl.cleanup()
        if qemu_conf_restore:
            qemu_conf.restore()
            libvirtd.restart()
            for mt_path in mount_path:
                try:
                    process.run("umount %s" % mt_path, shell=True)
                except process.CmdError:
                    logging.warning("umount %s failed" % mt_path)
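The numa_maps verification in the example above hinges on the node_pattern regular expression. As a quick illustration (not part of the test), here is a minimal standalone sketch that applies the same pattern to a made-up but representative /proc/<pid>/numa_maps line; the path and page counts are purely illustrative assumptions.

import re

# Illustration only: a fabricated numa_maps line for a hugepage-backed
# qemu memory region bound to host node 1 and backing guest ram-node1.
sample_line = ("7f2c40000000 bind:1 "
               "file=/dev/hugepages/libvirt/qemu/qemu_back_mem._objects_ram-node1.XYZ "
               "huge dirty=512 N1=512 kernelpagesize_kB=2048")

node_pattern = r"\s(\S+):(\S+)\s.*ram-node(\d+).*\sN(\d+)=(\d+)"
for mem_mode, mem_num, cell_num, host_node, page_num in re.findall(node_pattern,
                                                                   sample_line):
    # mem_mode/mem_num reflect the numatune policy ('bind' to node 1),
    # cell_num is the guest NUMA cell backing this mapping, and
    # host_node/page_num report how many pages sit on which host node.
    print(mem_mode, mem_num, cell_num, host_node, page_num)
# Expected output: bind 1 1 1 512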
Example No. 11
def run(test, params, env):
    """
    Test emulatorpin tuning

    1) Positive testing
       1.1) get the current emulatorpin parameters for a running/shutoff guest
       1.2) set the current emulatorpin parameters for a running/shutoff guest
    2) Negative testing
       2.1) get emulatorpin parameters for a running/shutoff guest
       2.2) set emulatorpin parameters for a running/shutoff guest
    """

    # Run test case
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cgconfig = params.get("cgconfig", "on")
    cpulist = params.get("emulatorpin_cpulist")
    status_error = params.get("status_error", "no")
    change_parameters = params.get("change_parameters", "no")

    # Backup original vm
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    emulatorpin_placement = params.get("emulatorpin_placement", "")
    if emulatorpin_placement:
        vm.destroy()
        vmxml.placement = emulatorpin_placement
        vmxml.sync()
        vm.start()

    test_dicts = dict(params)
    test_dicts['vm'] = vm

    host_cpus = utils.count_cpus()
    test_dicts['host_cpus'] = host_cpus
    cpu_max = int(host_cpus) - 1

    cpu_list = None

    # Assemble cpu list for positive test
    if status_error == "no":
        if cpulist is None:
            pass
        elif cpulist == "x":
            cpulist = random.choice(utils.cpu_online_map())
        elif cpulist == "x-y":
            cpulist = "0-%s" % cpu_max
        elif cpulist == "x,y":
            cpulist = ','.join(random.sample(utils.cpu_online_map(), 2))
        elif cpulist == "x-y,^z":
            cpulist = "0-%s,^%s" % (cpu_max, cpu_max)
        elif cpulist == "-1":
            cpulist = "-1"
        elif cpulist == "out_of_max":
            cpulist = str(cpu_max + 1)
        else:
            raise error.TestNAError("CPU-list=%s is not recognized." % cpulist)
    test_dicts['emulatorpin_cpulist'] = cpulist
    if cpulist:
        cpu_list = cpus_parser(cpulist)
        test_dicts['cpu_list'] = cpu_list
        logging.debug("CPU list is %s", cpu_list)

    cg = utils_cgroup.CgconfigService()

    if cgconfig == "off":
        if cg.cgconfig_is_running():
            cg.cgconfig_stop()

    # positive and negative testing #########
    try:
        if status_error == "no":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts)
            else:
                set_emulatorpin_parameter(test_dicts)

        if status_error == "yes":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts)
            else:
                set_emulatorpin_parameter(test_dicts)
    finally:
        # Recover cgconfig and libvirtd service
        if not cg.cgconfig_is_running():
            cg.cgconfig_start()
            utils_libvirtd.libvirtd_restart()
        # Recover vm.
        vmxml_backup.sync()
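In the positive branch above, cpulist is assembled in the libvirt cpu-list forms "x", "x-y", "x,y" and "x-y,^z" and then expanded by cpus_parser(). The helper below is only a rough stand-in that mimics how such strings are assumed to expand into plain cpu index lists; the real test relies on the project's cpus_parser().

# Rough stand-in for cpus_parser(); illustration only, based on the libvirt
# cpu-list syntax ("0-3", "1,5", "0-3,^2"), not the project's implementation.
def expand_cpulist(cpulist):
    included, excluded = set(), set()
    for part in cpulist.split(','):
        target = excluded if part.startswith('^') else included
        part = part.lstrip('^')
        if '-' in part:
            start, end = part.split('-')
            target.update(range(int(start), int(end) + 1))
        else:
            target.add(int(part))
    return sorted(included - excluded)

print(expand_cpulist("0-3"))      # [0, 1, 2, 3]
print(expand_cpulist("0-3,^3"))   # [0, 1, 2]
print(expand_cpulist("1,5"))      # [1, 5]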
Example No. 12
def guest_numa_check(vm, exp_vcpu):
    """
    To check numa node values

    :param vm: VM object
    :param exp_vcpu: dict of expected vcpus
    :return: True if check succeed, False otherwise
    """
    logging.debug("Check guest numa")
    session = vm.wait_for_login()
    vm_cpu_info = utils_misc.get_cpu_info(session)
    session.close()
    vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm.name)
    try:
        node_num_xml = len(vmxml.cpu.numa_cell)
    except (TypeError, LibvirtXMLNotFoundError):
        # handle the case of no numa cell in the guest xml; by default there is node 0
        node_num_xml = 1
    node_num_guest = int(vm_cpu_info["NUMA node(s)"])
    exp_num_nodes = node_num_xml
    status = True
    for node in range(node_num_xml):
        try:
            node_cpu_xml = vmxml.cpu.numa_cell[node]['cpus']
            node_cpu_xml = libvirt.cpus_parser(node_cpu_xml)
        except (TypeError, LibvirtXMLNotFoundError):
            try:
                node_cpu_xml = vmxml.current_vcpu
            except LibvirtXMLNotFoundError:
                node_cpu_xml = vmxml.vcpu
            node_cpu_xml = list(range(int(node_cpu_xml)))
        try:
            node_mem_xml = vmxml.cpu.numa_cell[node]['memory']
        except (TypeError, LibvirtXMLNotFoundError):
            node_mem_xml = vmxml.memory
        node_mem_guest = int(vm.get_totalmem_sys(node=node))
        node_cpu_xml_copy = node_cpu_xml[:]
        for cpu in node_cpu_xml_copy:
            if int(cpu) >= int(exp_vcpu["guest_live"]):
                node_cpu_xml.remove(cpu)
        if (not node_cpu_xml) and node_mem_guest == 0:
            exp_num_nodes -= 1
        try:
            node_cpu_guest = vm_cpu_info["NUMA node%s CPU(s)" % node]
            node_cpu_guest = libvirt.cpus_parser(node_cpu_guest)
        except KeyError:
            node_cpu_guest = []
        # Check cpu
        if node_cpu_xml != node_cpu_guest:
            status = False
            logging.error("Mismatch in cpus in node %s: xml %s guest %s", node,
                          node_cpu_xml, node_cpu_guest)
        # Check memory
        if int(node_mem_xml) != node_mem_guest:
            status = False
            logging.error("Mismatch in memory in node %s: xml %s guest %s",
                          node, node_mem_xml, node_mem_guest)
    # Check no. of nodes
    if exp_num_nodes != node_num_guest:
        status = False
        logging.error("Mismatch in numa nodes expected nodes: %s guest: %s",
                      exp_num_nodes, node_num_guest)
    return status
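guest_numa_check() prunes each XML cell before comparing it with the guest view: vcpu ids at or beyond the live vcpu count are dropped, and a cell left with no cpus and no guest-visible memory is no longer expected to appear as a NUMA node inside the guest. A small standalone sketch of that pruning rule, with illustrative values:

# Illustration only; values are made up.
def prune_cell(xml_cpus, guest_live_vcpus, guest_node_mem_kb):
    # Keep only vcpus that are actually online in the guest.
    cpus = [c for c in xml_cpus if c < guest_live_vcpus]
    # A cell with neither cpus nor memory should not show up as a guest node.
    node_visible = bool(cpus) or guest_node_mem_kb != 0
    return cpus, node_visible

print(prune_cell([0, 1, 2, 3], 2, 1048576))  # ([0, 1], True)
print(prune_cell([4, 5], 2, 0))              # ([], False) -> node not expected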
Example No. 13
def run(test, params, env):
    """
    Test emulatorpin tuning

    1) Positive testing
       1.1) get the current emulatorpin parameters for a running/shutoff guest
       1.2) set the current emulatorpin parameters for a running/shutoff guest
    2) Negative testing
       2.1) get emulatorpin parameters for a running/shutoff guest
       2.2) set emulatorpin parameters for a running/shutoff guest
    """

    # Run test case
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cgconfig = params.get("cgconfig", "on")
    cpulist = params.get("emulatorpin_cpulist")
    status_error = params.get("status_error", "no")
    change_parameters = params.get("change_parameters", "no")

    # Backup original vm
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    emulatorpin_placement = params.get("emulatorpin_placement", "")
    if emulatorpin_placement:
        vm.destroy()
        vmxml.placement = emulatorpin_placement
        vmxml.sync()
        try:
            vm.start()
        except VMStartError as detail:
            # Recover the VM and fail out early
            vmxml_backup.sync()
            logging.debug("Used VM XML:\n %s", vmxml)
            test.fail("VM Fails to start: %s" % detail)

    test_dicts = dict(params)
    test_dicts['vm'] = vm

    host_cpus = cpu.online_cpus_count()
    test_dicts['host_cpus'] = host_cpus
    cpu_max = int(host_cpus) - 1

    cpu_list = None

    # Assemble cpu list for positive test
    if status_error == "no":
        if cpulist is None:
            pass
        elif cpulist == "x":
            cpu_online_map = list(map(str, cpu.cpu_online_list()))
            cpulist = random.choice(cpu_online_map)
        elif cpulist == "x-y":
            # By default, the emulator is pinned to all cpus and the
            # 'cputune/emulatorpin' element may not exist in the VM's XML.
            # libvirt does nothing if the emulator is pinned to the same
            # cpus again, which means the VM's XML still won't have that
            # element. So for testing we should avoid that value (0-$cpu_max).
            if cpu_max < 2:
                cpulist = "0-0"
            else:
                cpulist = "0-%s" % (cpu_max - 1)
        elif cpulist == "x,y":
            cpu_online_map = list(map(str, cpu.cpu_online_list()))
            cpulist = ','.join(random.sample(cpu_online_map, 2))
        elif cpulist == "x-y,^z":
            cpulist = "0-%s,^%s" % (cpu_max, cpu_max)
        elif cpulist == "-1":
            cpulist = "-1"
        elif cpulist == "out_of_max":
            cpulist = str(cpu_max + 1)
        else:
            test.cancel("CPU-list=%s is not recognized." % cpulist)
    test_dicts['emulatorpin_cpulist'] = cpulist
    if cpulist:
        cpu_list = cpus_parser(cpulist)
        test_dicts['cpu_list'] = cpu_list
        logging.debug("CPU list is %s", cpu_list)

    cg = utils_cgroup.CgconfigService()

    if cgconfig == "off":
        if cg.cgconfig_is_running():
            cg.cgconfig_stop()

    # positive and negative testing #########
    try:
        if status_error == "no":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts, test)
            else:
                set_emulatorpin_parameter(test_dicts, test)

        if status_error == "yes":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts, test)
            else:
                set_emulatorpin_parameter(test_dicts, test)
    finally:
        # Recover cgconfig and libvirtd service
        if not cg.cgconfig_is_running():
            cg.cgconfig_start()
            utils_libvirtd.libvirtd_restart()
        # Recover vm.
        vmxml_backup.sync()
Example No. 14
def run(test, params, env):
    """
    Test emulatorpin tuning

    1) Positive testing
       1.1) get the current emulatorpin parameters for a running/shutoff guest
       1.2) set the current emulatorpin parameters for a running/shutoff guest
    2) Negative testing
       2.1) get emulatorpin parameters for a running/shutoff guest
       2.2) set emulatorpin parameters for a running/shutoff guest
    """

    # Run test case
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cgconfig = params.get("cgconfig", "on")
    cpulist = params.get("emulatorpin_cpulist")
    status_error = params.get("status_error", "no")
    change_parameters = params.get("change_parameters", "no")

    # Backup original vm
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    emulatorpin_placement = params.get("emulatorpin_placement", "")
    if emulatorpin_placement:
        vm.destroy()
        vmxml.placement = emulatorpin_placement
        vmxml.sync()
        try:
            vm.start()
        except VMStartError as detail:
            # Recover the VM and fail out early
            vmxml_backup.sync()
            logging.debug("Used VM XML:\n %s", vmxml)
            test.fail("VM Fails to start: %s" % detail)

    test_dicts = dict(params)
    test_dicts['vm'] = vm

    host_cpus = cpu.online_cpus_count()
    test_dicts['host_cpus'] = host_cpus
    cpu_max = int(host_cpus) - 1

    cpu_list = None

    # Assemble cpu list for positive test
    if status_error == "no":
        if cpulist is None:
            pass
        elif cpulist == "x":
            cpu_online_map = list(map(str, cpu.cpu_online_list()))
            cpulist = random.choice(cpu_online_map)
        elif cpulist == "x-y":
            # By default, the emulator is pinned to all cpus and the
            # 'cputune/emulatorpin' element may not exist in the VM's XML.
            # libvirt does nothing if the emulator is pinned to the same
            # cpus again, which means the VM's XML still won't have that
            # element. So for testing we should avoid that value (0-$cpu_max).
            if cpu_max < 2:
                cpulist = "0-0"
            else:
                cpulist = "0-%s" % (cpu_max - 1)
        elif cpulist == "x,y":
            cpu_online_map = list(map(str, cpu.cpu_online_list()))
            cpulist = ','.join(random.sample(cpu_online_map, 2))
        elif cpulist == "x-y,^z":
            cpulist = "0-%s,^%s" % (cpu_max, cpu_max)
        elif cpulist == "-1":
            cpulist = "-1"
        elif cpulist == "out_of_max":
            cpulist = str(cpu_max + 1)
        else:
            test.cancel("CPU-list=%s is not recognized."
                        % cpulist)
    test_dicts['emulatorpin_cpulist'] = cpulist
    if cpulist:
        cpu_list = cpus_parser(cpulist)
        test_dicts['cpu_list'] = cpu_list
        logging.debug("CPU list is %s", cpu_list)

    cg = utils_cgroup.CgconfigService()

    if cgconfig == "off":
        if cg.cgconfig_is_running():
            cg.cgconfig_stop()

    # positive and negative testing #########
    try:
        if status_error == "no":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts, test)
            else:
                set_emulatorpin_parameter(test_dicts, test)

        if status_error == "yes":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts, test)
            else:
                set_emulatorpin_parameter(test_dicts, test)
    finally:
        # Recover cgconfig and libvirtd service
        if not cg.cgconfig_is_running():
            cg.cgconfig_start()
            utils_libvirtd.libvirtd_restart()
        # Recover vm.
        vmxml_backup.sync()
Example No. 15
def run(test, params, env):
    """
    Test emulatorpin tuning

    1) Positive testing
       1.1) get the current emulatorpin parameters for a running/shutoff guest
       1.2) set the current emulatorpin parameters for a running/shutoff guest
    2) Negative testing
       2.1) get emulatorpin parameters for a running/shutoff guest
       2.2) set emulatorpin parameters for a running/shutoff guest
    """

    # Run test case
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cgconfig = params.get("cgconfig", "on")
    cpulist = params.get("emulatorpin_cpulist")
    status_error = params.get("status_error", "no")
    change_parameters = params.get("change_parameters", "no")

    # Backup original vm
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    emulatorpin_placement = params.get("emulatorpin_placement", "")
    if emulatorpin_placement:
        vm.destroy()
        vmxml.placement = emulatorpin_placement
        vmxml.sync()
        vm.start()

    test_dicts = dict(params)
    test_dicts['vm'] = vm

    host_cpus = utils.count_cpus()
    test_dicts['host_cpus'] = host_cpus
    cpu_max = int(host_cpus) - 1

    cpu_list = None

    # Assemble cpu list for positive test
    if status_error == "no":
        if cpulist is None:
            pass
        elif cpulist == "x":
            cpulist = random.choice(utils.cpu_online_map())
        elif cpulist == "x-y":
            cpulist = "0-%s" % cpu_max
        elif cpulist == "x,y":
            cpulist = ','.join(random.sample(utils.cpu_online_map(), 2))
        elif cpulist == "x-y,^z":
            cpulist = "0-%s,^%s" % (cpu_max, cpu_max)
        elif cpulist == "-1":
            cpulist = "-1"
        elif cpulist == "out_of_max":
            cpulist = str(cpu_max + 1)
        else:
            raise error.TestNAError("CPU-list=%s is not recognized."
                                    % cpulist)
    test_dicts['emulatorpin_cpulist'] = cpulist
    if cpulist:
        cpu_list = cpus_parser(cpulist)
        test_dicts['cpu_list'] = cpu_list
        logging.debug("CPU list is %s", cpu_list)

    cg = utils_cgroup.CgconfigService()

    if cgconfig == "off":
        if cg.cgconfig_is_running():
            cg.cgconfig_stop()

    # positive and negative testing #########
    try:
        if status_error == "no":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts)
            else:
                set_emulatorpin_parameter(test_dicts)

        if status_error == "yes":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts)
            else:
                set_emulatorpin_parameter(test_dicts)
    finally:
        # Recover cgconfig and libvirtd service
        if not cg.cgconfig_is_running():
            cg.cgconfig_start()
            utils_libvirtd.libvirtd_restart()
        # Recover vm.
        vmxml_backup.sync()
Example No. 16
def guest_numa_check(vm, exp_vcpu):
    """
    To check numa node values

    :param vm: VM object
    :param exp_vcpu: dict of expected vcpus
    :return: True if check succeed, False otherwise
    """
    logging.debug("Check guest numa")
    session = vm.wait_for_login()
    vm_cpu_info = utils_misc.get_cpu_info(session)
    session.close()
    vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm.name)
    try:
        node_num_xml = len(vmxml.cpu.numa_cell)
    except (TypeError, LibvirtXMLNotFoundError):
        # handle the case of no numa cell in the guest xml; by default there is node 0
        node_num_xml = 1
    node_num_guest = int(vm_cpu_info["NUMA node(s)"])
    exp_num_nodes = node_num_xml
    status = True
    for node in range(node_num_xml):
        try:
            node_cpu_xml = vmxml.cpu.numa_cell[node]['cpus']
            node_cpu_xml = libvirt.cpus_parser(node_cpu_xml)
        except (TypeError, LibvirtXMLNotFoundError):
            try:
                node_cpu_xml = vmxml.current_vcpu
            except LibvirtXMLNotFoundError:
                node_cpu_xml = vmxml.vcpu
            node_cpu_xml = list(range(int(node_cpu_xml)))
        try:
            node_mem_xml = vmxml.cpu.numa_cell[node]['memory']
        except (TypeError, LibvirtXMLNotFoundError):
            node_mem_xml = vmxml.memory
        node_mem_guest = int(vm.get_totalmem_sys(node=node))
        node_cpu_xml_copy = node_cpu_xml[:]
        for cpu in node_cpu_xml_copy:
            if int(cpu) >= int(exp_vcpu["guest_live"]):
                node_cpu_xml.remove(cpu)
        if (not node_cpu_xml) and node_mem_guest == 0:
            exp_num_nodes -= 1
        try:
            node_cpu_guest = vm_cpu_info["NUMA node%s CPU(s)" % node]
            node_cpu_guest = libvirt.cpus_parser(node_cpu_guest)
        except KeyError:
            node_cpu_guest = []
        # Check cpu
        if node_cpu_xml != node_cpu_guest:
            status = False
            logging.error("Mismatch in cpus in node %s: xml %s guest %s", node,
                          node_cpu_xml, node_cpu_guest)
        # Check memory
        if int(node_mem_xml) != node_mem_guest:
            status = False
            logging.error("Mismatch in memory in node %s: xml %s guest %s", node,
                          node_mem_xml, node_mem_guest)
    # Check no. of nodes
    if exp_num_nodes != node_num_guest:
        status = False
        logging.error("Mismatch in numa nodes expected nodes: %s guest: %s", exp_num_nodes,
                      node_num_guest)
    return status
Example No. 17
def set_numa_parameter(test, params, cgstop):
    """
    Set the numa parameters
    :params: the parameter dictionary
    :cgstop: whether control groups were stopped before this call
    """
    vm_name = params.get("main_vm")
    mode = params.get("numa_mode")
    nodeset = params.get("numa_nodeset")
    options = params.get("options", None)
    start_vm = params.get("start_vm", "yes")

    # Get host numa node list
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes_withmem
    logging.debug("host online nodes with memory %s", node_list)

    # Get original numatune memory mode
    ori_mode = ''
    ori_numatune = {}
    if libvirt_xml.VMXML.new_from_dumpxml(vm_name).xmltreefile.find('numatune'):
        ori_numatune = libvirt_xml.VMXML.get_numa_memory_params(vm_name)
        ori_mode = ori_numatune['mode'] if 'mode' in ori_numatune else ''

    # Don't use libvirt_xml here because we are testing the virsh numatune command
    result = virsh.numatune(vm_name, mode, nodeset, options, debug=True)
    status = result.exit_status

    # Check status_error
    status_error = params.get("status_error", "no")

    # For a running domain, the mode can't be changed, and the nodeset can
    # be changed only if the domain was started with a mode of 'strict',
    # which should be the same as the original mode
    if ori_mode == mode and (ori_numatune.get('nodeset') == nodeset or not nodeset):
        status_error = "no"
    if mode == "strict" and start_vm == "yes":
        status_error = "no"
    if ori_mode and ori_mode != mode and start_vm == "yes":
        status_error = "yes"

    # TODO: the '--config' option only takes effect on the next boot, and for
    # a shutoff guest the '--current' option is equivalent to '--config'. If a
    # nodeset that exceeds the host's NUMA nodes is applied to a shutoff guest
    # with '--config' or '--current', virsh numatune returns 0 rather than 1,
    # because libvirt only validates the nodeset when the guest starts;
    # the current virsh.start() can't cover that case yet.

    if status_error == "yes":
        if status:
            logging.info("It's an expected error")
        else:
            # If we stopped control groups, then we expect a different
            # result in this failure case; however, if there were no
            # control groups to stop, then don't error needlessly
            if not cgstop:
                test.fail("Unexpected return code %d" % status)
            else:
                logging.info("Control groups stopped, thus expected success")
    elif status_error == "no":
        if status:
            used_node = cpus_parser(nodeset)
            if not set(used_node).issubset(node_list):
                test.cancel("Host does not support requested"
                            " nodeset %s" % used_node)
            else:
                test.fail(result.stderr)
        else:
            if check_numatune_xml(params):
                logging.info(result.stdout.strip())
            else:
                test.fail("The 'mode' or/and 'nodeset' are"
                          " inconsistent with numatune XML")
Example No. 18
def set_numa_parameter(test, params, cgstop):
    """
    Set the numa parameters
    :params: the parameter dictionary
    :cgstop: whether control groups were stopped before this call
    """
    vm_name = params.get("main_vm")
    mode = params.get("numa_mode")
    nodeset = params.get("numa_nodeset")
    options = params.get("options", None)
    start_vm = params.get("start_vm", "yes")

    # Get host numa node list
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes_withmem
    logging.debug("host online nodes with memory %s", node_list)

    # Get original numatune memory mode
    ori_mode = ''
    ori_numatune = {}
    if libvirt_xml.VMXML.new_from_dumpxml(vm_name).xmltreefile.find(
            'numatune'):
        ori_numatune = libvirt_xml.VMXML.get_numa_memory_params(vm_name)
        ori_mode = ori_numatune['mode'] if 'mode' in ori_numatune else ''

    # Don't use libvirt_xml here because we are testing the virsh numatune command
    result = virsh.numatune(vm_name, mode, nodeset, options, debug=True)
    status = result.exit_status

    # Check status_error
    status_error = params.get("status_error", "no")

    # For a running domain, the mode can't be changed, and the nodeset can
    # be changed only if the domain was started with a mode of 'strict',
    # which should be the same as the original mode
    if ori_mode == mode and (ori_numatune.get('nodeset') == nodeset
                             or not nodeset):
        status_error = "no"
    if mode == "strict" and start_vm == "yes":
        status_error = "no"
    if ori_mode and ori_mode != mode and start_vm == "yes":
        status_error = "yes"

    # TODO: the '--config' option only takes effect on the next boot, and for
    # a shutoff guest the '--current' option is equivalent to '--config'. If a
    # nodeset that exceeds the host's NUMA nodes is applied to a shutoff guest
    # with '--config' or '--current', virsh numatune returns 0 rather than 1,
    # because libvirt only validates the nodeset when the guest starts;
    # the current virsh.start() can't cover that case yet.

    if status_error == "yes":
        if status:
            logging.info("It's an expected error")
        else:
            # If we stopped control groups, then we expect a different
            # result in this failure case; however, if there were no
            # control groups to stop, then don't error needlessly
            if not cgstop:
                test.fail("Unexpected return code %d" % status)
            else:
                logging.info("Control groups stopped, thus expected success")
    elif status_error == "no":
        if status:
            used_node = cpus_parser(nodeset)
            if not set(used_node).issubset(node_list):
                test.cancel("Host does not support requested"
                            " nodeset %s" % used_node)
            else:
                test.fail(result.stderr)
        else:
            if check_numatune_xml(params):
                logging.info(result.stdout.strip())
            else:
                test.fail("The 'mode' or/and 'nodeset' are"
                          " inconsistent with numatune XML")
Example No. 19
def run(test, params, env):
    """
    Test setvcpu feature as follows:
    positive test:
        1. run virsh setvcpu with option --enable and --disable on inactive vm
           and check xml
        2. run virsh setvcpu with option --enable and --disable on active vm and
           check xml and number of online vcpu
        3. run virsh setvcpu with option --enable, --disable and --config on
           active vm and check inactive xml
        4. check the vcpu order when hot plug/unplug specific vcpu
    negative test:
        1. run virsh setvcpu with more than one vcpu on active vm and check error
        2. run virsh setvcpu to hotplug/unplug invalid vcpu and check error
        3. enable/disable vcpu0 when vm is active/inactive and check error
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vcpu_placement = params.get("vcpu_placement", "static")
    maxvcpu = int(params.get("maxvcpu", "8"))
    vcpu_current = params.get("vcpu_current", "1")
    vcpus_enabled = ast.literal_eval(params.get("vcpus_enabled", "{0}"))
    vcpus_hotplug = ast.literal_eval(params.get("vcpus_hotpluggable", "{0}"))
    setvcpu_option = ast.literal_eval(params.get("setvcpu_option", "{}"))
    setvcpu_action = params.get("setvcpu_action", "")
    start_timeout = int(params.get("start_timeout", "60"))
    check = params.get("check", "")
    err_msg = params.get("err_msg", "")
    status_error = "yes" == params.get("status_error", "no")

    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    def check_vcpu_status(cpulist, cpu_option, cpus_online_pre=1):
        """
        Fail the test if the vcpu status in the xml or the number of online
        vcpus in the vm is not as expected

        :param cpulist: a vcpu list set by setvcpu
        :param cpu_option: a string used by setvcpu
        :param cpus_online_pre: number of online vcpu before running setvcpu
        """
        if check.endswith("config"):
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        else:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(vmxml)

        # check the vcpu status in xml
        cpu_count = 0
        for cpu_id in cpulist:
            if "enable" in cpu_option:
                cpu_count += 1
                if (vmxml.vcpus.vcpu[cpu_id].get('enabled') != "yes"):
                    test.fail("vcpu status check fail")
            elif "disable" in cpu_option:
                cpu_count -= 1
                if (vmxml.vcpus.vcpu[cpu_id].get('enabled') != "no"):
                    test.fail("vcpu status check fail")
            else:
                test.fail("wrong vcpu status in xml")

        # login vm and check the number of online vcpu
        if check == "hotplug":
            if not utils_misc.check_if_vm_vcpu_match(
                    cpu_count + cpus_online_pre, vm):
                test.fail("vcpu status check fail")

    def get_vcpu_order(vmxml):
        """
        return a {vcpu:order} dict based on vcpus in xml

        :param vmxml: the instance of VMXML class
        """
        vcpu_order = {}
        # get vcpu order based on the online vcpu
        for cpu_id in range(maxvcpu):
            if vmxml.vcpus.vcpu[cpu_id].get('enabled') == "yes":
                vcpu_order[cpu_id] = int(vmxml.vcpus.vcpu[cpu_id].get('order'))

        logging.debug("vcpu order based on vcpus in xml {}".format(vcpu_order))
        return vcpu_order.copy()

    def check_vcpu_order(cpulist, cpu_option, vmxml_pre):
        """
        Check the vcpu order values in the xml; when the set of online vcpus
        changes, the order should be recalculated.

        :param cpulist: a vcpu list set by setvcpu
        :param cpu_option: a string used by setvcpu such as config, enable and live
        :param vmxml_pre: the instance of VMXML class before run setvcpu
        """
        # only one vcpu at a time is valid for a live setvcpu operation
        if len(cpulist) == 1:
            vcpu = cpulist[0]
        else:
            test.fail("wrong vcpu value from cfg file")

        vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # get vcpus order dict from previous xml
        order_pre = get_vcpu_order(vmxml_pre)
        # get vcpus order dict from updated xml
        order_new = get_vcpu_order(vmxml_new)

        # calculate the right dict of vcpu order based on the previous one
        if "enable" in cpu_option:
            order_expect = order_pre.copy()
            order_expect[vcpu] = len(order_pre) + 1
        elif "disable" in cpu_option:
            for vcpuid, order in order_pre.items():
                if order > order_pre[vcpu]:
                    order_pre[vcpuid] = order - 1
            order_pre.pop(vcpu)
            order_expect = order_pre.copy()

        if order_expect != order_new:
            test.fail("vcpu order check fail")

    try:
        # define vcpu in xml
        vmxml.placement = vcpu_placement
        vmxml.vcpu = maxvcpu
        vmxml.current_vcpu = vcpu_current
        del vmxml.cpuset

        # define vcpus in xml
        vcpu_list = []
        vcpu = {}

        for vcpu_id in range(maxvcpu):
            vcpu['id'] = str(vcpu_id)

            if vcpu_id in vcpus_enabled:
                vcpu['enabled'] = 'yes'
            else:
                vcpu['enabled'] = 'no'

            if vcpu_id in vcpus_hotplug:
                vcpu['hotpluggable'] = 'yes'
            else:
                vcpu['hotpluggable'] = 'no'
            vcpu_list.append(vcpu.copy())

        vcpus_xml = vm_xml.VMVCPUSXML()
        vcpus_xml.vcpu = vcpu_list
        vmxml.vcpus = vcpus_xml
        vmxml.sync()
        logging.debug(vmxml)

        # assemble setvcpu_option
        if isinstance(setvcpu_option, str):
            setvcpu_option = {setvcpu_option: setvcpu_action}

        # run virsh setvcpu and check vcpus in xml
        if check == "coldplug":
            for cpus, option in setvcpu_option.items():
                result_to_check = virsh.setvcpu(vm_name,
                                                cpus,
                                                option,
                                                debug=True)
                if not status_error:
                    cpulist = libvirt.cpus_parser(cpus)
                    check_vcpu_status(cpulist, option)

        # start vm
        if check.startswith("hotplug"):
            virsh.start(vm_name, debug=True, ignore_status=False)
            vm.wait_for_login(timeout=start_timeout)

        # turn setvcpu_option into an ordered dict
        if isinstance(setvcpu_option, tuple):
            d = collections.OrderedDict()
            length = len(setvcpu_option)
            if (length % 2):
                test.fail("test config fail")
            for i in range(length):
                if not (i % 2):
                    d[setvcpu_option[i]] = setvcpu_option[i + 1]
            setvcpu_option = collections.OrderedDict()
            setvcpu_option = d.copy()

        if check.startswith("hotplug"):
            for cpus, option in setvcpu_option.items():
                vmxml_pre = vm_xml.VMXML.new_from_dumpxml(vm_name)
                cpus_online_pre = vm.get_cpu_count()
                result_to_check = virsh.setvcpu(vm_name,
                                                cpus,
                                                option,
                                                debug=True)
                if not status_error:
                    cpulist = libvirt.cpus_parser(cpus)
                    check_vcpu_status(cpulist, option, cpus_online_pre)
                    # check vcpu order only when live status of vcpu is changed
                    if 'config' not in option:
                        check_vcpu_order(cpulist, option, vmxml_pre)

        if 'result_to_check' in locals():
            if err_msg:
                err_msg = err_msg.split(";")
            libvirt.check_result(result_to_check, err_msg)

    finally:
        vmxml_backup.sync()
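The order bookkeeping verified by check_vcpu_order() is easy to restate in isolation: enabling a vcpu appends it with the next order value, while disabling one removes it and shifts every higher order down by one. A standalone sketch with illustrative id/order values:

# Illustration only; mirrors the expected-order calculation in check_vcpu_order.
def expected_order_after(order_pre, vcpu, action):
    order = dict(order_pre)          # vcpu id -> order
    if action == "enable":
        order[vcpu] = len(order_pre) + 1
    elif action == "disable":
        removed = order.pop(vcpu)
        for vcpu_id, value in order.items():
            if value > removed:
                order[vcpu_id] = value - 1
    return order

print(expected_order_after({0: 1, 1: 2, 2: 3}, 3, "enable"))   # {0: 1, 1: 2, 2: 3, 3: 4}
print(expected_order_after({0: 1, 1: 2, 2: 3}, 1, "disable"))  # {0: 1, 2: 2}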