Example 1
def check_emulatorpin(params, test):
    """
    Check emulator affinity
    :param params: the parameter dictionary
    :param test: test object
    """
    dicts = {}
    vm = params.get("vm")
    vm_name = params.get("main_vm")
    cpu_list = params.get("cpu_list")
    cgconfig = params.get("cgconfig", "on")
    options = params.get("emulatorpin_options")

    result = virsh.emulatorpin(vm_name, debug=True)
    cmd_output = result.stdout.strip().splitlines()
    logging.debug(cmd_output)
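    # Sample `virsh emulatorpin` output parsed below; the first two
    # header/separator lines are skipped and the '*' row holds the
    # emulator affinity:
    #   emulator: CPU Affinity
    #   ----------------------------------
    #              *: 0-3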
    # Parse the command output into a dictionary.
    for line in cmd_output[2:]:
        k, v = line.split(':')
        dicts[k.strip()] = v.strip()

    logging.debug(dicts)

    emulator_from_cmd = dicts['*']
    emulatorpin_from_xml = ""

    # The 'config' option changes the persistent definition, which takes
    # effect on next boot. If the guest is not shut down, we need to run
    # virsh dumpxml with the '--inactive' option to see such XML changes.
    if options == "config" and vm and not vm.is_alive():
        emulatorpin_from_xml = \
            vm_xml.VMXML().new_from_dumpxml(vm_name, "--inactive").cputune.emulatorpin
    else:
        emulatorpin_from_xml = \
            vm_xml.VMXML().new_from_dumpxml(vm_name).cputune.emulatorpin

    # Get the guest's corresponding emulator/cpuset.cpus value
    # from the cpuset controller of the cgroup.
    emulatorpin_from_cgroup = None
    if cgconfig == "on" and vm and vm.is_alive():
        emulatorpin_from_cgroup = get_emulatorpin_from_cgroup(params, test)
        logging.debug("The emulatorpin value from "
                      "cgroup: %s", emulatorpin_from_cgroup)

    # Check the specified cpulist value against the virsh command output
    # and/or cpuset.cpus from the cpuset controller of the cgroup.
    if cpu_list:
        if vm and vm.is_alive() and options != "config":
            if (cpu_list != cpus_parser(emulator_from_cmd)) or \
                    (emulatorpin_from_cgroup is not None and
                     cpu_list != cpus_parser(emulatorpin_from_cgroup)):
                logging.error("Expected emulatorpin %s, got: %s", cpu_list,
                              emulator_from_cmd)
                return False
        else:
            if cpu_list != cpus_parser(emulatorpin_from_xml):
                logging.error("Expected emulatorpin %s, got: %s", cpu_list,
                              emulatorpin_from_xml)
                return False

    return True
Example 2
def check_numatune_xml(params):
    """
    Compare mode and nodeset value with guest XML configuration
    :param params: the parameter dictionary
    """
    vm_name = params.get("main_vm")
    mode = params.get("numa_mode", "")
    nodeset = params.get("numa_nodeset", "")
    options = params.get("options", "")
    # The --config option takes effect only after the vm is shut down.
    if options == "config":
        virsh.shutdown(vm_name)
    # The numa params should be verified while the vm is running.
    if not virsh.is_alive(vm_name):
        virsh.start(vm_name)

    try:
        numa_params = libvirt_xml.VMXML.get_numa_memory_params(vm_name)
    # The VM XML omits the numa entry when placement is auto and mode is
    # strict, so set numa_params manually when the exception is raised.
    except LibvirtXMLAccessorError:
        numa_params = {'placement': 'auto', 'mode': 'strict'}

    if not numa_params:
        logging.error("Could not get numa parameters for %s", vm_name)
        return False

    mode_from_xml = numa_params['mode']
    # If placement is auto, there is no nodeset in the numa params.
    try:
        nodeset_from_xml = numa_params['nodeset']
    except KeyError:
        nodeset_from_xml = ""

    if mode and mode != mode_from_xml:
        logging.error("To expect %s: %s", mode, mode_from_xml)
        return False

    # The literal nodeset value can differ from the guest XML form, so
    # compare them via a normalized intermediate result: for example, a
    # nodeset of '0,1,2' appears as '0-2' in the guest XML.
    nodeset = cpus_parser(nodeset)
    nodeset_from_xml = cpus_parser(nodeset_from_xml)

    if nodeset and nodeset != nodeset_from_xml:
        logging.error("To expect %s: %s", nodeset, nodeset_from_xml)
        return False

    return True
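
Note: the comparison above only works because cpus_parser() normalizes both spellings of a nodeset to the same list of ints. A minimal sketch of such a normalizer (an illustration of the idea, not virttest's actual cpus_parser implementation):

def parse_cpuset(spec):
    """Expand a cpuset/nodeset string such as '0-2,4,^1' into a sorted list."""
    included, excluded = set(), set()
    for part in spec.split(','):
        part = part.strip()
        target = included
        if part.startswith('^'):  # '^n' excludes a cpu/node
            target = excluded
            part = part[1:]
        if '-' in part:
            start, end = part.split('-')
            target.update(range(int(start), int(end) + 1))
        else:
            target.add(int(part))
    return sorted(included - excluded)

assert parse_cpuset('0,1,2') == parse_cpuset('0-2') == [0, 1, 2]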
Example 3
def verify_numa_for_auto_replacement(test, params, vmxml, node_list, qemu_cpu,
                                     vm, memory_status):
    """
    Verify the checkpoints when placement = auto

    :param test: test object
    :param params: dict for the test
    :param vmxml: VMXML object
    :param node_list: list of the host numa nodes
    :param qemu_cpu: cpu list in each node
    :param vm: vm object
    :param memory_status: memory info in each node
    :raises: test.fail if cpu usage in node is not expected
    :return:
    """
    log_file = params.get("libvirtd_debug_file")
    vcpu_placement = params.get("vcpu_placement")
    numa_memory = vmxml.numa_memory
    numad_cmd_opt = "-w %s:%s" % (vmxml.vcpu, vmxml.max_mem // 1024)
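    # numad is queried as "numad -w <ncpus>:<MB>" (max_mem is in KiB,
    # hence the // 1024 conversion above), and libvirtd logs the exact
    # numad command line, so the debug log can be grepped for it.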
    utils_test.libvirt.check_logfile(r'.*numad\s*%s' % numad_cmd_opt, log_file)
    check_res = utils_test.libvirt.check_logfile(
        'Nodeset returned from numad:', log_file)
    # Sample: Nodeset returned from numad: 0-1
    match_obj = re.search(r'Nodeset returned from numad:\s(.*)',
                          check_res.stdout_text)
    numad_ret = match_obj.group(1)
    logging.debug("Nodeset returned from numad: %s", numad_ret)

    numad_node = cpu.cpus_parser(numad_ret)
    left_node = [node_list.index(i) for i in node_list if i not in numad_node]
    numad_node_seq = [node_list.index(i) for i in numad_node]
    logging.debug("numad nodes are %s", numad_node)
    if numa_memory.get('placement') == 'auto':
        if numa_memory.get('mode') == 'strict':
            mem_compare(test,
                        numad_node_seq,
                        left_node,
                        memory_status=memory_status)
        elif numa_memory.get('mode') == 'preferred':
            mode_nodeset = 'prefer:' + numad_ret
            numa_mode_check(test, vm, mode_nodeset)
        else:
            mode_nodeset = numa_memory.get('mode') + ':' + numad_ret
            numa_mode_check(test, vm, mode_nodeset)
    if vcpu_placement == 'auto':
        for i in left_node:
            if qemu_cpu[i]:
                test.fail("cpu usage in node %s is not expected" % i)
        cpu_affinity_check(test, params.get("main_vm"), node=numad_node)
Example 4
def run(test, params, env):
    """
    Test guest numa setting
    """
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes
    arch = platform.machine()
    if 'ppc64' in arch:
        try:
            ppc_memory_nodeset = ""
            nodes = params['memory_nodeset']
            if '-' in nodes:
                for n in range(int(nodes.split('-')[0]),
                               int(nodes.split('-')[1])):
                    ppc_memory_nodeset += str(node_list[n]) + ','
                ppc_memory_nodeset += str(node_list[int(nodes.split('-')[1])])
            else:
                node_lst = nodes.split(',')
                for n in range(len(node_lst) - 1):
                    ppc_memory_nodeset += str(node_list[int(
                        node_lst[n])]) + ','
                ppc_memory_nodeset += str(node_list[int(node_lst[-1])])
            params['memory_nodeset'] = ppc_memory_nodeset
        except IndexError:
            test.cancel("No of numas in config does not match with no of "
                        "online numas in system")
        except utils_params.ParamNotFound:
            pass
        pkeys = ('memnode_nodeset', 'page_nodenum')
        for pkey in pkeys:
            for key in params.keys():
                if pkey in key:
                    params[key] = str(node_list[int(params[key])])
        # Modify qemu command line
        try:
            if params['qemu_cmdline_mem_backend_1']:
                memory_nodeset = sorted(params['memory_nodeset'].split(','))
                if len(memory_nodeset) > 1:
                    if int(memory_nodeset[1]) - int(memory_nodeset[0]) == 1:
                        qemu_cmdline = "memory-backend-ram,.*?id=ram-node1," \
                                       ".*?host-nodes=%s-%s,policy=bind" % \
                                       (memory_nodeset[0], memory_nodeset[1])
                    else:
                        qemu_cmdline = "memory-backend-ram,.*?id=ram-node1," \
                                       ".*?host-nodes=%s,.*?host-nodes=%s,policy=bind" % \
                                       (memory_nodeset[0], memory_nodeset[1])
                    params['qemu_cmdline_mem_backend_1'] = qemu_cmdline
        except utils_params.ParamNotFound:
            pass
        try:
            if params['qemu_cmdline_mem_backend_0']:
                qemu_cmdline = params['qemu_cmdline_mem_backend_0']
                params['qemu_cmdline_mem_backend_0'] = qemu_cmdline.replace(
                    ".*?host-nodes=1",
                    ".*?host-nodes=%s" % params['memnode_nodeset_0'])
        except utils_params.ParamNotFound:
            pass
    vcpu_num = int(params.get("vcpu_num", 2))
    max_mem = int(params.get("max_mem", 1048576))
    max_mem_unit = params.get("max_mem_unit", 'KiB')
    vcpu_placement = params.get("vcpu_placement", 'static')
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
    mode_dict = {
        'strict': 'bind',
        'preferred': 'prefer',
        'interleave': 'interleave'
    }

    # Prepare numatune memory parameter dict and list
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    memnode_tuple = ('memnode_cellid', 'memnode_mode', 'memnode_nodeset')
    numa_memnode = handle_param(memnode_tuple, params)

    if numa_memnode:
        if not libvirt_version.version_compare(1, 2, 7):
            test.cancel("Setting numatune memnode per numa node is not "
                        "supported on the current libvirt version")

    # Prepare cpu numa cell parameter
    topology = {}
    topo_tuple = ('sockets', 'cores', 'threads')
    for key in topo_tuple:
        if params.get(key):
            topology[key] = params.get(key)

    cell_tuple = ('cell_id', 'cell_cpus', 'cell_memory')
    numa_cell = handle_param(cell_tuple, params)

    # Prepare qemu cmdline check parameter
    cmdline_tuple = ("qemu_cmdline", )
    cmdline_list = handle_param(cmdline_tuple, params)

    # Prepare hugepages parameter
    backup_list = []
    page_tuple = ('vmpage_size', 'vmpage_unit', 'vmpage_nodeset')
    page_list = handle_param(page_tuple, params)
    nr_pagesize_total = params.get("nr_pagesize_total")
    deallocate = False

    if page_list:
        if not libvirt_version.version_compare(1, 2, 5):
            test.cancel("Setting hugepages per numa node is not "
                        "supported on the current libvirt version")

    hp_cl = test_setup.HugePageConfig(params)
    supported_hp_size = hp_cl.get_multi_supported_hugepage_size()
    mount_path = []
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    qemu_conf_restore = False

    def _update_qemu_conf():
        """
        Mount hugepage path, update qemu conf then restart libvirtd
        """
        size_dict = {'2048': '2M', '1048576': '1G', '16384': '16M'}
        for page in page_list:
            if page['size'] not in supported_hp_size:
                test.cancel("Hugepage size [%s] isn't supported, "
                            "please verify kernel cmdline configuration." %
                            page['size'])
            m_path = "/dev/hugepages%s" % size_dict[page['size']]
            hp_cl.hugepage_size = int(page['size'])
            hp_cl.hugepage_path = m_path
            hp_cl.mount_hugepage_fs()
            mount_path.append(m_path)
        if mount_path:
            qemu_conf.hugetlbfs_mount = mount_path
            libvirtd.restart()

    try:
        # Get host numa node list
        logging.debug("host node list is %s", node_list)
        used_node = []
        if numa_memory.get('nodeset'):
            used_node += cpu.cpus_parser(numa_memory['nodeset'])
        if numa_memnode:
            for i in numa_memnode:
                used_node += cpu.cpus_parser(i['nodeset'])
        if page_list:
            host_page_tuple = ("hugepage_size", "page_num", "page_nodenum")
            h_list = handle_param(host_page_tuple, params)
            h_nodenum = [
                h_list[p_size]['nodenum'] for p_size in range(len(h_list))
            ]
            for i in h_nodenum:
                used_node += cpu.cpus_parser(i)
        if used_node and not status_error:
            logging.debug("set node list is %s", used_node)
            used_node = list(set(used_node))
            for i in used_node:
                if i not in node_list:
                    test.cancel("%s in nodeset out of range" % i)
                mem_size = host_numa_node.read_from_node_meminfo(i, 'MemTotal')
                logging.debug("the memory total in the node %s is %s", i,
                              mem_size)
                if not int(mem_size):
                    test.cancel("node %s memory is empty" % i)

        # set hugepage with qemu.conf and mount path
        _update_qemu_conf()
        qemu_conf_restore = True

        # set hugepage with total number or per-node number
        if nr_pagesize_total:
            # Only set the total number of 2M hugepages; runtime updates of
            # the total 1G hugepage count are not supported yet.
            deallocate = True
            hp_cl.target_hugepages = int(nr_pagesize_total)
            hp_cl.set_hugepages()
        if page_list:
            hp_size = [h_list[p_size]['size'] for p_size in range(len(h_list))]
            multi_hp_size = hp_cl.get_multi_supported_hugepage_size()
            for size in hp_size:
                if size not in multi_hp_size:
                    test.cancel("The hugepage size %s not "
                                "supported or not configured under"
                                " current running kernel." % size)
            # backup node page setting and set new value
            for i in h_list:
                node_val = hp_cl.get_node_num_huge_pages(
                    i['nodenum'], i['size'])
                # Set hugepages per node if the current value does not
                # satisfy the request; the kernel now supports runtime
                # updates of the per-node 1G hugepage count.
                if int(i['num']) > node_val:
                    node_dict = i.copy()
                    node_dict['num'] = node_val
                    backup_list.append(node_dict)
                    hp_cl.set_node_num_huge_pages(i['num'], i['nodenum'],
                                                  i['size'])
                    node_val_after_set = hp_cl.get_node_num_huge_pages(
                        i['nodenum'], i['size'])
                    if node_val_after_set < int(i['num']):
                        test.cancel("There is not enough memory to allocate.")

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.vcpu = vcpu_num
        vmxml.max_mem = max_mem
        vmxml.max_mem_unit = max_mem_unit
        vmxml.current_mem = max_mem
        vmxml.current_mem_unit = max_mem_unit

        # numatune setting
        if numa_memnode:
            vmxml.numa_memory = numa_memory
            vmxml.numa_memnode = numa_memnode
            del vmxml.numa_memory
        if numa_memory:
            vmxml.numa_memory = numa_memory

        # vcpu placement setting
        vmxml.placement = vcpu_placement

        # guest numa cpu setting
        vmcpuxml = libvirt_xml.vm_xml.VMCPUXML()
        vmcpuxml.xml = "<cpu><numa/></cpu>"
        if topology:
            vmcpuxml.topology = topology
        logging.debug(vmcpuxml.numa_cell)
        vmcpuxml.numa_cell = numa_cell
        logging.debug(vmcpuxml.numa_cell)
        vmxml.cpu = vmcpuxml

        # hugepages setting
        if page_list:
            membacking = libvirt_xml.vm_xml.VMMemBackingXML()
            hugepages = libvirt_xml.vm_xml.VMHugepagesXML()
            pagexml_list = []
            for i in range(len(page_list)):
                pagexml = hugepages.PageXML()
                pagexml.update(page_list[i])
                pagexml_list.append(pagexml)
            hugepages.pages = pagexml_list
            membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            session = vm.wait_for_login()
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug("vm xml after start is %s", vmxml_new)

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if status_error:
                return
            else:
                test.fail("Test failed in positive case.\n error:"
                          " %s\n%s" % (e, bug_url))

        vm_pid = vm.get_pid()
        # numa hugepage check
        if page_list:
            with open("/proc/%s/numa_maps" % vm_pid) as numa_maps:
                numa_map_info = numa_maps.read()
            hugepage_info = re.findall(r".*file=\S*hugepages.*", numa_map_info)
            if not hugepage_info:
                test.fail("Can't find hugepages usage info in vm " "numa maps")
            else:
                logging.debug("The hugepage info in numa_maps is %s" %
                              hugepage_info)
                map_dict = {}
                usage_dict = {}
                node_pattern = r"\s(\S+):(\S+)\s.*ram-node(\d+).*\s"
                node_pattern += "N(\d+)=(\d+)"
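                # Illustrative numa_maps line this pattern should match
                # (exact fields vary by kernel):
                #   7f5c... bind:0 file=/dev/hugepages/...ram-node0 huge N0=512
                # giving mem_mode='bind', mem_num='0', cell_num='0',
                # host_node_num='0', vm_page_num='512'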
                for map_info in hugepage_info:
                    for (mem_mode, mem_num, cell_num, host_node_num,
                         vm_page_num) in re.findall(node_pattern, map_info):
                        usage_dict[mem_mode] = cpu.cpus_parser(mem_num)
                        usage_dict[host_node_num] = vm_page_num
                        map_dict[cell_num] = usage_dict.copy()
                logging.debug("huagepage info in vm numa maps is %s", map_dict)
                memnode_dict = {}
                usage_dict = {}
                if numa_memnode:
                    for i in numa_memnode:
                        node = cpu.cpus_parser(i['nodeset'])
                        mode = mode_dict[i['mode']]
                        usage_dict[mode] = node
                        memnode_dict[i['cellid']] = usage_dict.copy()
                    logging.debug("memnode setting dict is %s", memnode_dict)
                    for k in list(memnode_dict.keys()):
                        for mk in list(memnode_dict[k].keys()):
                            if memnode_dict[k][mk] != map_dict[k][mk]:
                                test.fail("vm pid numa map dict %s"
                                          " not expected" % map_dict)

        # qemu command line check
        with open("/proc/%s/cmdline" % vm_pid) as f_cmdline:
            q_cmdline_list = f_cmdline.read().split("\x00")
        logging.debug("vm qemu cmdline list is %s" % q_cmdline_list)
        for cmd in cmdline_list:
            logging.debug("checking '%s' in qemu cmdline", cmd['cmdline'])
            p_found = False
            for q_cmd in q_cmdline_list:
                if re.search(cmd['cmdline'], q_cmd):
                    p_found = True
                    break
            if not p_found:
                test.fail("%s not found in vm qemu cmdline" % cmd['cmdline'])

        # vm inside check
        vm_cpu_info = cpu.get_cpu_info(session)
        logging.debug("lscpu output dict in vm is %s", vm_cpu_info)
        session.close()
        node_num = int(vm_cpu_info["NUMA node(s)"])
        if node_num != len(numa_cell):
            test.fail("node number %s in vm is not expected" % node_num)
        for i in range(len(numa_cell)):
            cpu_str = vm_cpu_info["NUMA node%s CPU(s)" % i]
            vm_cpu_list = cpu.cpus_parser(cpu_str)
            cpu_list = cpu.cpus_parser(numa_cell[i]["cpus"])
            if vm_cpu_list != cpu_list:
                test.fail("vm node %s cpu list %s not expected" %
                          (i, vm_cpu_list))
        if topology:
            vm_topo_tuple = ("Socket(s)", "Core(s) per socket",
                             "Thread(s) per core")
            for i in range(len(topo_tuple)):
                topo_info = vm_cpu_info[vm_topo_tuple[i]]
                if topo_info != topology[topo_tuple[i]]:
                    test.fail("%s in vm topology not expected." %
                              topo_tuple[i])
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if page_list:
            for i in backup_list:
                hp_cl.set_node_num_huge_pages(i['num'], i['nodenum'],
                                              i['size'])
        if deallocate:
            hp_cl.deallocate = deallocate
            hp_cl.cleanup()
        if qemu_conf_restore:
            qemu_conf.restore()
            libvirtd.restart()
            for mt_path in mount_path:
                try:
                    process.run("umount %s" % mt_path, shell=True)
                except process.CmdError:
                    logging.warning("umount %s failed" % mt_path)
Example 5
def run(test, params, env):
    """
    Test numa tuning with memory
    """
    numad_log = []
    memory_status = []

    def _logger(line):
        """
        Callback function to log libvirtd output.
        """
        numad_log.append(line)

    def mem_compare(used_node, left_node):
        """
        Memory usage in the used nodes should be greater than in the
        remaining (left) nodes

        :param used_node: used node list
        :param left_node: left node list
        """
        used_mem_total = 0
        left_node_mem_total = 0
        for i in used_node:
            used_mem_total += int(memory_status[i])
        for i in left_node:
            left_node_mem_total += int(memory_status[i])
        if left_node_mem_total > used_mem_total:
            test.fail("nodes memory usage not expected.")

    def format_affinity_str(cpu_list):
        """
        Format affinity str

        :param cpu_list: list of cpu number
        :return: cpu affinity string
        """
        cmd = "lscpu | grep '^CPU(s):'"
        ret = process.run(cmd, shell=True)
        cpu_num = int(ret.stdout_text.split(':')[1].strip())
        cpu_affinity_str = ""
        for i in range(cpu_num):
            if i in cpu_list:
                cpu_affinity_str += "y"
            else:
                cpu_affinity_str += "-"
        return cpu_affinity_str
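        # e.g. on a host with 8 online cpus, format_affinity_str([0, 2, 3])
        # returns 'y-yy----', the same format virsh vcpuinfo prints for
        # CPU Affinity.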

    def cpu_affinity_check(cpuset=None, node=None):
        """
        Check vcpuinfo cpu affinity

        :param cpuset: cpuset list
        :param node: node number list
        """
        result = virsh.vcpuinfo(vm_name, debug=True)
        output = result.stdout.strip().splitlines()[-1]
        cpu_affinity = output.split(":")[-1].strip()
        if node:
            tmp_list = []
            for node_num in node:
                host_node = utils_misc.NumaNode(i=node_num + 1)
                logging.debug("node %s cpu list is %s" %
                              (node_num, host_node.cpus))
                tmp_list += host_node.cpus
            cpu_list = [int(i) for i in tmp_list]
        if cpuset:
            cpu_list = cpuset
        ret = format_affinity_str(cpu_list)
        logging.debug("expect cpu affinity is %s", ret)
        if cpu_affinity != ret:
            test.fail("vcpuinfo cpu affinity not expected")

    def numa_mode_check(mode_nodeset):
        """
        when the mode = 'preferred' or 'interleave', it is better to check
        numa_maps.
        """
        vm_pid = vm.get_pid()
        numa_map = '/proc/%s/numa_maps' % vm_pid
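        # Illustrative numa_maps line (the second field is the policy;
        # exact fields vary by kernel), e.g.:
        #   7f5c2a600000 prefer:0-1 anon=1024 dirty=1024 N0=512 N1=512
        # so line.split()[1] yields e.g. 'prefer:0-1' for the comparison.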
        with open(numa_map) as maps_file:
            for line in maps_file.readlines():
                if line.split()[1] != mode_nodeset:
                    test.fail("numa_maps policy does not match the "
                              "expected %s" % mode_nodeset)

    vcpu_placement = params.get("vcpu_placement")
    vcpu_cpuset = params.get("vcpu_cpuset")
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Get host numa node list
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes_withmem
    logging.debug("host node list is %s", " ".join(map(str, node_list)))

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value
    arch = platform.machine()
    if 'ppc64' in arch:
        try:
            ppc_memory_nodeset = ""
            nodes = numa_memory["nodeset"]
            if '-' in nodes:
                for nnode in range(int(nodes.split('-')[0]),
                                   int(nodes.split('-')[1]) + 1):
                    ppc_memory_nodeset += str(node_list[nnode]) + ','
            else:
                node_lst = nodes.split(',')
                for nnode in range(len(node_lst)):
                    ppc_memory_nodeset += str(node_list[int(
                        node_lst[nnode])]) + ','
            numa_memory["nodeset"] = ppc_memory_nodeset[:-1]
        except (KeyError, IndexError):
            pass

    # Prepare a libvirtd session with log level 1
    config_path = os.path.join(data_dir.get_tmp_dir(), "virt-test.conf")
    open(config_path, 'a').close()
    config = utils_config.LibvirtdConfig(config_path)
    config.log_level = 1
    arg_str = "--config %s" % config_path
    numad_reg = ".*numad"
    libvirtd = utils_libvirtd.LibvirtdSession(logging_handler=_logger,
                                              logging_pattern=numad_reg)

    try:
        libvirtd.start(arg_str=arg_str)
        # As libvirtd is started as a session by root, stop the virtlogd
        # service and start it as a daemon to avoid an selinux denial.
        try:
            path.find_command('virtlogd')
            process.run("service virtlogd stop", ignore_status=True)
            process.run("virtlogd -d")
        except path.CmdNotFoundError:
            pass

        # Allow extra time for libvirtd to restart successfully.
        ret = utils_misc.wait_for(lambda: libvirtd.is_working(),
                                  timeout=240,
                                  step=1)
        if not ret:
            test.fail("Libvirtd hang after restarted")

        # Get host cpu list
        tmp_list = []
        for node_num in node_list:
            host_node = utils_misc.NumaNode(i=node_num + 1)
            logging.debug("node %s cpu list is %s" %
                          (node_num, host_node.cpus))
            tmp_list += host_node.cpus
        cpu_list = [int(i) for i in tmp_list]

        dynamic_parameters = params.get('can_be_dynamic', 'no') == 'yes'

        if numa_memory.get('nodeset'):
            used_node = cpu.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            if not status_error:
                if not set(used_node).issubset(node_list):
                    if not dynamic_parameters:
                        test.cancel("nodeset %s out of range" %
                                    numa_memory['nodeset'])
                    else:
                        if '-' in numa_memory['nodeset']:
                            nodes_size = len(numa_memory['nodeset'].split('-'))
                        else:
                            nodes_size = len(numa_memory['nodeset'].split(','))
                        if nodes_size > len(node_list):
                            test.cancel("nodeset %s out of range" %
                                        numa_memory['nodeset'])
                        else:
                            numa_memory['nodeset'] = node_list[:nodes_size]

        if vcpu_cpuset:
            pre_cpuset = cpu.cpus_parser(vcpu_cpuset)
            logging.debug("Parsed cpuset list is %s", pre_cpuset)
            if not set(pre_cpuset).issubset(cpu_list):
                if not dynamic_parameters:
                    test.cancel("cpuset %s out of range" % vcpu_cpuset)
                else:
                    random_cpus = []
                    # Choose random cpus from the list of available CPUs on
                    # the system, making sure no cpu is added twice and the
                    # list of selected CPUs is long enough
                    for i in range(
                            len([int(i) for i in vcpu_cpuset.split(',')])):
                        rand_cpu = random.randint(min(cpu_list), max(cpu_list))
                        while rand_cpu in random_cpus:
                            rand_cpu = random.randint(min(cpu_list),
                                                      max(cpu_list))
                        random_cpus.append(rand_cpu)
                    random_cpus.sort()
                    vcpu_cpuset = (','.join(
                        [str(cpu_num) for cpu_num in random_cpus]))
                    pre_cpuset = cpu.cpus_parser(vcpu_cpuset)

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        vcpu_num = vmxml.vcpu
        max_mem = vmxml.max_mem
        if vmxml.xmltreefile.find('cputune'):
            vmxml.xmltreefile.remove_by_xpath('/cputune')
        else:
            logging.debug('No vcpupin found')
        if vcpu_placement:
            vmxml.placement = vcpu_placement
        if vcpu_cpuset:
            vmxml.cpuset = vcpu_cpuset
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()
        numad_cmd_opt = "-w %s:%s" % (vcpu_num, max_mem // 1024)

        try:
            vm.start()
            vm.wait_for_login()
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            numa_memory_new = vmxml_new.numa_memory
            logging.debug("Current memory config dict is %s" % numa_memory_new)

            # Check xml config
            if numa_memory.get('placement') == 'static':
                pre_numa_memory = numa_memory.copy()
                del pre_numa_memory['placement']
            else:
                pre_numa_memory = numa_memory

            if pre_numa_memory != numa_memory_new:
                test.fail("memory config %s not expected "
                          "after domain start" % numa_memory_new)

            pos_vcpu_placement = vmxml_new.placement
            logging.debug("vcpu placement after domain start is %s",
                          pos_vcpu_placement)
            try:
                pos_cpuset = vmxml_new.cpuset
                logging.debug("vcpu cpuset after vm start is %s", pos_cpuset)
            except libvirt_xml.xcepts.LibvirtXMLNotFoundError:
                if vcpu_cpuset and vcpu_placement != 'auto':
                    test.fail("cpuset not found in domain xml.")

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if status_error:
                return
            else:
                test.fail("Test failed in positive case.\n "
                          "error: %s\n%s" % (e, bug_url))

        # Check qemu process numa memory usage
        memory_status, qemu_cpu = utils_test.qemu.get_numa_status(
            host_numa_node, vm.get_pid())
        logging.debug("The memory status is %s", memory_status)
        logging.debug("The cpu usage is %s", qemu_cpu)

        if vcpu_cpuset:
            total_cpu = []
            for node_cpu in qemu_cpu:
                total_cpu += node_cpu
            for i in total_cpu:
                if int(i) not in pre_cpuset:
                    test.fail("cpu %s is not expected" % i)
            cpu_affinity_check(cpuset=pre_cpuset)
        if numa_memory.get('nodeset'):
            # If the host has non-contiguous node numbers, convert them
            # into sequence numbers so that they can be used in
            # mem_compare
            if numa_memory.get('mode') == 'strict':
                left_node = [
                    node_list.index(i) for i in node_list if i not in used_node
                ]
                used_node = [node_list.index(i) for i in used_node]
                mem_compare(used_node, left_node)
            elif numa_memory.get('mode') == 'preferred':
                mode_nodeset = 'prefer:' + numa_memory.get('nodeset')
                numa_mode_check(mode_nodeset)
            else:
                mode_nodeset = numa_memory.get('mode') + ':' + numa_memory.get(
                    'nodeset')
                numa_mode_check(mode_nodeset)
        logging.debug("numad log list is %s", numad_log)
        if vcpu_placement == 'auto' or numa_memory.get('placement') == 'auto':
            if not numad_log:
                test.fail("numad usage not found in libvirtd log")
            if numad_log[0].split("numad ")[-1] != numad_cmd_opt:
                logging.warning('numa log:\n%s\n' %
                                numad_log[0].split("numad ")[-1])
                logging.warning('numa cmd opt:\n%s\n' % numad_cmd_opt)
                test.fail("numad command not expected in log")
            numad_ret = numad_log[1].split("numad: ")[-1]
            numad_node = cpu.cpus_parser(numad_ret)
            left_node = [
                node_list.index(i) for i in node_list if i not in numad_node
            ]
            numad_node_seq = [node_list.index(i) for i in numad_node]
            logging.debug("numad nodes are %s", numad_node)
            if numa_memory.get('placement') == 'auto':
                if numa_memory.get('mode') == 'strict':
                    mem_compare(numad_node_seq, left_node)
                elif numa_memory.get('mode') == 'preferred':
                    mode_nodeset = 'prefer:' + numad_ret
                    numa_mode_check(mode_nodeset)
                else:
                    mode_nodeset = numa_memory.get('mode') + ':' + numad_ret
                    numa_mode_check(mode_nodeset)
            if vcpu_placement == 'auto':
                for i in left_node:
                    if qemu_cpu[i]:
                        test.fail("cpu usage in node %s is not expected" % i)
                cpu_affinity_check(node=numad_node)

    finally:
        try:
            path.find_command('virtlogd')
            process.run('pkill virtlogd', ignore_status=True)
            process.run('systemctl restart virtlogd.socket',
                        ignore_status=True)
        except path.CmdNotFoundError:
            pass
        libvirtd.exit()
        if config_path:
            config.restore()
            if os.path.exists(config_path):
                os.remove(config_path)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
Example 6
def run(test, params, env):
    """
    Test setvcpu feature as follows:
    positive test:
        1. run virsh setvcpu with option --enable and --disable on inactive vm
           and check xml
        2. run virsh setvcpu with option --enable and --disable on active vm and
           check xml and number of online vcpu
        3. run virsh setvcpu with option --enable, --disable and --config on
           active vm and check inactive xml
        4. check the vcpu order when hot plug/unplug specific vcpu
    negative test:
        1. run virsh setvcpu with more than one vcpu on active vm and check error
        2. run virsh setvcpu to hotplug/unplug invalid vcpu and check error
        3. enable/disable vcpu0 when vm is active/inactive and check error
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vcpu_placement = params.get("vcpu_placement", "static")
    maxvcpu = int(params.get("maxvcpu", "8"))
    vcpu_current = params.get("vcpu_current", "1")
    vcpus_enabled = eval(params.get("vcpus_enabled", "{0}"))
    vcpus_hotplug = eval(params.get("vcpus_hotpluggable", "{0}"))
    setvcpu_option = eval(params.get("setvcpu_option", "{}"))
    setvcpu_action = params.get("setvcpu_action", "")
    start_timeout = int(params.get("start_timeout", "60"))
    check = params.get("check", "")
    err_msg = params.get("err_msg", "")
    status_error = "yes" == params.get("status_error", "no")

    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    def check_vcpu_status(cpulist, cpu_option, cpus_online_pre=1):
        """
        Fail the test if the vcpu status from the xml or the number of
        online vcpus from the vm is not as expected

        :param cpulist: a vcpu list set by setvcpu
        :param cpu_option: a string used by setvcpu
        :param cpus_online_pre: number of online vcpus before running setvcpu
        """
        if check.endswith("config"):
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        else:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(vmxml)

        # check the vcpu status in xml
        cpu_count = 0
        for cpu_id in cpulist:
            if "enable" in cpu_option:
                cpu_count += 1
                if (vmxml.vcpus.vcpu[cpu_id].get('enabled') != "yes"):
                    test.fail("vcpu status check fail")
            elif "disable" in cpu_option:
                cpu_count -= 1
                if (vmxml.vcpus.vcpu[cpu_id].get('enabled') != "no"):
                    test.fail("vcpu status check fail")
            else:
                test.fail("wrong vcpu status in xml")

        # login vm and check the number of online vcpu
        if check == "hotplug":
            if not cpu.check_if_vm_vcpu_match(cpu_count + cpus_online_pre, vm):
                test.fail("vcpu status check fail")

    def get_vcpu_order(vmxml):
        """
        return a {vcpu:order} dict based on vcpus in xml

        :param vmxml: the instance of VMXML class
        """
        vcpu_order = {}
        # get vcpu order based on the online vcpu
        for cpu_id in range(maxvcpu):
            if vmxml.vcpus.vcpu[cpu_id].get('enabled') == "yes":
                vcpu_order[cpu_id] = int(vmxml.vcpus.vcpu[cpu_id].get('order'))

        logging.debug("vcpu order based on vcpus in xml {}".format(vcpu_order))
        return vcpu_order.copy()

    def check_vcpu_order(cpulist, cpu_option, vmxml_pre):
        """
        Check the vcpu order values in the xml. When the set of online
        vcpus changes, the order should be redefined.

        :param cpulist: a vcpu list set by setvcpu
        :param cpu_option: a string used by setvcpu such as config, enable and live
        :param vmxml_pre: the instance of VMXML class before run setvcpu
        """
        # only one vcpu at a time is valid in a live setvcpu operation
        if len(cpulist) == 1:
            vcpu = cpulist[0]
        else:
            test.fail("wrong vcpu value from cfg file")

        vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # get vcpus order dict from previous xml
        order_pre = get_vcpu_order(vmxml_pre)
        # get vcpus order dict from updated xml
        order_new = get_vcpu_order(vmxml_new)

        # calculate the expected dict of vcpu order based on the previous one
        if "enable" in cpu_option:
            order_expect = order_pre.copy()
            order_expect[vcpu] = len(order_pre) + 1
        elif "disable" in cpu_option:
            for vcpuid, order in order_pre.items():
                if order > order_pre[vcpu]:
                    order_pre[vcpuid] = order - 1
            order_pre.pop(vcpu)
            order_expect = order_pre.copy()

        if order_expect != order_new:
            test.fail("vcpu order check fail")

    try:
        # define vcpu in xml
        vmxml.placement = vcpu_placement
        vmxml.vcpu = maxvcpu
        vmxml.current_vcpu = vcpu_current
        del vmxml.cpuset

        # define vcpus in xml
        vcpu_list = []
        vcpu = {}

        for vcpu_id in range(maxvcpu):
            vcpu['id'] = str(vcpu_id)

            if vcpu_id in vcpus_enabled:
                vcpu['enabled'] = 'yes'
            else:
                vcpu['enabled'] = 'no'

            if vcpu_id in vcpus_hotplug:
                vcpu['hotpluggable'] = 'yes'
            else:
                vcpu['hotpluggable'] = 'no'
            vcpu_list.append(vcpu.copy())

        vcpus_xml = vm_xml.VMVCPUSXML()
        vcpus_xml.vcpu = vcpu_list
        vmxml.vcpus = vcpus_xml

        # Remove cpu topology so that it cannot conflict with the vcpu count
        if vmxml.get_cpu_topology():
            new_cpu = vmxml.cpu
            del new_cpu.topology
            vmxml.cpu = new_cpu

        vmxml.sync()
        logging.debug(vmxml)

        # assemble setvcpu_option
        if isinstance(setvcpu_option, str):
            setvcpu_option = {setvcpu_option: setvcpu_action}

        # run virsh setvcpu and check vcpus in xml
        if check == "coldplug":
            for cpus, option in setvcpu_option.items():
                result_to_check = virsh.setvcpu(vm_name,
                                                cpus,
                                                option,
                                                debug=True)
                if not status_error:
                    cpulist = cpu.cpus_parser(cpus)
                    check_vcpu_status(cpulist, option)

        # start vm
        if check.startswith("hotplug"):
            virsh.start(vm_name, debug=True, ignore_status=False)
            vm.wait_for_login(timeout=start_timeout)

        # turn setvcpu_option into an ordered dict
        if isinstance(setvcpu_option, tuple):
            d = collections.OrderedDict()
            length = len(setvcpu_option)
            if (length % 2):
                test.fail("Invalid test config: setvcpu_option must "
                          "contain an even number of items")
            for i in range(length):
                if not (i % 2):
                    d[setvcpu_option[i]] = setvcpu_option[i + 1]
            setvcpu_option = d.copy()
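            # e.g. setvcpu_option = ('4', '--enable', '5', '--disable')
            # becomes OrderedDict([('4', '--enable'), ('5', '--disable')]),
            # preserving the order in which the setvcpu calls run.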

        if check.startswith("hotplug"):
            for cpus, option in setvcpu_option.items():
                vmxml_pre = vm_xml.VMXML.new_from_dumpxml(vm_name)
                cpus_online_pre = vm.get_cpu_count()
                result_to_check = virsh.setvcpu(vm_name,
                                                cpus,
                                                option,
                                                debug=True)
                if not status_error:
                    cpulist = cpu.cpus_parser(cpus)
                    check_vcpu_status(cpulist, option, cpus_online_pre)
                    # check vcpu order only when live status of vcpu is changed
                    if 'config' not in option:
                        check_vcpu_order(cpulist, option, vmxml_pre)

        if 'result_to_check' in locals():
            if err_msg:
                err_msg = err_msg.split(";")
            libvirt.check_result(result_to_check, err_msg)

    finally:
        vmxml_backup.sync()
Example 7
def set_numa_parameter(test, params, cgstop):
    """
    Set the numa parameters
    :param params: the parameter dictionary
    :param cgstop: whether control groups were stopped beforehand
    """
    vm_name = params.get("main_vm")
    mode = params.get("numa_mode")
    nodeset = params.get("numa_nodeset")
    options = params.get("options", None)
    start_vm = params.get("start_vm", "yes")
    exceed_num = "yes" == params.get("exceed_num", "no")

    # Get host numa node list
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes_withmem
    logging.debug("host online nodes with memory %s", node_list)

    # Use a node number beyond the host numa node count if exceed_num
    if exceed_num:
        nodeset = str(len(host_numa_node.all_nodes) + 1)
        params['numa_nodeset'] = nodeset

    # Get original numatune memory mode
    ori_mode = ''
    ori_numatune = {}
    if libvirt_xml.VMXML.new_from_dumpxml(vm_name).xmltreefile.find('numatune'):
        ori_numatune = libvirt_xml.VMXML.get_numa_memory_params(vm_name)
        ori_mode = ori_numatune['mode'] if 'mode' in ori_numatune else ''

    # Don't use libvirt_xml here because we are testing the numatune command
    result = virsh.numatune(vm_name, mode, nodeset, options, debug=True)
    status = result.exit_status

    # Check status_error
    status_error = params.get("status_error", "no")

    # For a running domain, the mode can't be changed, and the nodeset can
    # be changed only if the domain was started with a mode of 'strict'
    # that matches the original mode
    if ori_mode == mode and (ori_numatune.get('nodeset') == nodeset or not nodeset):
        status_error = "no"
    if mode == "strict" and start_vm == "yes":
        status_error = "no"
    if ori_mode and ori_mode != mode and start_vm == "yes":
        status_error = "yes"

    # TODO: The '--config' option affects the next boot, and for a guest in
    # shutoff state '--current' is equivalent to '--config'. If a nodeset
    # range beyond the host NUMA nodes is set on a shutoff guest with
    # '--config' or '--current', virsh numatune returns 0 rather than 1,
    # because libvirt only validates the nodeset when starting the guest;
    # the current virsh.start() can't cover this requirement.

    if status_error == "yes":
        if status:
            logging.info("It's an expected error")
        else:
            # If we stopped control groups, then we expect a different
            # result in this failure case; however, if there were no
            # control groups to stop, then don't error needlessly
            if not cgstop:
                test.fail("Unexpected return code %d" % status)
            else:
                logging.info("Control groups stopped, thus expected success")
    elif status_error == "no":
        if status:
            used_node = cpus_parser(nodeset)
            if not set(used_node).issubset(node_list):
                test.cancel("Host does not support requested"
                            " nodeset %s" % used_node)
            else:
                test.fail(result.stderr)
        else:
            if check_numatune_xml(params):
                logging.info(result.stdout.strip())
            else:
                test.fail("The 'mode' or/and 'nodeset' are"
                          " inconsistent with numatune XML")
Example 8
def run(test, params, env):
    """
    Test emulatorpin tuning

    1) Positive testing
       1.1) get the current emulatorpin parameters for a running/shutoff guest
       1.2) set the current emulatorpin parameters for a running/shutoff guest
    2) Negative testing
       2.1) get emulatorpin parameters for a running/shutoff guest
       2.2) set emulatorpin parameters for a running/shutoff guest
    """

    # Run test case
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cgconfig = params.get("cgconfig", "on")
    cpulist = params.get("emulatorpin_cpulist")
    status_error = params.get("status_error", "no")
    change_parameters = params.get("change_parameters", "no")

    # Backup original vm
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    emulatorpin_placement = params.get("emulatorpin_placement", "")
    if emulatorpin_placement:
        vm.destroy()
        vmxml.placement = emulatorpin_placement
        vmxml.sync()
        try:
            vm.start()
        except VMStartError as detail:
            # Recover the VM and fail out early
            vmxml_backup.sync()
            logging.debug("Used VM XML:\n %s", vmxml)
            test.fail("VM Fails to start: %s" % detail)

    test_dicts = dict(params)
    test_dicts['vm'] = vm

    host_cpus = cpu.online_cpus_count()
    test_dicts['host_cpus'] = host_cpus
    cpu_max = int(host_cpus) - 1

    cpu_list = None

    # Assemble cpu list for positive test
    if status_error == "no":
        if cpulist is None:
            pass
        elif cpulist == "x":
            cpu_online_map = list(map(str, cpu.cpu_online_list()))
            cpulist = random.choice(cpu_online_map)
        elif cpulist == "x-y":
            # By default, the emulator is pinned to all cpus, and the
            # element 'cputune/emulatorpin' may not exist in the VM's XML.
            # libvirt does nothing if the emulator is pinned to the same
            # cpus again, leaving the VM's XML unchanged.
            # So for testing, avoid that value (0-$cpu_max).
            if cpu_max < 2:
                cpulist = "0-0"
            else:
                cpulist = "0-%s" % (cpu_max - 1)
        elif cpulist == "x,y":
            cpu_online_map = list(map(str, cpu.cpu_online_list()))
            cpulist = ','.join(random.sample(cpu_online_map, 2))
        elif cpulist == "x-y,^z":
            cpulist = "0-%s,^%s" % (cpu_max, cpu_max)
        elif cpulist == "-1":
            cpulist = "-1"
        elif cpulist == "out_of_max":
            cpulist = str(cpu_max + 1)
        else:
            test.cancel("CPU-list=%s is not recognized." % cpulist)
    test_dicts['emulatorpin_cpulist'] = cpulist
    if cpulist:
        cpu_list = cpus_parser(cpulist)
        test_dicts['cpu_list'] = cpu_list
        logging.debug("CPU list is %s", cpu_list)

    cg = utils_cgroup.CgconfigService()

    if cgconfig == "off":
        if cg.cgconfig_is_running():
            cg.cgconfig_stop()

    # Positive and negative testing
    try:
        if status_error == "no":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts, test)
            else:
                set_emulatorpin_parameter(test_dicts, test)

        if status_error == "yes":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts, test)
            else:
                set_emulatorpin_parameter(test_dicts, test)
    finally:
        # Recover cgconfig and libvirtd service
        if not cg.cgconfig_is_running():
            cg.cgconfig_start()
            utils_libvirtd.libvirtd_restart()
        # Recover vm.
        vmxml_backup.sync()
Example 9
def run(test, params, env):
    """
    Test numa memory migrate with live numa tuning
    """
    numad_log = []
    memory_status = []

    def _logger(line):
        """
        Callback function to log libvirtd output.
        """
        numad_log.append(line)

    def mem_compare(used_node, left_node):
        """
        Memory usage in the used nodes should be greater than in the
        remaining (left) nodes

        :param used_node: used node list
        :param left_node: left node list
        """
        used_mem_total = 0
        left_node_mem_total = 0
        for i in used_node:
            used_mem_total += int(memory_status[i])
        for i in left_node:
            left_node_mem_total += int(memory_status[i])
        if left_node_mem_total > used_mem_total:
            raise exceptions.TestFail("nodes memory usage not expected.")

    vm_name = params.get("main_vm")
    options = params.get("options", "live")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Get host numa node list
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes_withmem
    logging.debug("host node list is %s", node_list)
    if len(node_list) < 2:
        raise exceptions.TestSkipError("At least 2 numa nodes are needed on"
                                       " host")

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    # Prepare a libvirtd session with log level 1
    config_path = os.path.join(data_dir.get_tmp_dir(), "virt-test.conf")
    open(config_path, 'a').close()
    config = utils_config.LibvirtdConfig(config_path)
    config.log_level = 1
    arg_str = "--config %s" % config_path
    numad_reg = ".*numad"
    libvirtd = utils_libvirtd.LibvirtdSession(logging_handler=_logger,
                                              logging_pattern=numad_reg)

    try:
        libvirtd.start(arg_str=arg_str)
        # As libvirtd is started as a session by root, stop the virtlogd
        # service and start it as a daemon to avoid an selinux denial.
        try:
            path.find_command('virtlogd')
            process.run("service virtlogd stop",
                        ignore_status=True,
                        shell=True)
            process.run("virtlogd -d", shell=True)
        except path.CmdNotFoundError:
            pass

        # Allow extra time for libvirtd to restart successfully.
        ret = utils_misc.wait_for(lambda: libvirtd.is_working(),
                                  timeout=240,
                                  step=1)
        if not ret:
            test.fail("Libvirtd hang after restarted")

        if numa_memory.get('nodeset'):
            used_node = cpu.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            for i in used_node:
                if i not in node_list:
                    raise exceptions.TestSkipError("nodeset %s out of range" %
                                                   numa_memory['nodeset'])

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            vm.wait_for_login()
        except virt_vm.VMStartError as e:
            raise exceptions.TestFail("Test failed in positive case.\n "
                                      "error: %s" % e)

        # get the remaining nodes besides those currently in use
        if numa_memory.get('placement') == 'auto':
            if not numad_log:
                raise exceptions.TestFail("numad usage not found in libvirtd"
                                          " log")
            logging.debug("numad log list is %s", numad_log)
            numad_ret = numad_log[1].split("numad: ")[-1]
            used_node = cpu.cpus_parser(numad_ret)
            logging.debug("numad nodes are %s", used_node)

        left_node = [i for i in node_list if i not in used_node]

        # run numatune to live-change the numa memory config
        for node in left_node:
            virsh.numatune(vm_name,
                           numa_memory.get('memory_mode'),
                           str(node),
                           options,
                           debug=True,
                           ignore_status=False)

            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            numa_memory_new = vmxml_new.numa_memory
            logging.debug("Current memory config dict is %s" % numa_memory_new)

            # Check xml config
            pos_numa_memory = numa_memory.copy()
            pos_numa_memory['nodeset'] = str(node)
            del pos_numa_memory['placement']
            logging.debug("Expect numa memory config is %s", pos_numa_memory)
            if pos_numa_memory != numa_memory_new:
                raise exceptions.TestFail("numa memory config %s not expected"
                                          " after live update" %
                                          numa_memory_new)

            # Check qemu process numa memory usage
            host_numa_node = utils_misc.NumaInfo()
            memory_status, qemu_cpu = utils_test.qemu.get_numa_status(
                host_numa_node, vm.get_pid())
            logging.debug("The memory status is %s", memory_status)
            # If the host has non-contiguous node numbers, convert them
            # into sequence numbers so that they can be used in mem_compare.
            # memory_status covers all online numa nodes, so node_list may
            # not match its node count.
            total_online_node_list = host_numa_node.online_nodes
            left_node_new = [
                total_online_node_list.index(i) for i in total_online_node_list
                if i != node
            ]
            used_node = [total_online_node_list.index(node)]
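            # Illustrative: with online nodes [0, 2, 3] and node = 2,
            # used_node becomes [1] and left_node_new becomes [0, 2]
            # (list positions, not node ids), matching memory_status indexing.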

            mem_compare(used_node, left_node_new)

    finally:
        try:
            path.find_command('virtlogd')
            process.run('pkill virtlogd', ignore_status=True, shell=True)
            process.run('systemctl restart virtlogd.socket',
                        ignore_status=True,
                        shell=True)
        except path.CmdNotFoundError:
            pass
        if vm.is_alive():
            vm.destroy(gracefully=False)
        libvirtd.exit()
        if config_path:
            config.restore()
            if os.path.exists(config_path):
                os.remove(config_path)
        backup_xml.sync()
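# Editorial note (assumed behaviour): killing virtlogd and restarting
# virtlogd.socket lets systemd socket activation respawn the daemon
# cleanly after the manually started "virtlogd -d" instance from setup.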
Example n. 10
0
def run(test, params, env):
    """
    Test numa memory migrate with live numa tuning
    """
    memory_status = []

    def mem_compare(used_node, left_node):
        """
        Memory usage on the used nodes should be greater than on the
        remaining nodes

        :param used_node: used node list
        :param left_node: remaining (unused) node list
        """
        used_mem_total = 0
        left_node_mem_total = 0
        for i in used_node:
            used_mem_total += int(memory_status[i])
        for i in left_node:
            left_node_mem_total += int(memory_status[i])
        if left_node_mem_total > used_mem_total:
            raise exceptions.TestFail("nodes memory usage not expected.")

    vm_name = params.get("main_vm")
    options = params.get("options", "live")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Get host numa node list
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes_withmem
    logging.debug("host node list is %s", node_list)
    if len(node_list) < 2:
        raise exceptions.TestSkipError("At least 2 numa nodes are needed on"
                                       " host")

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    try:
        if numa_memory.get('nodeset'):
            used_node = cpu.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            for i in used_node:
                if i not in node_list:
                    raise exceptions.TestSkipError("nodeset %s out of range" %
                                                   numa_memory['nodeset'])

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            vm.wait_for_login().close()
        except virt_vm.VMStartError as e:
            raise exceptions.TestFail("Test failed in positive case.\n "
                                      "error: %s" % e)

        # Get the remaining nodes besides those currently in use
        if numa_memory.get('placement') == 'auto':
            log_file = params.get("libvirtd_debug_file")
            check_res = utils_test.libvirt.check_logfile('Nodeset returned from numad:', log_file)
            # Sample: Nodeset returned from numad: 0-1
            match_obj = re.search(r'Nodeset returned from numad:\s(.*)', check_res.stdout_text)
            numad_ret = match_obj.group(1)
            logging.debug("Nodeset returned from numad: %s", numad_ret)
            used_node = cpu.cpus_parser(numad_ret)
            logging.debug("numad nodes are %s", used_node)

        left_node = [i for i in node_list if i not in used_node]

        # Run numatune to change the numa memory config of the live guest
        for node in left_node:
            virsh.numatune(vm_name, numa_memory.get('memory_mode'), str(node),
                           options, debug=True, ignore_status=False)

            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            numa_memory_new = vmxml_new.numa_memory
            logging.debug("Current memory config dict is %s" % numa_memory_new)

            # Check xml config
            pos_numa_memory = numa_memory.copy()
            pos_numa_memory['nodeset'] = str(node)
            del pos_numa_memory['placement']
            logging.debug("Expect numa memory config is %s", pos_numa_memory)
            if pos_numa_memory != numa_memory_new:
                raise exceptions.TestFail("numa memory config %s not expected"
                                          " after live update" %
                                          numa_memory_new)

            # Check qemu process numa memory usage
            host_numa_node = utils_misc.NumaInfo()
            memory_status, qemu_cpu = utils_test.qemu.get_numa_status(
                host_numa_node,
                vm.get_pid())
            logging.debug("The memory status is %s", memory_status)
            # If host node numbers are not contiguous, convert them into
            # sequence indexes so they can be used in mem_compare;
            # memory_status covers every numa node, so node_list may not
            # match the node count.
            total_online_node_list = host_numa_node.online_nodes
            left_node_new = [total_online_node_list.index(i)
                             for i in total_online_node_list if i != node]
            used_node = [total_online_node_list.index(node)]

            mem_compare(used_node, left_node_new)

    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
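# Illustrative only (domain name, mode and nodeset are assumed example
# values): each iteration of the loop above amounts to a call like
#     virsh.numatune("guest", "strict", "1", "live", debug=True)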
Example n. 11
0
def run(test, params, env):
    """
    Test numa tuning with memory
    """
    memory_status = []
    vcpu_placement = params.get("vcpu_placement")
    vcpu_cpuset = params.get("vcpu_cpuset")
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Get host numa node list
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes_withmem

    logging.debug("host node list is %s", " ".join(map(str, node_list)))

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value
    arch = platform.machine()
    if 'ppc64' in arch:
        try:
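            # ppc64 hosts can expose sparse node ids, so remap the logical
            # indexes in the nodeset onto actual online nodes; e.g. with
            # node_list = [0, 8] a nodeset of "0-1" becomes "0,8"
            # (illustrative values).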
            ppc_memory_nodeset = ""
            nodes = numa_memory["nodeset"]
            if '-' in nodes:
                for nnode in range(int(nodes.split('-')[0]), int(nodes.split('-')[1]) + 1):
                    ppc_memory_nodeset += str(node_list[nnode]) + ','
            else:
                node_lst = nodes.split(',')
                for nnode in range(len(node_lst)):
                    ppc_memory_nodeset += str(node_list[int(node_lst[nnode])]) + ','
            numa_memory["nodeset"] = ppc_memory_nodeset[:-1]
        except (KeyError, IndexError):
            pass

    try:
        # Get host cpu list
        tmp_list = []
        for node_num in node_list:
            host_node = utils_misc.NumaNode(i=node_num + 1)
            logging.debug("node %s cpu list is %s" %
                          (node_num, host_node.cpus))
            tmp_list += host_node.cpus
        cpu_list = [int(i) for i in tmp_list]

        dynamic_parameters = params.get('can_be_dynamic', 'no') == 'yes'

        if numa_memory.get('nodeset'):
            used_node = cpu.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            if not status_error:
                if not set(used_node).issubset(node_list):
                    if not dynamic_parameters:
                        test.cancel("nodeset %s out of range" % numa_memory['nodeset'])
                    else:
                        # Count nodes via cpus_parser so a range like
                        # "0-3" counts all four nodes rather than only the
                        # two endpoints of a '-' split.
                        nodes_size = len(cpu.cpus_parser(numa_memory['nodeset']))
                        if nodes_size > len(node_list):
                            test.cancel("nodeset %s out of range" % numa_memory['nodeset'])
                        else:
                            numa_memory['nodeset'] = ','.join(
                                map(str, node_list[:nodes_size]))
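                            # Illustrative: with node_list = [0, 1, 2] and a
                            # requested nodeset of "5,6", the fallback above
                            # yields nodeset "0,1".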

        if vcpu_cpuset:
            pre_cpuset = cpu.cpus_parser(vcpu_cpuset)
            logging.debug("Parsed cpuset list is %s", pre_cpuset)
            if not set(pre_cpuset).issubset(cpu_list):
                if not dynamic_parameters:
                    test.cancel("cpuset %s out of range" % vcpu_cpuset)
                else:
                    # Pick as many distinct CPUs as the requested cpuset
                    # lists, drawn only from CPUs that actually exist on
                    # the host (randint could return ids absent from a
                    # non-contiguous cpu_list).
                    sample_size = len(vcpu_cpuset.split(','))
                    random_cpus = sorted(random.sample(cpu_list, sample_size))
                    vcpu_cpuset = ','.join(str(cpu_num) for cpu_num in random_cpus)
                    pre_cpuset = cpu.cpus_parser(vcpu_cpuset)
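                    # Illustrative: random_cpus of [2, 5] gives
                    # vcpu_cpuset "2,5" and pre_cpuset [2, 5].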

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        if vmxml.xmltreefile.find('cputune'):
            vmxml.xmltreefile.remove_by_xpath('/cputune')
        else:
            logging.debug('No vcpupin found')
        if vcpu_placement:
            vmxml.placement = vcpu_placement
        if vcpu_cpuset:
            vmxml.cpuset = vcpu_cpuset
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            vm.wait_for_login().close()
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            numa_memory_new = vmxml_new.numa_memory
            logging.debug("Current memory config dict is %s" % numa_memory_new)

            # Check xml config
            if numa_memory.get('placement') == 'static':
                pre_numa_memory = numa_memory.copy()
                del pre_numa_memory['placement']
            else:
                pre_numa_memory = numa_memory

            if pre_numa_memory != numa_memory_new:
                test.fail("memory config %s not expected "
                          "after domain start" %
                          numa_memory_new)

            pos_vcpu_placement = vmxml_new.placement
            logging.debug("vcpu placement after domain start is %s",
                          pos_vcpu_placement)
            try:
                pos_cpuset = vmxml_new.cpuset
                logging.debug("vcpu cpuset after vm start is %s", pos_cpuset)
            except libvirt_xml.xcepts.LibvirtXMLNotFoundError:
                if vcpu_cpuset and vcpu_placement != 'auto':
                    test.fail("cpuset not found in domain xml.")

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if status_error:
                return
            else:
                test.fail("Test failed in positive case.\n "
                          "error: %s\n%s" % (e, bug_url))

        # Check qemu process numa memory usage
        memory_status, qemu_cpu = utils_test.qemu.get_numa_status(
            host_numa_node,
            vm.get_pid())
        logging.debug("The memory status is %s", memory_status)
        logging.debug("The cpu usage is %s", qemu_cpu)

        if vcpu_cpuset:
            total_cpu = []
            for node_cpu in qemu_cpu:
                total_cpu += node_cpu
            for i in total_cpu:
                if int(i) not in pre_cpuset:
                    test.fail("cpu %s is not expected" % i)
            cpu_affinity_check(test, vm_name, cpuset=pre_cpuset)
        if numa_memory.get('nodeset'):
            # If host node numbers are not contiguous, convert them into
            # sequence indexes so they can be used in mem_compare.
            if numa_memory.get('mode') == 'strict':
                left_node = [node_list.index(i) for i in node_list if i not in used_node]
                used_node = [node_list.index(i) for i in used_node]
                mem_compare(test, used_node, left_node, memory_status=memory_status)
            elif numa_memory.get('mode') == 'preferred':
                mode_nodeset = 'prefer:' + numa_memory.get('nodeset')
                numa_mode_check(test, vm, mode_nodeset)
            else:
                mode_nodeset = numa_memory.get('mode') + ':' + numa_memory.get('nodeset')
                numa_mode_check(test, vm, mode_nodeset)
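            # Illustrative mode_nodeset strings passed to numa_mode_check:
            # "prefer:0-1" for preferred mode, "interleave:0,1" otherwise
            # (node values assumed).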

        if vcpu_placement == 'auto' or numa_memory.get('placement') == 'auto':
            verify_numa_for_auto_replacement(test, params, vmxml,
                                             node_list, qemu_cpu, vm,
                                             memory_status=memory_status)

    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
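
# A self-contained sketch (editorial, standard library only) of the
# index-conversion trick used in all three examples: sparse host node ids
# are mapped to list positions so they can index memory_status directly.
def _demo_index_conversion():
    total_online_node_list = [0, 2, 3]  # assumed sparse host node ids
    node = 2                            # node selected via numatune
    left_node_new = [total_online_node_list.index(i)
                     for i in total_online_node_list if i != node]
    used_node = [total_online_node_list.index(node)]
    assert used_node == [1]
    assert left_node_new == [0, 2]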