Example #1
 def online(self, st):
     st_params = self.params.object_params(st)
     times = st_params.get("check_times")
     interval_time = st_params.get("interval_time")
     repeat_interval_time = st_params.get("repeat_interval_time")
     reverse = st_params.get("reverse")
     prepare = st_params.get("preare_env")
     if self.params.get("cpu_type") == "hotplugable":
         cpus = self.maxcpus
     else:
         cpus = self.smp
     if prepare == "yes":
         output = self.session.cmd_output("chcpu -e 0-%s" % (cpus - 1))
     for i in range(int(times)):
         if reverse == "yes":
             output = self.session.cmd_output("chcpu -d 0-%s" % (cpus - 1),
                                              timeout=self.timeout)
             time.sleep(int(interval_time))
             output = self.session.cmd_output("lscpu", timeout=self.timeout)
             logging.debug(output)
             info = utils_misc.get_cpu_info(self.session)
             error_context.context("Check online cpu is %s" % (cpus - 1),
                                   logging.info)
             if info['On-line CPU(s) list'] != str(cpus - 1):
                 self.test.fail("Failed to offline cpus, actual value is %s"
                                % info['On-line CPU(s) list'])
         output = self.session.cmd_output("chcpu -e 0-%s" % (cpus - 1),
                                          timeout=self.timeout)
         time.sleep(int(interval_time))
         info = utils_misc.get_cpu_info(self.session)
         error_context.context("Check online cpus are 0-%s" % (cpus - 1),
                               logging.info)
         if info['On-line CPU(s) list'] != "0-%s" % (cpus - 1):
             self.test.fail("Failed to online cpus to max, actual value is %s"
                            % info['On-line CPU(s) list'])
         time.sleep(int(repeat_interval_time))
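A minimal sketch (outside avocado-vt, standard library only) of how a field dictionary like the one returned by utils_misc.get_cpu_info() can be built from lscpu; the helper name lscpu_dict is hypothetical, and the values are strings, which is why the examples on this page compare against str(...):

import subprocess


def lscpu_dict():
    """Parse `lscpu` output into a {field: value} dict of strings."""
    output = subprocess.check_output(["lscpu"], universal_newlines=True)
    info = {}
    for line in output.splitlines():
        if ":" in line:
            key, value = line.split(":", 1)
            info[key.strip()] = value.strip()
    return info


# Usage:
# info = lscpu_dict()
# print(info.get("CPU(s)"), info.get("On-line CPU(s) list"))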
Example #2
def check_host(test):
    """
    Check the host for memtune and cachetune tests

    :param test: test object
    """
    # 1.Check virsh capabilities
    if utils_misc.get_cpu_info()['Flags'].find('mba ') == -1:
        test.cancel("This machine doesn't support cpu 'mba' flag")

    # 2.Mount resctrl
    process.run("mount -t resctrl resctrl /sys/fs/resctrl",
                verbose=True,
                shell=True)
    process.run("echo 'L3:0=0ff;1=0ff' > /sys/fs/resctrl/schemata",
                verbose=True,
                shell=True)

    # 3.Check host MBA info from virsh capabilities output
    cmd = "virsh capabilities | awk '/<memory_bandwidth>/,\
           /<\/memory_bandwidth>/'"

    out = ""
    out = process.run(cmd, shell=True).stdout_text

    if not re.search('node', out):
        test.fail("There is no memory_bandwidth info in capablities")
Example #3
 def offline(self, st):
     current_cpus = self.session.cmd_output(
         "lscpu | grep '^CPU(' | awk '{print $2}'").strip('\n')
     output = self.session.cmd_output("chcpu -d 0-%s" % (int(current_cpus) - 1),
                                      timeout=self.timeout)
     info = utils_misc.get_cpu_info(self.session)
     error_context.context("Check online cpu number is %s" % info['CPU(s)'],
                           logging.info)
     if info['CPU(s)'] != "1":
         self.test.fail("Failed to offline cpus to 1, actual value is %s"
                        % info['CPU(s)'])
Example #4
 def info(self, opt):
     info = utils_misc.get_cpu_info(self.session)
     if info['Core(s) per socket'] != str(self.vcpu_cores):
         error_context.context("Failed to verify cpu cores", logging.info)
     if info['Thread(s) per core'] != str(self.vcpu_threads):
         error_context.context("Failed to verify cpu threads", logging.info)
     if info['CPU(s)'] != str(self.smp):
         error_context.context("Failed to verify cpu smp", logging.info)
Example #5
 def online(self, st):
     if self.params.get("cpu_type") == "hotplugable":
         cpus = self.maxcpus
     else:
         cpus = self.smp
     current_cpus = self.session.cmd_output(
         "lscpu | grep '^CPU(' | awk '{print $2}'").strip('\n')
     output = self.session.cmd_output("chcpu -e 0-%s" % (int(current_cpus) - 1),
                                      timeout=self.timeout)
     info = utils_misc.get_cpu_info(self.session)
     error_context.context("Check online cpu number is %s" % info['CPU(s)'],
                           logging.info)
     if info['CPU(s)'] != str(cpus):
         self.test.fail("Failed to online cpus to max, actual value is %s"
                        % info['CPU(s)'])
Example #6
 def cpuinfo(self, opt):
     info = utils_misc.get_cpu_info(self.session)
     if info['Core(s) per socket'] != str(self.vcpu_cores):
         error_context.context("Failed to verify cpu cores", logging.info)
     if info['Thread(s) per core'] != str(self.vcpu_threads):
         error_context.context("Failed to verify cpu threads", logging.info)
     if info['CPU(s)'] == str(self.smp) or info['CPU(s)'] == str(self.maxcpus):
         error_context.context("Successfully verified cpu smp", logging.info)
     else:
         error_context.context("Failed to verify cpu smp", logging.info)
     self.session = self.vm.wait_for_login(timeout=self.timeout)
Example #7
 def info(self, opt):
     info = utils_misc.get_cpu_info(self.session)
     if info['Core(s) per socket'] != str(self.vcpu_cores):
         self.test.fail("Failed to verify cpu cores, actual value is %s"
                        % info['Core(s) per socket'])
     if info['Thread(s) per core'] != str(self.vcpu_threads):
         self.test.fail("Failed to verify cpu threads, actual value is %s"
                        % info['Thread(s) per core'])
     if self.params.get("cpu_type") == "hotplugable":
         if info['CPU(s)'] != str(self.maxcpus):
             self.test.fail("Failed to verify hotplugged cpus, actual value is %s"
                            % info['CPU(s)'])
     elif info['CPU(s)'] != str(self.smp):
         self.test.fail("Failed to verify cpus, actual value is %s"
                        % info['CPU(s)'])
     self.session = self.vm.wait_for_login(timeout=self.timeout)
Example #8
 def online(self, st):
     st_params = self.params.object_params(st)
     times = st_params.get("check_times")
     interval_time = st_params.get("check_interval")
     repeat_interval_time = st_params.get("repeat_interval_time")
     reverse = st_params.get("reverse")
     if self.params.get("cpu_type") == "hotplugable":
         cpus = self.maxcpus
     else:
         cpus = self.smp
     for i in range(int(times)):
         if reverse == "yes":
             output = self.session.cmd_output("chcpu -d 0-%s" % (cpus - 1),
                                              timeout=self.timeout)
             info = utils_misc.get_cpu_info(self.session)
             error_context.context("Check online cpu number is %s"
                                   % info['CPU(s)'], logging.info)
             if info['CPU(s)'] != "1":
                 self.test.fail("Failed to offline cpus to 1, actual value is %s"
                                % info['CPU(s)'])
             time.sleep(5)
         output = self.session.cmd_output("chcpu -e 0-%s" % (cpus - 1),
                                          timeout=self.timeout)
         info = utils_misc.get_cpu_info(self.session)
         error_context.context("Check online cpu number is %s" % info['CPU(s)'],
                               logging.info)
         if info['CPU(s)'] != str(cpus):
             self.test.fail("Failed to online cpus to max, actual value is %s"
                            % info['CPU(s)'])
         time.sleep(int(repeat_interval_time))
Example #9
 def info(self, opt):
     info = utils_misc.get_cpu_info(self.session)
     error_context.context("Cores in guest is %s" % info['Core(s) per socket'],
                           logging.info)
     if info['Core(s) per socket'] != str(self.vcpu_cores):
         self.test.fail("Failed to verify cpu cores, actual value is %s"
                        % info['Core(s) per socket'])
     error_context.context("Threads in guest is %s" % info['Thread(s) per core'],
                           logging.info)
     if info['Thread(s) per core'] != str(self.vcpu_threads):
         self.test.fail("Failed to verify cpu threads, actual value is %s"
                        % info['Thread(s) per core'])
     error_context.context("CPUs in guest is %s" % info['CPU(s)'], logging.info)
     if self.params.get("cpu_type") == "hotplugable":
         logging.debug(self.maxcpus)
         if info['CPU(s)'] != str(self.maxcpus):
             self.test.fail("Failed to verify hotplugged cpus, actual value is %s"
                            % info['CPU(s)'])
     elif info['CPU(s)'] != str(self.smp):
         self.test.fail("Failed to verify cpus, actual value is %s"
                        % info['CPU(s)'])
     self.session = self.vm.wait_for_login(timeout=self.timeout)
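The repeated pattern in Examples #4, #6, #7 and #9 (compare lscpu fields against expected guest topology, as strings) can be factored into a single helper; this is only a sketch, and verify_topology is not part of the test code above:

def verify_topology(info, expected):
    """Return a list of mismatch messages between an lscpu dict and
    expected topology values; an empty list means everything matched."""
    mismatches = []
    for field, value in expected.items():
        actual = info.get(field)
        if actual != str(value):
            mismatches.append("%s: expected %s, got %s" % (field, value, actual))
    return mismatches


# Usage (values illustrative):
# errors = verify_topology(info, {"Core(s) per socket": 2,
#                                 "Thread(s) per core": 1,
#                                 "CPU(s)": 4})
# if errors:
#     self.test.fail("; ".join(errors))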
Example #10
 def offline(self, st):
     st_params = self.params.object_params(st)
     times = st_params.get("check_times")
     interval_time = st_params.get("interval_time")
     repeat_interval_time = st_params.get("repeat_interval_time")
     clean = st_params.get("clean_env")
     prepare = st_params.get("preare_env")
     if self.params.get("cpu_type") == "hotplugable":
         cpus = self.maxcpus
     else:
         cpus = self.smp
     if prepare == "yes":
         output = self.session.cmd_output("chcpu -e 0-%s" % (cpus - 1))
     output = self.session.cmd_output("chcpu -d 0-%s" % (cpus - 1),
                                      timeout=self.timeout)
     time.sleep(int(interval_time))
     info = utils_misc.get_cpu_info(self.session)
     error_context.context("Check online cpu number is %s" % (cpus - 1),
                           logging.info)
     if info['On-line CPU(s) list'] != str(cpus - 1):
         self.test.fail("Failed to offline cpus, actual value is %s"
                        % info['On-line CPU(s) list'])
     if clean == "yes":
         output = self.session.cmd_output("chcpu -e 0-%s" % (cpus - 1))
Example #11
def run(test, params, env):
    """
    Test:<memorytune>
    1. Check virsh capabilities report right MBA info
    2  Mount resctrl
    3. Check host MBA info from virsh capabilities output
    4. Add memory bandwidth in domain XML and start vm
    5. check resctrl dir and verify libvirt set right values
    """

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    test_vm = env.get_vm(vm_name)
    schemata_file1 = params.get("schemata_file1", "")
    schemata_file2 = params.get("schemata_file2", "")
    mb_value1 = params.get("mb_value1", "")
    mb_value2 = params.get("mb_value2", "")
    vcpu_max_num = int(params.get("vcpu_max_num"))
    vcpu_current_num = int(params.get("vcpu_current_num"))
    topology_correction = "yes" == params.get("topology_correction", "no")

    # 1.Check virsh capabilities
    if utils_misc.get_cpu_info()['Flags'].find('mba ') == -1:
        test.cancel("This machine doesn't support cpu 'mba' flag")

    # 2.Mount resctrl
    process.run("mount -t resctrl resctrl /sys/fs/resctrl",
                verbose=True, shell=True)
    process.run("echo 'L3:0=0ff;1=0ff' > /sys/fs/resctrl/schemata",
                verbose=True, shell=True)

    # 3.Check host MBA info from virsh capabilities output
    cmd = "virsh capabilities | awk '/<memory_bandwidth>/,\
           /<\/memory_bandwidth>/'"
    out = ""
    out = process.run(cmd, shell=True).stdout_text

    if not re.search('node', out):
        test.fail("There is no memory_bandwidth info in capablities")

    # 4.Add memory bandwidth in domain XML
    memorytune_item_list = [ast.literal_eval(x)
                            for x in params.get("memorytune_items",
                                                "").split(';')]
    node_item_list1 = [ast.literal_eval(x)
                       for x in params.get("node_items1",
                                           "").split(';')]
    node_item_list2 = [ast.literal_eval(x)
                       for x in params.get("node_items2",
                                           "").split(';')]
    node_item_list = []
    node_item_list.append(node_item_list1)
    node_item_list.append(node_item_list2)
    cachetune_items = params.get("cachetune_items")

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    try:
        # change the vcpu number from 2 to 5
        vmxml.set_vm_vcpus(vm_name, vcpu_max_num, vcpu_current_num,
                           topology_correction=topology_correction)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        cputunexml = vm_xml.VMCPUTuneXML()
        logging.debug("cputunexml: %s" % cputunexml)

        if memorytune_item_list:
            for mitem in range(len(memorytune_item_list)):
                logging.debug("node %d " % mitem)
                memorytunexml = vm_xml.MemoryTuneXML()

                memorytunexml.vcpus = memorytune_item_list[mitem]['vcpus']
                for node in node_item_list[mitem]:
                    nodexml = memorytunexml.NodeXML()
                    nodexml.id = node['id']
                    nodexml.bandwidth = node['bandwidth']
                    memorytunexml.set_node(nodexml)

                logging.debug("memorytunexml.xml %s" % memorytunexml.xml)

                cputunexml.set_memorytune(memorytunexml)
                logging.debug("cputunexml.xml %s" % cputunexml.xml)

        if cachetune_items:
            cachetune_item_list = [ast.literal_eval(x)
                                   for x in params.get("cachetune_items",
                                                       "").split(';')]
            cache_item_list = [ast.literal_eval(x)
                               for x in params.get("cache_items",
                                                   "").split(';')]
            monitor_item_list = [ast.literal_eval(x)
                                 for x in params.get("monitor_items",
                                                     "").split(';')]
            for citem in range(len(cachetune_item_list)):
                logging.debug("cache %d " % citem)
                cachetunexml = vm_xml.CacheTuneXML()
                logging.debug("cachetunexml: %s" % cachetunexml)
                cachetunexml.vcpus = cachetune_item_list[citem]['vcpus']
                for cache in cache_item_list:
                    cachexml = cachetunexml.CacheXML()
                    cachexml.id = cache['id']
                    cachexml.level = cache['level']
                    cachexml.type = cache['type']
                    cachexml.size = cache['size']
                    cachexml.unit = cache['unit']
                    cachetunexml.set_cache(cachexml)

                for monitor in monitor_item_list:
                    monitorxml = cachetunexml.MonitorXML()
                    monitorxml.level = monitor['level']
                    monitorxml.vcpus = monitor['vcpus']
                    cachetunexml.set_monitor(monitorxml)
                cputunexml.set_cachetune(cachetunexml)

        vmxml.cputune = cputunexml
        logging.debug("vm xml: %s", vmxml)

        vmxml.sync()
        test_vm.start()

        # 5.Check resctrl dir and verify libvirt set right values
        check_membind_value(test, schemata_file1, mb_value1)
        check_membind_value(test, schemata_file2, mb_value2)
        found_mb = verify_membind_value(schemata_file1, mb_value1)
        if not found_mb:
            test.fail("The first schemata %s for vcpus is not set valid" %
                      schemata_file1)

        found_mb = verify_membind_value(schemata_file2, mb_value2)
        if not found_mb:
            test.fail("The second schemata %s for vcpus is not set valid" %
                      schemata_file2)

        # 6. Destroy the vm and verify the libvirt dir exist
        test_vm.destroy(gracefully=False)
        if os.path.exists(schemata_file1) or os.path.exists(schemata_file2):
            test.fail("The schemata file should be deleted after vm destroy")

    finally:
        if test_vm.is_alive():
            test_vm.destroy(gracefully=False)
        process.run("umount /sys/fs/resctrl",
                    verbose=True, shell=True)
        backup_xml.sync()
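The check_membind_value/verify_membind_value helpers are not shown in this example; purely as an illustration of the kind of lookup they could perform, a resctrl schemata file contains lines such as "L3:0=0ff;1=0ff" and "MB:0=100;1=100", and the MB line can be read like this (read_mb_schemata is a made-up name, not the helpers' actual implementation):

def read_mb_schemata(schemata_file):
    """Return the 'MB:' line from a resctrl schemata file, or None."""
    with open(schemata_file) as handle:
        for line in handle:
            line = line.strip()
            if line.startswith("MB:"):
                return line
    return None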
Example #12
def guest_numa_check(vm, exp_vcpu):
    """
    To check numa node values

    :param vm: VM object
    :param exp_vcpu: dict of expected vcpus
    :return: True if check succeed, False otherwise
    """
    logging.debug("Check guest numa")
    session = vm.wait_for_login()
    vm_cpu_info = utils_misc.get_cpu_info(session)
    session.close()
    vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm.name)
    try:
        node_num_xml = len(vmxml.cpu.numa_cell)
    except (TypeError, LibvirtXMLNotFoundError):
        # handle if no numa cell in guest xml, by default node 0
        node_num_xml = 1
    node_num_guest = int(vm_cpu_info["NUMA node(s)"])
    exp_num_nodes = node_num_xml
    status = True
    for node in range(node_num_xml):
        try:
            node_cpu_xml = vmxml.cpu.numa_cell[node]['cpus']
            node_cpu_xml = libvirt.cpus_parser(node_cpu_xml)
        except (TypeError, LibvirtXMLNotFoundError):
            try:
                node_cpu_xml = vmxml.current_vcpu
            except LibvirtXMLNotFoundError:
                node_cpu_xml = vmxml.vcpu
            node_cpu_xml = list(range(int(node_cpu_xml)))
        try:
            node_mem_xml = vmxml.cpu.numa_cell[node]['memory']
        except (TypeError, LibvirtXMLNotFoundError):
            node_mem_xml = vmxml.memory
        node_mem_guest = int(vm.get_totalmem_sys(node=node))
        node_cpu_xml_copy = node_cpu_xml[:]
        for cpu in node_cpu_xml_copy:
            if int(cpu) >= int(exp_vcpu["guest_live"]):
                node_cpu_xml.remove(cpu)
        if (not node_cpu_xml) and node_mem_guest == 0:
            exp_num_nodes -= 1
        try:
            node_cpu_guest = vm_cpu_info["NUMA node%s CPU(s)" % node]
            node_cpu_guest = libvirt.cpus_parser(node_cpu_guest)
        except KeyError:
            node_cpu_guest = []
        # Check cpu
        if node_cpu_xml != node_cpu_guest:
            status = False
            logging.error("Mismatch in cpus in node %s: xml %s guest %s", node,
                          node_cpu_xml, node_cpu_guest)
        # Check memory
        if int(node_mem_xml) != node_mem_guest:
            status = False
            logging.error("Mismatch in memory in node %s: xml %s guest %s",
                          node, node_mem_xml, node_mem_guest)
    # Check no. of nodes
    if exp_num_nodes != node_num_guest:
        status = False
        logging.error("Mismatch in numa nodes expected nodes: %s guest: %s",
                      exp_num_nodes, node_num_guest)
    return status
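guest_numa_check relies on a cpus_parser helper to expand ranges such as "0-3,5" taken from lscpu or the domain XML; a self-contained sketch of that kind of parser (parse_cpu_list is a hypothetical name, not the avocado-vt implementation):

def parse_cpu_list(cpu_str):
    """Expand a CPU list string such as '0-3,5,7-8' into a list of ints."""
    cpus = []
    for chunk in cpu_str.split(","):
        chunk = chunk.strip()
        if not chunk:
            continue
        if "-" in chunk:
            start, end = chunk.split("-", 1)
            cpus.extend(range(int(start), int(end) + 1))
        else:
            cpus.append(int(chunk))
    return cpus


# parse_cpu_list("0-3,5") -> [0, 1, 2, 3, 5]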
Example #13
def run(test, params, env):
    """
    Test guest numa setting
    """
    vcpu_num = int(params.get("vcpu_num", 2))
    max_mem = int(params.get("max_mem", 1048576))
    max_mem_unit = params.get("max_mem_unit", 'KiB')
    vcpu_placement = params.get("vcpu_placement", 'static')
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
    mode_dict = {
        'strict': 'bind',
        'preferred': 'prefer',
        'interleave': 'interleave'
    }

    # Prepare numatune memory parameter dict and list
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    memnode_tuple = ('memnode_cellid', 'memnode_mode', 'memnode_nodeset')
    numa_memnode = handle_param(memnode_tuple, params)

    if numa_memnode:
        if not libvirt_version.version_compare(1, 2, 7):
            raise error.TestNAError("Setting hugepages more specifically per "
                                    "numa node not supported on current "
                                    "version")

    # Prepare cpu numa cell parameter
    topology = {}
    topo_tuple = ('sockets', 'cores', 'threads')
    for key in topo_tuple:
        if params.get(key):
            topology[key] = params.get(key)

    cell_tuple = ('cell_id', 'cell_cpus', 'cell_memory')
    numa_cell = handle_param(cell_tuple, params)

    # Prepare qemu cmdline check parameter
    cmdline_tuple = ("qemu_cmdline", )
    cmdline_list = handle_param(cmdline_tuple, params)

    # Prepare hugepages parameter
    backup_list = []
    page_tuple = ('vmpage_size', 'vmpage_unit', 'vmpage_nodeset')
    page_list = handle_param(page_tuple, params)
    nr_pagesize_total = params.get("nr_pagesize_total")
    deallocate = False
    default_nr_hugepages_path = "/sys/kernel/mm/hugepages/hugepages-2048kB/"
    default_nr_hugepages_path += "nr_hugepages"

    if page_list:
        if not libvirt_version.version_compare(1, 2, 5):
            raise error.TestNAError("Setting hugepages more specifically per "
                                    "numa node not supported on current "
                                    "version")

    hp_cl = test_setup.HugePageConfig(params)
    default_hp_size = hp_cl.get_hugepage_size()
    supported_hp_size = hp_cl.get_multi_supported_hugepage_size()
    mount_path = []
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    qemu_conf_restore = False

    def _update_qemu_conf():
        """
        Mount hugepage path, update qemu conf then restart libvirtd
        """
        size_dict = {'2048': '2M', '1048576': '1G', '16384': '16M'}
        for page in page_list:
            if page['size'] not in supported_hp_size:
                raise error.TestError(
                    "Hugepage size [%s] isn't supported, "
                    "please verify kernel cmdline configuration." %
                    page['size'])
            m_path = "/dev/hugepages%s" % size_dict[page['size']]
            hp_cl.hugepage_size = int(page['size'])
            hp_cl.hugepage_path = m_path
            hp_cl.mount_hugepage_fs()
            mount_path.append(m_path)
        qemu_conf.hugetlbfs_mount = mount_path
        libvirtd.restart()

    try:
        # Get host numa node list
        host_numa_node = utils_misc.NumaInfo()
        node_list = host_numa_node.online_nodes
        logging.debug("host node list is %s", node_list)
        used_node = []
        if numa_memory.get('nodeset'):
            used_node += utlv.cpus_parser(numa_memory['nodeset'])
        if numa_memnode:
            for i in numa_memnode:
                used_node += utlv.cpus_parser(i['nodeset'])
        if page_list:
            host_page_tuple = ("hugepage_size", "page_num", "page_nodenum")
            h_list = handle_param(host_page_tuple, params)
            h_nodenum = [
                h_list[p_size]['nodenum'] for p_size in range(len(h_list))
            ]
            for i in h_nodenum:
                used_node += utlv.cpus_parser(i)
        if used_node and not status_error:
            logging.debug("set node list is %s", used_node)
            for i in used_node:
                if i > max(node_list):
                    raise error.TestNAError("%s in nodeset out of range" % i)

        # set hugepage with qemu.conf and mount path
        if default_hp_size == 2048:
            hp_cl.setup()
            deallocate = True
        else:
            _update_qemu_conf()
            qemu_conf_restore = True

        # set hugepage with total number or per-node number
        if nr_pagesize_total:
            # Only set total 2M size huge page number as total 1G size runtime
            # update not supported now.
            deallocate = True
            hp_cl.kernel_hp_file = default_nr_hugepages_path
            hp_cl.target_hugepages = int(nr_pagesize_total)
            hp_cl.set_hugepages()
        if page_list:
            hp_size = [h_list[p_size]['size'] for p_size in range(len(h_list))]
            multi_hp_size = hp_cl.get_multi_supported_hugepage_size()
            for size in hp_size:
                if size not in multi_hp_size:
                    raise error.TestNAError("The hugepage size %s not "
                                            "supported or not configured under"
                                            " current running kernel." % size)
            # backup node page setting and set new value
            for i in h_list:
                node_val = hp_cl.get_node_num_huge_pages(
                    i['nodenum'], i['size'])
                if i['size'] != "1048576":
                    node_dict = i.copy()
                    node_dict['num'] = node_val
                    backup_list.append(node_dict)
                    hp_cl.set_node_num_huge_pages(i['num'], i['nodenum'],
                                                  i['size'])
                else:
                    # kernel 1G hugepage runtime number update not supported
                    # now, check whether current host setting satisfy
                    # requirement or not.
                    if i['num'] < node_val:
                        raise error.TestNAError(
                            "%s size hugepage number %s of"
                            " node %s not satisfy for "
                            "testing" % (i['size'], node_val, i['nodenum']))

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.vcpu = vcpu_num
        vmxml.max_mem = max_mem
        vmxml.max_mem_unit = max_mem_unit
        vmxml.current_mem = max_mem
        vmxml.current_mem_unit = max_mem_unit

        # numatune setting
        if numa_memnode:
            vmxml.numa_memory = numa_memory
            vmxml.numa_memnode = numa_memnode
            del vmxml.numa_memory
        if numa_memory:
            vmxml.numa_memory = numa_memory

        # vcpu placement setting
        vmxml.placement = vcpu_placement

        # guest numa cpu setting
        vmcpuxml = libvirt_xml.vm_xml.VMCPUXML()
        vmcpuxml.xml = "<cpu><numa/></cpu>"
        if topology:
            vmcpuxml.topology = topology
        logging.debug(vmcpuxml.numa_cell)
        vmcpuxml.numa_cell = numa_cell
        logging.debug(vmcpuxml.numa_cell)
        vmxml.cpu = vmcpuxml

        # hugepages setting
        if page_list:
            membacking = libvirt_xml.vm_xml.VMMemBackingXML()
            hugepages = libvirt_xml.vm_xml.VMHugepagesXML()
            pagexml_list = []
            for i in range(len(page_list)):
                pagexml = hugepages.PageXML()
                pagexml.update(page_list[i])
                pagexml_list.append(pagexml)
            hugepages.pages = pagexml_list
            membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            session = vm.wait_for_login()
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug("vm xml after start is %s", vmxml_new)

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if status_error:
                return
            else:
                raise error.TestFail("Test failed in positive case.\n error:"
                                     " %s\n%s" % (e, bug_url))

        vm_pid = vm.get_pid()
        # numa hugepage check
        if page_list:
            numa_maps = open("/proc/%s/numa_maps" % vm_pid)
            numa_map_info = numa_maps.read()
            numa_maps.close()
            hugepage_info = re.findall(".*file=\S*hugepages.*", numa_map_info)
            if not hugepage_info:
                raise error.TestFail("Can't find hugepages usage info in vm "
                                     "numa maps")
            else:
                logging.debug("The hugepage info in numa_maps is %s" %
                              hugepage_info)
                map_dict = {}
                usage_dict = {}
                node_pattern = r"\s(\S+):(\S+)\s.*ram-node(\d+).*\s"
                node_pattern += "N(\d+)=(\d+)"
                for map_info in hugepage_info:
                    for (mem_mode, mem_num, cell_num, host_node_num,
                         vm_page_num) in re.findall(node_pattern, map_info):
                        usage_dict[mem_mode] = utlv.cpus_parser(mem_num)
                        usage_dict[host_node_num] = vm_page_num
                        map_dict[cell_num] = usage_dict.copy()
                logging.debug("huagepage info in vm numa maps is %s", map_dict)
                memnode_dict = {}
                usage_dict = {}
                if numa_memnode:
                    for i in numa_memnode:
                        node = utlv.cpus_parser(i['nodeset'])
                        mode = mode_dict[i['mode']]
                        usage_dict[mode] = node
                        memnode_dict[i['cellid']] = usage_dict.copy()
                    logging.debug("memnode setting dict is %s", memnode_dict)
                    for k in memnode_dict.keys():
                        for mk in memnode_dict[k].keys():
                            if memnode_dict[k][mk] != map_dict[k][mk]:
                                raise error.TestFail("vm pid numa map dict %s"
                                                     " not expected" %
                                                     map_dict)

        # qemu command line check
        f_cmdline = open("/proc/%s/cmdline" % vm_pid)
        q_cmdline_list = f_cmdline.read().split("\x00")
        f_cmdline.close()
        logging.debug("vm qemu cmdline list is %s" % q_cmdline_list)
        for cmd in cmdline_list:
            logging.debug("checking '%s' in qemu cmdline", cmd['cmdline'])
            p_found = False
            for q_cmd in q_cmdline_list:
                if re.search(cmd['cmdline'], q_cmd):
                    p_found = True
                    break
                else:
                    continue
            if not p_found:
                raise error.TestFail("%s not found in vm qemu cmdline" %
                                     cmd['cmdline'])

        # vm inside check
        vm_cpu_info = utils_misc.get_cpu_info(session)
        logging.debug("lscpu output dict in vm is %s", vm_cpu_info)
        session.close()
        node_num = int(vm_cpu_info["NUMA node(s)"])
        if node_num != len(numa_cell):
            raise error.TestFail("node number %s in vm is not expected" %
                                 node_num)
        for i in range(len(numa_cell)):
            cpu_str = vm_cpu_info["NUMA node%s CPU(s)" % i]
            vm_cpu_list = utlv.cpus_parser(cpu_str)
            cpu_list = utlv.cpus_parser(numa_cell[i]["cpus"])
            if vm_cpu_list != cpu_list:
                raise error.TestFail("vm node %s cpu list %s not expected" %
                                     (i, vm_cpu_list))
        if topology:
            vm_topo_tuple = ("Socket(s)", "Core(s) per socket",
                             "Thread(s) per core")
            for i in range(len(topo_tuple)):
                topo_info = vm_cpu_info[vm_topo_tuple[i]]
                if topo_info != topology[topo_tuple[i]]:
                    raise error.TestFail("%s in vm topology not expected." %
                                         topo_tuple[i])
Example #14
        logging.debug("vm qemu cmdline list is %s" % q_cmdline_list)
        for cmd in cmdline_list:
            logging.debug("checking '%s' in qemu cmdline", cmd['cmdline'])
            p_found = False
            for q_cmd in q_cmdline_list:
                if re.search(cmd['cmdline'], q_cmd):
                    p_found = True
                    break
                else:
                    continue
            if not p_found:
                raise error.TestFail("%s not found in vm qemu cmdline" %
                                     cmd['cmdline'])

        # vm inside check
        vm_cpu_info = utils_misc.get_cpu_info(session)
        logging.debug("lscpu output dict in vm is %s", vm_cpu_info)
        session.close()
        node_num = int(vm_cpu_info["NUMA node(s)"])
        if node_num != len(numa_cell):
            raise error.TestFail("node number %s in vm is not expected" %
                                 node_num)
        for i in range(len(numa_cell)):
            cpu_str = vm_cpu_info["NUMA node%s CPU(s)" % i]
            vm_cpu_list = utlv.cpus_parser(cpu_str)
            cpu_list = utlv.cpus_parser(numa_cell[i]["cpus"])
            if vm_cpu_list != cpu_list:
                raise error.TestFail("vm node %s cpu list %s not expected" %
                                     (i, vm_cpu_list))
        if topology:
            vm_topo_tuple = ("Socket(s)", "Core(s) per socket",
def run(test, params, env):
    """
    Test command: virsh guestvcpus

    The command query or modify state of vcpu in the vm
    1. Prepare test environment, start vm with guest agent
    2. Perform virsh guestvcpus query/enable/disable operation
    3. Check the cpus in the vm
    4. Recover test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vcpus_num = int(params.get("vcpus_num", "4"))
    vcpus_placement = params.get("vcpus_placement", "static")
    option = params.get("option", "")
    combine = params.get("combine", "")
    invalid_domain = params.get("invalid_domain", "")
    domain_name = params.get("domain_name", "")
    invalid_cpulist = params.get("invalid_cpulist", "")
    status_error = params.get("status_error", "no")
    error_msg = params.get("error_msg", "no")
    vcpus_list = ""

    # Back up domain XML
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_bakup = vmxml.copy()

    try:
        # Modify vm with static vcpus
        if vm.is_alive():
            vm.destroy()
        vmxml.placement = vcpus_placement
        vmxml.set_vm_vcpus(vm_name, vcpus_num, vcpus_num)
        logging.debug("Define guest with '%s' vcpus" % str(vcpus_num))

        # Start guest agent in vm
        vm.prepare_guest_agent(prepare_xml=False)

        # Normal test: disable/ enable guest vcpus
        if option and status_error == "no":
            for cpu in range(1, vcpus_num):
                virsh.guestvcpus(vm_name, str(cpu), option, debug=True)

        # Normal test: combine: --disable 1-max then --enable 1
        if combine == "yes" and status_error == "no":
            vcpus_list = '1' + '-' + str(vcpus_num - 1)
            option = "--disable"
            virsh.guestvcpus(vm_name, vcpus_list, option, debug=True)
            vcpus_list = '1'
            option = "--enable"
            virsh.guestvcpus(vm_name, vcpus_list, option, debug=True)

        # Error test: invalid_domain
        if invalid_domain == "yes":
            vm_name = domain_name
        # Error test: invalid_cpulist
        if invalid_cpulist == "yes":
            if option == "--enable":
                vcpus_list = str(vcpus_num)
            else:
                vcpus_list = '0' + '-' + str(vcpus_num - 1)
            ret = virsh.guestvcpus(vm_name, vcpus_list, option)
        else:
            # Query guest vcpus
            ret = virsh.guestvcpus(vm_name)
            output = ret.stdout.strip()

        # Check test results
        if status_error == "yes":
            libvirt.check_result(ret, error_msg)
        else:
            # Check the test result of query
            ret_output = dict([item.strip() for item in line.split(":")]
                              for line in output.split("\n"))
            if combine == "yes":
                online_vcpus = '0-1'
            elif option == "--disable":
                online_vcpus = '0'
            else:
                online_vcpus = '0' + '-' + str(vcpus_num - 1)

            if ret_output["online"] != online_vcpus:
                test.fail("Query result is different from"
                          " the '%s' command." % option)

            # Check the cpu in guest
            session = vm.wait_for_login()
            vm_cpu_info = utils_misc.get_cpu_info(session)
            session.close()

            if combine == "yes":
                online_vcpus = '0,1'
                offline_vcpus = '2' + ',' + str(vcpus_num - 1)
            elif option == "--disable":
                online_vcpus = '0'
                offline_vcpus = '1' + '-' + str(vcpus_num - 1)
            else:
                online_vcpus = '0' + '-' + str(vcpus_num - 1)
                offline_vcpus = ""

            if offline_vcpus:
                if (vm_cpu_info["Off-line CPU(s) list"] != offline_vcpus or
                   vm_cpu_info["On-line CPU(s) list"] != online_vcpus):
                    test.fail("CPUs in vm is different from"
                              " the %s command." % option)
            elif vm_cpu_info["On-line CPU(s) list"] != online_vcpus:
                test.fail("On-line CPUs in vm is different"
                          " from the %s command." % option)
            else:
                logging.debug("lscpu in vm '%s' is: \n '%s'" %
                              (vm_name, vm_cpu_info))

    finally:
        # Recover VM
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_bakup.sync()
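The query branch above turns `virsh guestvcpus` output into a dict with a one-line comprehension; the same parsing as a small standalone sketch (the sample output below is illustrative):

def parse_guestvcpus(output):
    """Parse 'key : value' lines from `virsh guestvcpus` into a dict."""
    return dict([item.strip() for item in line.split(":", 1)]
                for line in output.splitlines() if ":" in line)


# parse_guestvcpus("vcpus : 0-3\nonline : 0-1")["online"] -> '0-1'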
Example #16
    def output_check(nodeinfo_output):
        # Check CPU model
        cpu_model_nodeinfo = _check_nodeinfo(nodeinfo_output, "CPU model", 3)
        cpu_arch = platform.machine()
        if not re.match(cpu_model_nodeinfo, cpu_arch):
            test.fail("Virsh nodeinfo output didn't match CPU model")

        # Check number of CPUs, nodeinfo CPUs represent online threads in the
        # system, check all online cpus in sysfs
        cpus_nodeinfo = _check_nodeinfo(nodeinfo_output, "CPU(s)", 2)
        cmd = "cat /sys/devices/system/cpu/cpu*/online | grep 1 | wc -l"
        cpus_online = process.run(cmd, ignore_status=True,
                                  shell=True).stdout.strip()
        cmd = "cat /sys/devices/system/cpu/cpu*/online | wc -l"
        cpus_total = process.run(cmd, ignore_status=True,
                                 shell=True).stdout.strip()
        if not os.path.exists('/sys/devices/system/cpu/cpu0/online'):
            cpus_online = str(int(cpus_online) + 1)
            cpus_total = str(int(cpus_total) + 1)

        logging.debug("host online cpus are %s", cpus_online)
        logging.debug("host total cpus are %s", cpus_total)

        if cpus_nodeinfo != cpus_online:
            if 'ppc' in cpu_arch:
                if cpus_nodeinfo != cpus_total:
                    test.fail("Virsh nodeinfo output of CPU(s) on"
                              " ppc did not match all threads in "
                              "the system")
            else:
                test.fail("Virsh nodeinfo output didn't match "
                          "number of CPU(s)")

        # Check CPU frequency, frequency is under clock for ppc
        cpu_frequency_nodeinfo = _check_nodeinfo(nodeinfo_output,
                                                 'CPU frequency', 3)
        cmd = ("cat /proc/cpuinfo | grep -E 'cpu MHz|clock|BogoMIPS' | "
               "head -n1 | awk -F: '{print $2}' | awk -F. '{print $1}'")
        cmd_result = process.run(cmd, ignore_status=True, shell=True)
        cpu_frequency_os = cmd_result.stdout_text.strip()
        logging.debug("cpu_frequency_nodeinfo=%s cpu_frequency_os=%s",
                      cpu_frequency_nodeinfo, cpu_frequency_os)
        #
        # Matching CPU frequency is not an exact science on today's
        # processors and OSes.  CPUs can have their execution speed varied
        # based on current workload in order to save energy and keep cool.
        # Thus, since we're getting the values at disparate points in time,
        # we cannot necessarily do a pure comparison.
        # So, let's get the absolute value of the difference and ensure
        # that it's within 20 percent of each value to give us enough of
        # a "fudge" factor to declare "close enough".  Don't return a
        # failure; just print a debug message and move on.
        diffval = abs(int(cpu_frequency_nodeinfo) - int(cpu_frequency_os))
        if (float(diffval) / float(cpu_frequency_nodeinfo) > 0.20
                or float(diffval) / float(cpu_frequency_os) > 0.20):
            logging.debug("Virsh nodeinfo output didn't match CPU "
                          "frequency within 20 percent")

        # Get CPU topology from virsh capabilities xml
        cpu_topology = capability_xml.CapabilityXML()['cpu_topology']
        logging.debug("Cpu topology in virsh capabilities output: %s",
                      cpu_topology)

        # Check CPU socket(s)
        cpu_sockets_nodeinfo = int(
            _check_nodeinfo(nodeinfo_output, 'CPU socket(s)', 3))
        # CPU socket(s) in virsh nodeinfo is Total sockets in each node, not
        # total sockets in the system, so get total sockets in one node and
        # check with it
        node_info = utils_misc.NumaInfo()
        node_online_list = node_info.get_online_nodes()
        cmd = "cat /sys/devices/system/node/node%s" % node_online_list[0]
        cmd += "/cpu*/topology/physical_package_id | uniq |wc -l"
        cmd_result = process.run(cmd, ignore_status=True, shell=True)
        total_sockets_in_node = int(cmd_result.stdout_text.strip())
        if total_sockets_in_node != cpu_sockets_nodeinfo:
            test.fail("Virsh nodeinfo output didn't match CPU "
                      "socket(s) of host OS")
        if cpu_sockets_nodeinfo != int(cpu_topology['sockets']):
            test.fail("Virsh nodeinfo output didn't match CPU "
                      "socket(s) of virsh capabilities output")

        # Check Core(s) per socket
        cores_per_socket_nodeinfo = _check_nodeinfo(nodeinfo_output,
                                                    'Core(s) per socket', 4)
        cmd = "lscpu | grep 'Core(s) per socket' | head -n1 | awk '{print $4}'"
        cmd_result = process.run(cmd, ignore_status=True, shell=True)
        cores_per_socket_os = cmd_result.stdout_text.strip()
        spec_numa = False
        if not re.match(cores_per_socket_nodeinfo, cores_per_socket_os):
            # for spec NUMA arch, the output of nodeinfo is in a spec format
            cpus_os = utils_misc.get_cpu_info().get("CPU(s)")
            numa_cells_nodeinfo = _check_nodeinfo(nodeinfo_output,
                                                  'NUMA cell(s)', 3)
            if (re.match(cores_per_socket_nodeinfo, cpus_os)
                    and re.match(numa_cells_nodeinfo, "1")):
                spec_numa = True
            else:
                test.fail("Virsh nodeinfo output didn't match "
                          "CPU(s) or Core(s) per socket of host OS")
        if cores_per_socket_nodeinfo != cpu_topology['cores']:
            test.fail("Virsh nodeinfo output didn't match Core(s) "
                      "per socket of virsh capabilities output")
        # Check Thread(s) per core
        threads_per_core_nodeinfo = _check_nodeinfo(nodeinfo_output,
                                                    'Thread(s) per core', 4)
        if not spec_numa:
            if threads_per_core_nodeinfo != cpu_topology['threads']:
                test.fail("Virsh nodeinfo output didn't match "
                          "Thread(s) per core of virsh "
                          "capabilities output")
        else:
            if threads_per_core_nodeinfo != "1":
                test.fail("Virsh nodeinfo output didn't match "
                          "Thread(s) per core of virsh "
                          "capabilities output")
        # Check Memory size
        memory_size_nodeinfo = int(
            _check_nodeinfo(nodeinfo_output, 'Memory size', 3))
        memory_size_os = 0
        if libvirt_version.version_compare(2, 0, 0):
            for i in node_online_list:
                node_memory = node_info.read_from_node_meminfo(i, 'MemTotal')
                memory_size_os += int(node_memory)
        else:
            memory_size_os = utils_memory.memtotal()
        logging.debug('The host total memory from nodes is %s', memory_size_os)

        if memory_size_nodeinfo != memory_size_os:
            test.fail("Virsh nodeinfo output didn't match " "Memory size")
Example #17
def run(test, params, env):
    """
    Test guest numa setting
    """
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes
    arch = platform.machine()
    if 'ppc64' in arch:
        try:
            ppc_memory_nodeset = ""
            nodes = params['memory_nodeset']
            if '-' in nodes:
                for n in range(int(nodes.split('-')[0]), int(nodes.split('-')[1])):
                    ppc_memory_nodeset += str(node_list[n]) + ','
                ppc_memory_nodeset += str(node_list[int(nodes.split('-')[1])])
            else:
                node_lst = nodes.split(',')
                for n in range(len(node_lst) - 1):
                    ppc_memory_nodeset += str(node_list[int(node_lst[n])]) + ','
                ppc_memory_nodeset += str(node_list[int(node_lst[-1])])
            params['memory_nodeset'] = ppc_memory_nodeset
        except IndexError:
            test.cancel("No of numas in config does not match with no of "
                        "online numas in system")
        except utils_params.ParamNotFound:
            pass
        pkeys = ('memnode_nodeset', 'page_nodenum')
        for pkey in pkeys:
            for key in params.keys():
                if pkey in key:
                    params[key] = str(node_list[int(params[key])])
        # Modify qemu command line
        try:
            if params['qemu_cmdline_mem_backend_1']:
                memory_nodeset = sorted(params['memory_nodeset'].split(','))
                if len(memory_nodeset) > 1:
                    if int(memory_nodeset[1]) - int(memory_nodeset[0]) == 1:
                        qemu_cmdline = "memory-backend-ram,.*?id=ram-node1," \
                                       ".*?host-nodes=%s-%s,policy=bind" % \
                                       (memory_nodeset[0], memory_nodeset[1])
                    else:
                        qemu_cmdline = "memory-backend-ram,.*?id=ram-node1," \
                                       ".*?host-nodes=%s,.*?host-nodes=%s,policy=bind" % \
                                       (memory_nodeset[0], memory_nodeset[1])
                    params['qemu_cmdline_mem_backend_1'] = qemu_cmdline
        except utils_params.ParamNotFound:
            pass
        try:
            if params['qemu_cmdline_mem_backend_0']:
                qemu_cmdline = params['qemu_cmdline_mem_backend_0']
                params['qemu_cmdline_mem_backend_0'] = qemu_cmdline.replace(
                    ".*?host-nodes=1", ".*?host-nodes=%s" % params['memnode_nodeset_0'])
        except utils_params.ParamNotFound:
            pass
    vcpu_num = int(params.get("vcpu_num", 2))
    max_mem = int(params.get("max_mem", 1048576))
    max_mem_unit = params.get("max_mem_unit", 'KiB')
    vcpu_placement = params.get("vcpu_placement", 'static')
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
    mode_dict = {'strict': 'bind', 'preferred': 'prefer',
                 'interleave': 'interleave'}

    # Prepare numatune memory parameter dict and list
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    memnode_tuple = ('memnode_cellid', 'memnode_mode', 'memnode_nodeset')
    numa_memnode = handle_param(memnode_tuple, params)

    if numa_memnode:
        if not libvirt_version.version_compare(1, 2, 7):
            test.cancel("Setting hugepages more specifically per "
                        "numa node not supported on current "
                        "version")

    # Prepare cpu numa cell parameter
    topology = {}
    topo_tuple = ('sockets', 'cores', 'threads')
    for key in topo_tuple:
        if params.get(key):
            topology[key] = params.get(key)

    cell_tuple = ('cell_id', 'cell_cpus', 'cell_memory')
    numa_cell = handle_param(cell_tuple, params)

    # Prepare qemu cmdline check parameter
    cmdline_tuple = ("qemu_cmdline",)
    cmdline_list = handle_param(cmdline_tuple, params)

    # Prepare hugepages parameter
    backup_list = []
    page_tuple = ('vmpage_size', 'vmpage_unit', 'vmpage_nodeset')
    page_list = handle_param(page_tuple, params)
    nr_pagesize_total = params.get("nr_pagesize_total")
    deallocate = False
    default_nr_hugepages_path = "/sys/kernel/mm/hugepages/hugepages-2048kB/"
    default_nr_hugepages_path += "nr_hugepages"

    if page_list:
        if not libvirt_version.version_compare(1, 2, 5):
            test.cancel("Setting hugepages more specifically per "
                        "numa node not supported on current "
                        "version")

    hp_cl = test_setup.HugePageConfig(params)
    supported_hp_size = hp_cl.get_multi_supported_hugepage_size()
    mount_path = []
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    qemu_conf_restore = False

    def _update_qemu_conf():
        """
        Mount hugepage path, update qemu conf then restart libvirtd
        """
        size_dict = {'2048': '2M', '1048576': '1G', '16384': '16M'}
        for page in page_list:
            if page['size'] not in supported_hp_size:
                test.cancel("Hugepage size [%s] isn't supported, "
                            "please verify kernel cmdline configuration."
                            % page['size'])
            m_path = "/dev/hugepages%s" % size_dict[page['size']]
            hp_cl.hugepage_size = int(page['size'])
            hp_cl.hugepage_path = m_path
            hp_cl.mount_hugepage_fs()
            mount_path.append(m_path)
        if mount_path:
            qemu_conf.hugetlbfs_mount = mount_path
            libvirtd.restart()

    try:
        # Get host numa node list
        logging.debug("host node list is %s", node_list)
        used_node = []
        if numa_memory.get('nodeset'):
            used_node += utlv.cpus_parser(numa_memory['nodeset'])
        if numa_memnode:
            for i in numa_memnode:
                used_node += utlv.cpus_parser(i['nodeset'])
        if page_list:
            host_page_tuple = ("hugepage_size", "page_num", "page_nodenum")
            h_list = handle_param(host_page_tuple, params)
            h_nodenum = [h_list[p_size]['nodenum']
                         for p_size in range(len(h_list))]
            for i in h_nodenum:
                used_node += utlv.cpus_parser(i)
        if used_node and not status_error:
            logging.debug("set node list is %s", used_node)
            used_node = list(set(used_node))
            for i in used_node:
                if i not in node_list:
                    test.cancel("%s in nodeset out of range" % i)
                mem_size = host_numa_node.read_from_node_meminfo(i, 'MemTotal')
                logging.debug("the memory total in the node %s is %s", i, mem_size)
                if not int(mem_size):
                    test.cancel("node %s memory is empty" % i)

        # set hugepage with qemu.conf and mount path
        _update_qemu_conf()
        qemu_conf_restore = True

        # set hugepage with total number or per-node number
        if nr_pagesize_total:
            # Only set total 2M size huge page number as total 1G size runtime
            # update not supported now.
            deallocate = True
            hp_cl.kernel_hp_file = default_nr_hugepages_path
            hp_cl.target_hugepages = int(nr_pagesize_total)
            hp_cl.set_hugepages()
        if page_list:
            hp_size = [h_list[p_size]['size'] for p_size in range(len(h_list))]
            multi_hp_size = hp_cl.get_multi_supported_hugepage_size()
            for size in hp_size:
                if size not in multi_hp_size:
                    test.cancel("The hugepage size %s not "
                                "supported or not configured under"
                                " current running kernel." % size)
            # backup node page setting and set new value
            for i in h_list:
                node_val = hp_cl.get_node_num_huge_pages(i['nodenum'],
                                                         i['size'])
                # set hugepage per node if current value not satisfied
                # kernel 1G hugepage runtime number update is supported now
                if int(i['num']) > node_val:
                    node_dict = i.copy()
                    node_dict['num'] = node_val
                    backup_list.append(node_dict)
                    hp_cl.set_node_num_huge_pages(i['num'],
                                                  i['nodenum'],
                                                  i['size'])
                    node_val_after_set = hp_cl.get_node_num_huge_pages(i['nodenum'],
                                                                       i['size'])
                    if node_val_after_set < int(i['num']):
                        test.cancel("There is not enough memory to allocate.")

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.vcpu = vcpu_num
        vmxml.max_mem = max_mem
        vmxml.max_mem_unit = max_mem_unit
        vmxml.current_mem = max_mem
        vmxml.current_mem_unit = max_mem_unit

        # numatune setting
        if numa_memnode:
            vmxml.numa_memory = numa_memory
            vmxml.numa_memnode = numa_memnode
            del vmxml.numa_memory
        if numa_memory:
            vmxml.numa_memory = numa_memory

        # vcpu placement setting
        vmxml.placement = vcpu_placement

        # guest numa cpu setting
        vmcpuxml = libvirt_xml.vm_xml.VMCPUXML()
        vmcpuxml.xml = "<cpu><numa/></cpu>"
        if topology:
            vmcpuxml.topology = topology
        logging.debug(vmcpuxml.numa_cell)
        vmcpuxml.numa_cell = numa_cell
        logging.debug(vmcpuxml.numa_cell)
        vmxml.cpu = vmcpuxml

        # hugepages setting
        if page_list:
            membacking = libvirt_xml.vm_xml.VMMemBackingXML()
            hugepages = libvirt_xml.vm_xml.VMHugepagesXML()
            pagexml_list = []
            for i in range(len(page_list)):
                pagexml = hugepages.PageXML()
                pagexml.update(page_list[i])
                pagexml_list.append(pagexml)
            hugepages.pages = pagexml_list
            membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            session = vm.wait_for_login()
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug("vm xml after start is %s", vmxml_new)

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if status_error:
                return
            else:
                test.fail("Test failed in positive case.\n error:"
                          " %s\n%s" % (e, bug_url))

        vm_pid = vm.get_pid()
        # numa hugepage check
        if page_list:
            with open("/proc/%s/numa_maps" % vm_pid) as numa_maps:
                numa_map_info = numa_maps.read()
            hugepage_info = re.findall(".*file=\S*hugepages.*", numa_map_info)
            if not hugepage_info:
                test.fail("Can't find hugepages usage info in vm "
                          "numa maps")
            else:
                logging.debug("The hugepage info in numa_maps is %s" %
                              hugepage_info)
                map_dict = {}
                usage_dict = {}
                node_pattern = r"\s(\S+):(\S+)\s.*ram-node(\d+).*\s"
                node_pattern += "N(\d+)=(\d+)"
                for map_info in hugepage_info:
                    for (mem_mode, mem_num, cell_num, host_node_num,
                         vm_page_num) in re.findall(node_pattern, map_info):
                        usage_dict[mem_mode] = utlv.cpus_parser(mem_num)
                        usage_dict[host_node_num] = vm_page_num
                        map_dict[cell_num] = usage_dict.copy()
                logging.debug("huagepage info in vm numa maps is %s",
                              map_dict)
                memnode_dict = {}
                usage_dict = {}
                if numa_memnode:
                    for i in numa_memnode:
                        node = utlv.cpus_parser(i['nodeset'])
                        mode = mode_dict[i['mode']]
                        usage_dict[mode] = node
                        memnode_dict[i['cellid']] = usage_dict.copy()
                    logging.debug("memnode setting dict is %s", memnode_dict)
                    for k in list(memnode_dict.keys()):
                        for mk in list(memnode_dict[k].keys()):
                            if memnode_dict[k][mk] != map_dict[k][mk]:
                                test.fail("vm pid numa map dict %s"
                                          " not expected" % map_dict)

        # qemu command line check
        with open("/proc/%s/cmdline" % vm_pid) as f_cmdline:
            q_cmdline_list = f_cmdline.read().split("\x00")
        logging.debug("vm qemu cmdline list is %s" % q_cmdline_list)
        for cmd in cmdline_list:
            logging.debug("checking '%s' in qemu cmdline", cmd['cmdline'])
            p_found = False
            for q_cmd in q_cmdline_list:
                if re.search(cmd['cmdline'], q_cmd):
                    p_found = True
                    break
                else:
                    continue
            if not p_found:
                test.fail("%s not found in vm qemu cmdline" % cmd['cmdline'])

        # vm inside check
        vm_cpu_info = utils_misc.get_cpu_info(session)
        logging.debug("lscpu output dict in vm is %s", vm_cpu_info)
        session.close()
        node_num = int(vm_cpu_info["NUMA node(s)"])
        if node_num != len(numa_cell):
            test.fail("node number %s in vm is not expected" % node_num)
        for i in range(len(numa_cell)):
            cpu_str = vm_cpu_info["NUMA node%s CPU(s)" % i]
            vm_cpu_list = utlv.cpus_parser(cpu_str)
            cpu_list = utlv.cpus_parser(numa_cell[i]["cpus"])
            if vm_cpu_list != cpu_list:
                test.fail("vm node %s cpu list %s not expected"
                          % (i, vm_cpu_list))
        if topology:
            vm_topo_tuple = ("Socket(s)", "Core(s) per socket",
                             "Thread(s) per core")
            for i in range(len(topo_tuple)):
                topo_info = vm_cpu_info[vm_topo_tuple[i]]
                if topo_info != topology[topo_tuple[i]]:
                    test.fail("%s in vm topology not expected." % topo_tuple[i])
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if page_list:
            for i in backup_list:
                hp_cl.set_node_num_huge_pages(i['num'],
                                              i['nodenum'], i['size'])
        if deallocate:
            hp_cl.deallocate = deallocate
            hp_cl.cleanup()
        if qemu_conf_restore:
            qemu_conf.restore()
            libvirtd.restart()
            for mt_path in mount_path:
                try:
                    process.run("umount %s" % mt_path, shell=True)
                except process.CmdError:
                    logging.warning("umount %s failed" % mt_path)
Exemple #18
0
def run(test, params, env):
    """
    Test guest numa setting
    """
    vcpu_num = int(params.get("vcpu_num", 2))
    max_mem = int(params.get("max_mem", 1048576))
    max_mem_unit = params.get("max_mem_unit", 'KiB')
    vcpu_placement = params.get("vcpu_placement", 'static')
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
    mode_dict = {'strict': 'bind', 'preferred': 'prefer',
                 'interleave': 'interleave'}

    # Prepare numatune memory parameter dict and list
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    memnode_tuple = ('memnode_cellid', 'memnode_mode', 'memnode_nodeset')
    numa_memnode = handle_param(memnode_tuple, params)

    if numa_memnode:
        if not libvirt_version.version_compare(1, 2, 7):
            raise error.TestNAError("Setting hugepages more specifically per "
                                    "numa node not supported on current "
                                    "version")

    # Prepare cpu numa cell parameter
    topology = {}
    topo_tuple = ('sockets', 'cores', 'threads')
    for key in topo_tuple:
        if params.get(key):
            topology[key] = params.get(key)

    cell_tuple = ('cell_id', 'cell_cpus', 'cell_memory')
    numa_cell = handle_param(cell_tuple, params)

    # Prepare qemu cmdline check parameter
    cmdline_tuple = ("qemu_cmdline",)
    cmdline_list = handle_param(cmdline_tuple, params)

    # Prepare hugepages parameter
    backup_list = []
    page_tuple = ('vmpage_size', 'vmpage_unit', 'vmpage_nodeset')
    page_list = handle_param(page_tuple, params)
    nr_pagesize_total = params.get("nr_pagesize_total")
    deallocate = False
    default_nr_hugepages_path = "/sys/kernel/mm/hugepages/hugepages-2048kB/"
    default_nr_hugepages_path += "nr_hugepages"

    if page_list:
        if not libvirt_version.version_compare(1, 2, 5):
            raise error.TestNAError("Setting hugepages more specifically per "
                                    "numa node not supported on current "
                                    "version")

    hp_cl = test_setup.HugePageConfig(params)
    default_hp_size = hp_cl.get_hugepage_size()
    supported_hp_size = hp_cl.get_multi_supported_hugepage_size()
    mount_path = []
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    qemu_conf_restore = False

    def _update_qemu_conf():
        """
        Mount hugepage path, update qemu conf then restart libvirtd
        """
        size_dict = {'2048': '2M', '1048576': '1G', '16384': '16M'}
        for page in page_list:
            if page['size'] not in supported_hp_size:
                raise error.TestError("Hugepage size [%s] isn't supported, "
                                      "please verify kernel cmdline configuration."
                                      % page['size'])
            m_path = "/dev/hugepages%s" % size_dict[page['size']]
            hp_cl.hugepage_size = int(page['size'])
            hp_cl.hugepage_path = m_path
            hp_cl.mount_hugepage_fs()
            mount_path.append(m_path)
        qemu_conf.hugetlbfs_mount = mount_path
        libvirtd.restart()

    try:
        # Get host numa node list
        host_numa_node = utils_misc.NumaInfo()
        node_list = host_numa_node.online_nodes
        logging.debug("host node list is %s", node_list)
        used_node = []
        if numa_memory.get('nodeset'):
            used_node += utlv.cpus_parser(numa_memory['nodeset'])
        if numa_memnode:
            for i in numa_memnode:
                used_node += utlv.cpus_parser(i['nodeset'])
        if page_list:
            host_page_tuple = ("hugepage_size", "page_num", "page_nodenum")
            h_list = handle_param(host_page_tuple, params)
            h_nodenum = [h_list[p_size]['nodenum']
                         for p_size in range(len(h_list))]
            for i in h_nodenum:
                used_node += utlv.cpus_parser(i)
        if used_node and not status_error:
            logging.debug("set node list is %s", used_node)
            for i in used_node:
                if i > max(node_list):
                    raise error.TestNAError("%s in nodeset out of range" % i)

        # set hugepage with qemu.conf and mount path
        if default_hp_size == 2048:
            hp_cl.setup()
            deallocate = True
        else:
            _update_qemu_conf()
            qemu_conf_restore = True

        # set hugepage with total number or per-node number
        if nr_pagesize_total:
            # Only set total 2M size huge page number as total 1G size runtime
            # update not supported now.
            deallocate = True
            hp_cl.kernel_hp_file = default_nr_hugepages_path
            hp_cl.target_hugepages = int(nr_pagesize_total)
            hp_cl.set_hugepages()
        if page_list:
            hp_size = [h_list[p_size]['size'] for p_size in range(len(h_list))]
            multi_hp_size = hp_cl.get_multi_supported_hugepage_size()
            for size in hp_size:
                if size not in multi_hp_size:
                    raise error.TestNAError("The hugepage size %s not "
                                            "supported or not configured under"
                                            " current running kernel." % size)
            # backup node page setting and set new value
            for i in h_list:
                node_val = hp_cl.get_node_num_huge_pages(i['nodenum'],
                                                         i['size'])
                if i['size'] != "1048576":
                    node_dict = i.copy()
                    node_dict['num'] = node_val
                    backup_list.append(node_dict)
                    hp_cl.set_node_num_huge_pages(i['num'],
                                                  i['nodenum'],
                                                  i['size'])
                else:
                    # kernel 1G hugepage runtime number update not supported
                    # now, check whether current host setting satisfy
                    # requirement or not.
                    if i['num'] < node_val:
                        raise error.TestNAError("%s size hugepage number %s of"
                                                " node %s not satisfy for "
                                                "testing" % (i['size'],
                                                             node_val,
                                                             i['nodenum']))

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.vcpu = vcpu_num
        vmxml.max_mem = max_mem
        vmxml.max_mem_unit = max_mem_unit
        vmxml.current_mem = max_mem
        vmxml.current_mem_unit = max_mem_unit

        # numatune setting
        if numa_memnode:
            vmxml.numa_memory = numa_memory
            vmxml.numa_memnode = numa_memnode
            del vmxml.numa_memory
        if numa_memory:
            vmxml.numa_memory = numa_memory

        # vcpu placement setting
        vmxml.placement = vcpu_placement

        # guest numa cpu setting
        vmcpuxml = libvirt_xml.vm_xml.VMCPUXML()
        vmcpuxml.xml = "<cpu><numa/></cpu>"
        if topology:
            vmcpuxml.topology = topology
        logging.debug(vmcpuxml.numa_cell)
        vmcpuxml.numa_cell = numa_cell
        logging.debug(vmcpuxml.numa_cell)
        vmxml.cpu = vmcpuxml

        # hugepages setting
        if page_list:
            membacking = libvirt_xml.vm_xml.VMMemBackingXML()
            hugepages = libvirt_xml.vm_xml.VMHugepagesXML()
            pagexml_list = []
            for i in range(len(page_list)):
                pagexml = hugepages.PageXML()
                pagexml.update(page_list[i])
                pagexml_list.append(pagexml)
            hugepages.pages = pagexml_list
            membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            session = vm.wait_for_login()
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug("vm xml after start is %s", vmxml_new)

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if status_error:
                return
            else:
                raise error.TestFail("Test failed in positive case.\n error:"
                                     " %s\n%s" % (e, bug_url))

        vm_pid = vm.get_pid()
        # numa hugepage check
        if page_list:
            numa_maps = open("/proc/%s/numa_maps" % vm_pid)
            numa_map_info = numa_maps.read()
            numa_maps.close()
            hugepage_info = re.findall(r".*file=\S*hugepages.*", numa_map_info)
            if not hugepage_info:
                raise error.TestFail("Can't find hugepages usage info in vm "
                                     "numa maps")
            else:
                logging.debug("The hugepage info in numa_maps is %s" %
                              hugepage_info)
                map_dict = {}
                usage_dict = {}
                node_pattern = r"\s(\S+):(\S+)\s.*ram-node(\d+).*\s"
                node_pattern += r"N(\d+)=(\d+)"
                for map_info in hugepage_info:
                    for (mem_mode, mem_num, cell_num, host_node_num,
                         vm_page_num) in re.findall(node_pattern, map_info):
                        usage_dict[mem_mode] = utlv.cpus_parser(mem_num)
                        usage_dict[host_node_num] = vm_page_num
                        map_dict[cell_num] = usage_dict.copy()
                logging.debug("huagepage info in vm numa maps is %s",
                              map_dict)
                memnode_dict = {}
                usage_dict = {}
                if numa_memnode:
                    for i in numa_memnode:
                        node = utlv.cpus_parser(i['nodeset'])
                        mode = mode_dict[i['mode']]
                        usage_dict[mode] = node
                        memnode_dict[i['cellid']] = usage_dict.copy()
                    logging.debug("memnode setting dict is %s", memnode_dict)
                    for k in memnode_dict.keys():
                        for mk in memnode_dict[k].keys():
                            if memnode_dict[k][mk] != map_dict[k][mk]:
                                raise error.TestFail("vm pid numa map dict %s"
                                                     " not expected" %
                                                     map_dict)

        # qemu command line check
        f_cmdline = open("/proc/%s/cmdline" % vm_pid)
        q_cmdline_list = f_cmdline.read().split("\x00")
        f_cmdline.close()
        logging.debug("vm qemu cmdline list is %s" % q_cmdline_list)
        for cmd in cmdline_list:
            logging.debug("checking '%s' in qemu cmdline", cmd['cmdline'])
            p_found = False
            for q_cmd in q_cmdline_list:
                if re.search(cmd['cmdline'], q_cmd):
                    p_found = True
                    break
                else:
                    continue
            if not p_found:
                raise error.TestFail("%s not found in vm qemu cmdline" %
                                     cmd['cmdline'])

        # vm inside check
        vm_cpu_info = utils_misc.get_cpu_info(session)
        logging.debug("lscpu output dict in vm is %s", vm_cpu_info)
        session.close()
        node_num = int(vm_cpu_info["NUMA node(s)"])
        if node_num != len(numa_cell):
            raise error.TestFail("node number %s in vm is not expected" %
                                 node_num)
        for i in range(len(numa_cell)):
            cpu_str = vm_cpu_info["NUMA node%s CPU(s)" % i]
            vm_cpu_list = utlv.cpus_parser(cpu_str)
            cpu_list = utlv.cpus_parser(numa_cell[i]["cpus"])
            if vm_cpu_list != cpu_list:
                raise error.TestFail("vm node %s cpu list %s not expected" %
                                     (i, vm_cpu_list))
        if topology:
            vm_topo_tuple = ("Socket(s)", "Core(s) per socket",
                             "Thread(s) per core")
            for i in range(len(topo_tuple)):
                topo_info = vm_cpu_info[vm_topo_tuple[i]]
                if topo_info != topology[topo_tuple[i]]:
                    raise error.TestFail("%s in vm topology not expected." %
                                         topo_tuple[i])
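As an aside, the per-node hugepage counters that the HugePageConfig helpers above work with live under sysfs in the standard kernel layout; the following is a hedged sketch of reading and setting them by hand (values illustrative, writing requires root).

# Per-NUMA-node 2M hugepage pool for node 0; 1G pools usually cannot be
# grown at runtime, which is why the test only checks the existing count.
node, size_kb, count = 0, 2048, 512
path = ("/sys/devices/system/node/node%d/hugepages/"
        "hugepages-%dkB/nr_hugepages" % (node, size_kb))
with open(path) as f:
    print("current:", f.read().strip())
with open(path, "w") as f:
    f.write(str(count))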
Exemple #19
0
def run(test, params, env):
    """
    Test command: virsh guestvcpus

    The command query or modify state of vcpu in the vm
    1. Prepare test environment, start vm with guest agent
    2. Perform virsh guestvcpus query/enable/disable operation
    3. Check the cpus in the vm
    4. Recover test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vcpus_num = int(params.get("vcpus_num", "20"))
    vcpus_placement = params.get("vcpus_placement", "static")
    option = params.get("option", "")
    combine = params.get("combine", "")
    invalid_domain = params.get("invalid_domain", "")
    domain_name = params.get("domain_name", "")
    invalid_cpulist = params.get("invalid_cpulist", "")
    status_error = params.get("status_error", "no")
    error_msg = params.get("error_msg", "no")
    vcpus_list = ""
    offline_vcpus = ""

    # Back up domain XML
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_bakup = vmxml.copy()

    try:
        # Modify vm with static vcpus
        if vm.is_alive():
            vm.destroy()
        vmxml.placement = vcpus_placement
        vmxml.set_vm_vcpus(vm_name, vcpus_num, vcpus_num)
        logging.debug("Define guest with '%s' vcpus" % str(vcpus_num))

        # Start guest agent in vm
        vm.prepare_guest_agent(prepare_xml=False)

        # Normal test: disable/ enable guest vcpus
        if option and status_error == "no":
            for cpu in range(1, vcpus_num):
                virsh.guestvcpus(vm_name, str(cpu), option, debug=True)

        # Normal test: combine: --disable 1-max then --enable 1
        if combine == "yes" and status_error == "no":
            vcpus_list = '1' + '-' + str(vcpus_num - 1)
            option = "--disable"
            virsh.guestvcpus(vm_name, vcpus_list, option, debug=True)
            vcpus_list = '1'
            option = "--enable"
            virsh.guestvcpus(vm_name, vcpus_list, option, debug=True)

        # Error test: invalid_domain
        if invalid_domain == "yes":
            vm_name = domain_name
        # Error test: invalid_cpulist
        if invalid_cpulist == "yes":
            if option == "--enable":
                vcpus_list = str(vcpus_num)
            else:
                vcpus_list = '0' + '-' + str(vcpus_num - 1)
            ret = virsh.guestvcpus(vm_name, vcpus_list, option)
        else:
            # Query guest vcpus
            ret = virsh.guestvcpus(vm_name)
            output = ret.stdout.strip()

        # Check test results
        if status_error == "yes":
            libvirt.check_result(ret, error_msg)
        else:
            # Check the test result of query
            ret_output = dict([item.strip() for item in line.split(":")]
                              for line in output.split("\n"))
            if combine == "yes":
                online_vcpus = '0-1'
            elif option == "--disable":
                online_vcpus = '0'
            else:
                online_vcpus = '0' + '-' + str(vcpus_num - 1)

            if ret_output["online"] != online_vcpus:
                test.fail("Query result is different from"
                          " the '%s' command." % option)

            # Check the cpu in guest
            session = vm.wait_for_login()
            vm_cpu_info = utils_misc.get_cpu_info(session)
            session.close()

            if combine == "yes":
                online_vcpus = '0,1'
            elif option == "--disable":
                online_vcpus = '0'
                offline_vcpus = '1' + '-' + str(vcpus_num - 1)
            else:
                online_vcpus = '0' + '-' + str(vcpus_num - 1)

            if offline_vcpus:
                if (vm_cpu_info["Off-line CPU(s) list"] != offline_vcpus or
                        vm_cpu_info["On-line CPU(s) list"] != online_vcpus):
                    test.fail("CPUs in vm is different from"
                              " the %s command." % option)
            elif vm_cpu_info["On-line CPU(s) list"] != online_vcpus:
                test.fail("On-line CPUs in vm is different"
                          " from the %s command." % option)
            else:
                logging.debug("lscpu in vm '%s' is: \n '%s'" %
                              (vm_name, vm_cpu_info))

    finally:
        # Recover VM
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_bakup.sync()
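A small hedged sketch of the query-output parsing used in the result check above; the sample text mimics typical 'virsh guestvcpus' output, but the exact field names and values are illustrative, not captured from a live host.

# One 'key: value' pair per line, exactly what the dict() expression expects.
sample_output = ("online: 0-1\n"
                 "offline: 2-19\n"
                 "offlinable: 1-19")

ret_output = dict([item.strip() for item in line.split(":")]
                  for line in sample_output.split("\n"))

assert ret_output["online"] == "0-1"
print(ret_output)  # {'online': '0-1', 'offline': '2-19', 'offlinable': '1-19'}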
Exemple #20
0
def guest_numa_check(vm, exp_vcpu):
    """
    To check numa node values

    :param vm: VM object
    :param exp_vcpu: dict of expected vcpus
    :return: True if check succeed, False otherwise
    """
    logging.debug("Check guest numa")
    session = vm.wait_for_login()
    vm_cpu_info = utils_misc.get_cpu_info(session)
    session.close()
    vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm.name)
    try:
        node_num_xml = len(vmxml.cpu.numa_cell)
    except (TypeError, LibvirtXMLNotFoundError):
        # handle the case of no numa cell in guest xml; by default assume node 0
        node_num_xml = 1
    node_num_guest = int(vm_cpu_info["NUMA node(s)"])
    exp_num_nodes = node_num_xml
    status = True
    for node in range(node_num_xml):
        try:
            node_cpu_xml = vmxml.cpu.numa_cell[node]['cpus']
            node_cpu_xml = libvirt.cpus_parser(node_cpu_xml)
        except (TypeError, LibvirtXMLNotFoundError):
            try:
                node_cpu_xml = vmxml.current_vcpu
            except LibvirtXMLNotFoundError:
                node_cpu_xml = vmxml.vcpu
            node_cpu_xml = list(range(int(node_cpu_xml)))
        try:
            node_mem_xml = vmxml.cpu.numa_cell[node]['memory']
        except (TypeError, LibvirtXMLNotFoundError):
            node_mem_xml = vmxml.memory
        node_mem_guest = int(vm.get_totalmem_sys(node=node))
        node_cpu_xml_copy = node_cpu_xml[:]
        for cpu in node_cpu_xml_copy:
            if int(cpu) >= int(exp_vcpu["guest_live"]):
                node_cpu_xml.remove(cpu)
        if (not node_cpu_xml) and node_mem_guest == 0:
            exp_num_nodes -= 1
        try:
            node_cpu_guest = vm_cpu_info["NUMA node%s CPU(s)" % node]
            node_cpu_guest = libvirt.cpus_parser(node_cpu_guest)
        except KeyError:
            node_cpu_guest = []
        # Check cpu
        if node_cpu_xml != node_cpu_guest:
            status = False
            logging.error("Mismatch in cpus in node %s: xml %s guest %s", node,
                          node_cpu_xml, node_cpu_guest)
        # Check memory
        if int(node_mem_xml) != node_mem_guest:
            status = False
            logging.error("Mismatch in memory in node %s: xml %s guest %s", node,
                          node_mem_xml, node_mem_guest)
    # Check no. of nodes
    if exp_num_nodes != node_num_guest:
        status = False
        logging.error("Mismatch in numa nodes expected nodes: %s guest: %s", exp_num_nodes,
                      node_num_guest)
    return status
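A hedged usage sketch for the helper above: 'vm' and 'test' come from the surrounding Avocado-VT harness, the dict values are illustrative, and 'guest_live' is the only exp_vcpu key this helper actually reads.

# Illustrative call site; exp_vcpu normally carries several vcpu counters,
# but guest_numa_check() only consults exp_vcpu["guest_live"].
exp_vcpu = {"guest_live": 4}
if not guest_numa_check(vm, exp_vcpu):
    test.fail("Guest NUMA layout does not match the domain XML")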
    def output_check(nodeinfo_output):
        # Check CPU model
        cpu_model_nodeinfo = _check_nodeinfo(nodeinfo_output, "CPU model", 3)
        cpu_model_os = utils.get_current_kernel_arch()
        if not re.match(cpu_model_nodeinfo, cpu_model_os):
            test.fail(
                "Virsh nodeinfo output didn't match CPU model")

        # Check number of CPUs, nodeinfo CPUs represent online threads in the
        # system, check all online cpus in sysfs
        cpus_nodeinfo = _check_nodeinfo(nodeinfo_output, "CPU(s)", 2)
        cmd = "cat /sys/devices/system/cpu/cpu*/online | grep 1 | wc -l"
        cpus_online = utils.run(cmd, ignore_status=True).stdout.strip()
        cmd = "cat /sys/devices/system/cpu/cpu*/online | wc -l"
        cpus_total = utils.run(cmd, ignore_status=True).stdout.strip()
        if not os.path.exists('/sys/devices/system/cpu/cpu0/online'):
            cpus_online = str(int(cpus_online) + 1)
            cpus_total = str(int(cpus_total) + 1)

        logging.debug("host online cpus are %s", cpus_online)
        logging.debug("host total cpus are %s", cpus_total)

        if cpus_nodeinfo != cpus_online:
            if 'power' in cpu_util.get_cpu_arch():
                if cpus_nodeinfo != cpus_total:
                    test.fail("Virsh nodeinfo output of CPU(s) on"
                              " ppc did not match all threads in "
                              "the system")
            else:
                test.fail("Virsh nodeinfo output didn't match "
                          "number of CPU(s)")

        # Check CPU frequency, frequency is under clock for ppc
        cpu_frequency_nodeinfo = _check_nodeinfo(
            nodeinfo_output, 'CPU frequency', 3)
        cmd = ("cat /proc/cpuinfo | grep -E 'cpu MHz|clock' | head -n1 | "
               "awk -F: '{print $2}' | awk -F. '{print $1}'")
        cmd_result = utils.run(cmd, ignore_status=True)
        cpu_frequency_os = cmd_result.stdout.strip()
        logging.debug("cpu_frequency_nodeinfo=%s cpu_frequency_os=%s",
                      cpu_frequency_nodeinfo, cpu_frequency_os)
        #
        # Matching CPU frequency is not an exact science on today's modern
        # processors and OSes. CPUs can vary their execution speed based on
        # the current workload in order to save energy and keep cool.
        # Since we're sampling the two values at disparate points in time,
        # we cannot do a pure comparison.
        # So, take the absolute value of the difference and ensure that it's
        # within 20 percent of each value, giving us enough of a "fudge"
        # factor to declare "close enough". Don't return a failure; just
        # print a debug message and move on.
        diffval = abs(int(cpu_frequency_nodeinfo) - int(cpu_frequency_os))
        if (float(diffval) / float(cpu_frequency_nodeinfo) > 0.20 or
                float(diffval) / float(cpu_frequency_os) > 0.20):
            logging.debug("Virsh nodeinfo output didn't match CPU "
                          "frequency within 20 percent")

        # Get CPU topology from virsh capabilities xml
        cpu_topology = capability_xml.CapabilityXML()['cpu_topology']
        logging.debug("Cpu topology in virsh capabilities output: %s",
                      cpu_topology)

        # Check CPU socket(s)
        cpu_sockets_nodeinfo = int(
            _check_nodeinfo(nodeinfo_output, 'CPU socket(s)', 3))
        # CPU socket(s) in virsh nodeinfo is Total sockets in each node, not
        # total sockets in the system, so get total sockets in one node and
        # check with it
        node_info = utils_misc.NumaInfo()
        node_online_list = node_info.get_online_nodes()
        cmd = "cat /sys/devices/system/node/node%s" % node_online_list[0]
        cmd += "/cpu*/topology/physical_package_id | uniq |wc -l"
        cmd_result = utils.run(cmd, ignore_status=True)
        total_sockets_in_node = int(cmd_result.stdout.strip())
        if total_sockets_in_node != cpu_sockets_nodeinfo:
            test.fail("Virsh nodeinfo output didn't match CPU "
                      "socket(s) of host OS")
        if cpu_sockets_nodeinfo != int(cpu_topology['sockets']):
            test.fail("Virsh nodeinfo output didn't match CPU "
                      "socket(s) of virsh capabilities output")

        # Check Core(s) per socket
        cores_per_socket_nodeinfo = _check_nodeinfo(
            nodeinfo_output, 'Core(s) per socket', 4)
        cmd = "lscpu | grep 'Core(s) per socket' | head -n1 | awk '{print $4}'"
        cmd_result = utils.run(cmd, ignore_status=True)
        cores_per_socket_os = cmd_result.stdout.strip()
        spec_numa = False
        if not re.match(cores_per_socket_nodeinfo, cores_per_socket_os):
            # for spec NUMA arch, the output of nodeinfo is in a spec format
            cpus_os = utils_misc.get_cpu_info().get("CPU(s)")
            numa_cells_nodeinfo = _check_nodeinfo(
                nodeinfo_output, 'NUMA cell(s)', 3)
            if (re.match(cores_per_socket_nodeinfo, cpus_os) and
                    re.match(numa_cells_nodeinfo, "1")):
                spec_numa = True
            else:
                test.fail("Virsh nodeinfo output didn't match "
                          "CPU(s) or Core(s) per socket of host OS")
        if cores_per_socket_nodeinfo != cpu_topology['cores']:
            test.fail("Virsh nodeinfo output didn't match Core(s) "
                      "per socket of virsh capabilities output")
        # Check Thread(s) per core
        threads_per_core_nodeinfo = _check_nodeinfo(nodeinfo_output,
                                                    'Thread(s) per core', 4)
        if not spec_numa:
            if threads_per_core_nodeinfo != cpu_topology['threads']:
                test.fail("Virsh nodeinfo output didn't match"
                          "Thread(s) per core of virsh"
                          "capabilities output")
        else:
            if threads_per_core_nodeinfo != "1":
                test.fail("Virsh nodeinfo output didn't match"
                          "Thread(s) per core of virsh"
                          "capabilities output")
        # Check Memory size
        memory_size_nodeinfo = int(
            _check_nodeinfo(nodeinfo_output, 'Memory size', 3))
        memory_size_os = 0
        if libvirt_version.version_compare(2, 0, 0):
            for i in node_online_list:
                node_memory = node_info.read_from_node_meminfo(i, 'MemTotal')
                memory_size_os += int(node_memory)
        else:
            memory_size_os = utils_memory.memtotal()
        logging.debug('The host total memory from nodes is %s', memory_size_os)

        if memory_size_nodeinfo != memory_size_os:
            test.fail("Virsh nodeinfo output didn't match "
                      "Memory size")
def run(test, params, env):
    """
    Test to change the kernel param based on user input.

    1. Prepare test environment, boot the guest
    2. Change the kernel parameter as per user input
    3. Reboot the guest and check whether /proc/cmdline reflects
    4. Check the boot log in guest dmesg and validate
    5. Perform any test operation if any, based on kernel param change
    6. Recover test environment
    """
    vms = params.get("vms").split()
    kernel_param = params.get("kernel_param", "quiet")
    kernel_param_remove = params.get("kernel_param_remove", "")
    if not kernel_param:
        kernel_param = None
    if not kernel_param_remove:
        kernel_param_remove = None
    cpu_check = params.get("hardware", "").upper()
    boot_log = params.get("boot_log", None)
    status_error = params.get("status_error", "no") == "yes"
    vm_dict = {}
    vm_list = env.get_all_vms()
    # To ensure host that doesn't support Radix MMU gets skipped
    if cpu_check:
        cpu_model = utils_misc.get_cpu_info()['Model name'].upper()
        if cpu_check not in cpu_model:
            logging.info("This test will work for %s", cpu_check)
            test.skip("Test is not applicable for %s" % cpu_model)
    # back up vmxml
    for vm_name in vms:
        vm_dict[vm_name] = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    session = None
    try:
        for vm in vm_list:
            session = vm.wait_for_login()
            utils_test.update_boot_option(vm, args_added=kernel_param,
                                          args_removed=kernel_param_remove,
                                          need_reboot=True)
            if boot_log:
                session = vm.wait_for_login()
                # To ensure guest that doesn't support Radix MMU gets skipped
                if cpu_check:
                    cmd = "grep cpu /proc/cpuinfo | awk '{print $3}' | "
                    cmd += "head -n 1"
                    status, output = session.cmd_status_output(cmd)
                    if status:
                        test.error("couldn't get cpu information from guest "
                                   "%s" % vm.name)
                    if cpu_check not in output.upper() and "radix" in boot_log:
                        test.skip("radix MMU not supported in %s" % output)
                status, output = session.cmd_status_output("dmesg")
                if status:
                    logging.error(output)
                    test.error("unable to get dmesg from guest: %s" %
                               vm.name)
                if status_error:
                    if boot_log in output:
                        test.fail("Able to find %s in dmesg of guest: "
                                  "%s" % (boot_log, vm.name))
                    logging.info("unable to find %s in dmesg of guest: %s",
                                 boot_log, vm.name)
                else:
                    if boot_log not in output:
                        test.fail("unable to find %s in dmesg of guest: "
                                  "%s" % (boot_log, vm.name))
                    logging.info("Able to find %s in dmesg of guest: %s",
                                 boot_log, vm.name)
            if session:
                session.close()
    finally:
        # close the session and recover the vms
        if session:
            session.close()
        for vm in vm_list:
            vm.destroy()
            vm_dict[vm.name].sync()
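Step 3 of the docstring (confirming that /proc/cmdline reflects the change) is not spelled out in the body above; here is a hedged sketch of such a check over the existing guest session, reusing the names already in scope.

# Hedged sketch: verify the added kernel parameter is visible in the guest's
# /proc/cmdline after the reboot performed by update_boot_option().
status, cmdline = session.cmd_status_output("cat /proc/cmdline")
if status or kernel_param not in cmdline:
    test.fail("'%s' not present in guest /proc/cmdline: %s"
              % (kernel_param, cmdline))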