Example 1
def run(test, params, env):
    """
    Test virsh command virsh freepages
    """

    option = params.get("freepages_option", "")
    status_error = "yes" == params.get("status_error", "no")
    cellno = params.get("freepages_cellno")
    pagesize = params.get("freepages_page_size")
    huge_pages_num = params.get("huge_pages_num")

    hp_cl = test_setup.HugePageConfig(params)
    default_hp_size = hp_cl.get_hugepage_size()
    supported_hp_size = hp_cl.get_multi_supported_hugepage_size()

    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes
    logging.debug("host node list is %s", node_list)

    # Iterate over a copy so that nodes can be removed from the original list
    for i in node_list[:]:
        try:
            hp_cl.get_node_num_huge_pages(i, default_hp_size)
        except ValueError as e:
            logging.warning("%s", e)
            logging.debug('Removing node %s from the testing node list', i)
            node_list.remove(i)
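The snippet above is truncated before the actual freepages check. A minimal, hedged sketch of how the remaining nodes could be verified might look like this (the virsh.freepages wrapper exists in avocado-vt, but the output format checked below is an assumption):

# Hedged sketch (not part of the original test): verify "virsh freepages"
# for every node that survived the filtering above.
from virttest import virsh


def check_freepages_per_node(test, node_list, default_hp_size):
    for node in node_list:
        result = virsh.freepages(cellno=node, pagesize=default_hp_size,
                                 ignore_status=True, debug=True)
        if result.exit_status:
            test.fail("virsh freepages failed on node %s: %s"
                      % (node, result.stderr_text))
        # Assumed line format: "<size>KiB: <free page count>"
        if "%sKiB:" % default_hp_size not in result.stdout_text:
            test.fail("Page size %sKiB missing from freepages output of "
                      "node %s" % (default_hp_size, node))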
Example 2
def run(test, params, env):
    """
    Test the numanode info in cpu section.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    error_message = params.get("err_msg")
    pages_list = eval(params.get('nodes_pages'))
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
    numa_info = utils_misc.NumaInfo()
    online_nodes = numa_info.get_online_nodes_withmem()
    ori_page_set = {}
    setup_host(online_nodes, pages_list, ori_page_set)
    try:
        if vm.is_alive():
            vm.destroy()
        update_xml(vm_name, online_nodes, params)
        # Try to start the VM with an invalid node; failure is expected
        ret = virsh.start(vm_name, ignore_status=True)
        utils_test.libvirt.check_status_output(ret.exit_status,
                                               ret.stderr_text,
                                               expected_fails=[error_message])
    except Exception as e:
        test.error("Unexpected error: {}".format(e))
    finally:
        for node_index, ori_page in ori_page_set.items():
            process.run(
                'echo {} > /sys/devices/system/node/node{}/hugepages/hugepages-2048kB/nr_hugepages'
                .format(ori_page, node_index),
                shell=True)
        backup_xml.sync()
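setup_host() is referenced above but not shown. A hypothetical sketch, mirroring the restore command in the finally block, could allocate the requested hugepages per node and record the original values:

# Hypothetical sketch of the setup_host() helper used above; the real helper
# may differ. It pairs each online node with its requested page count,
# records the original value for the finally block, and writes the new one.
from avocado.utils import process


def setup_host(online_nodes, pages_list, ori_page_set):
    for node, pages in zip(online_nodes, pages_list):
        nr_path = ('/sys/devices/system/node/node{}/hugepages/'
                   'hugepages-2048kB/nr_hugepages'.format(node))
        with open(nr_path) as nr_file:
            ori_page_set[node] = nr_file.read().strip()
        process.run('echo {} > {}'.format(pages, nr_path), shell=True)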
Example 3
def prepare_vm(vm_name, test):
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes_withmem
    if len(node_list) < 2:
        test.cancel("Not enough numa nodes available")
    vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
    numa_memnode = [{
        'cellid': '0',
        'mode': 'strict',
        'nodeset': str(node_list[0])
    }, {
        'cellid': '1',
        'mode': 'preferred',
        'nodeset': str(node_list[1])
    }]
    numa_cells = [{
        'id': '0',
        'memory': '512000',
        'unit': 'KiB'
    }, {
        'id': '1',
        'memory': '512000',
        'unit': 'KiB'
    }]
    vmxml.setup_attrs(
        **{
            'numa_memnode': numa_memnode,
            'cpu': {
                'reset_all': True,
                'mode': 'host-model',
                'numa_cell': numa_cells
            }
        })
    logging.debug('VM XML prior to the test: {}'.format(vmxml))
    vmxml.sync()
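For orientation, the setup_attrs() call above is expected to produce domain XML roughly like the following (a sketch; the nodeset values are whatever host nodes node_list[0] and node_list[1] resolve to):

# Rough shape of the XML produced by the setup_attrs() call above (sketch).
EXPECTED_NUMA_XML = """
<numatune>
  <memnode cellid='0' mode='strict' nodeset='0'/>
  <memnode cellid='1' mode='preferred' nodeset='1'/>
</numatune>
<cpu mode='host-model'>
  <numa>
    <cell id='0' memory='512000' unit='KiB'/>
    <cell id='1' memory='512000' unit='KiB'/>
  </numa>
</cpu>
"""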
Example 4
def run(test, params, env):
    """
    Test numa node memory binding with automatic numa placement
    """

    logging.debug("The test has been started.")
    vm_name = params.get("main_vm")
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
    replace_string = params.get("replace_string", '')

    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes_withmem
    if len(node_list) < 2:
        test.cancel("The host only has {} numa node, but 2 is "
                    "required at least".format(len(node_list)))
    try:
        prepare_vm(vm_name, node_list)
        status = libvirt.exec_virsh_edit(vm_name, [replace_string])
        if status:
            test.fail(
                'Failure expected during virsh edit, but no failure occurs.')
        else:
            logging.info(
                'Virsh edit has failed, but that is intentional in '
                'negative cases.')
    except Exception as e:
        test.error("Unexpected error happened during the test execution: {}"
                   .format(e))
    finally:
        backup_xml.sync()
Example 5
def run(test, params, env):
    """
    Test vcpupin while numad is running
    """
    vcpu_placement = params.get("vcpu_placement")
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    libvirtd = utils_libvirtd.Libvirtd()
    libvirtd.start()

    try:
        # Get host numa node list
        host_numa_node = utils_misc.NumaInfo()
        node_list = host_numa_node.online_nodes
        logging.debug("host node list is %s", node_list)
        if numa_memory.get('nodeset'):
            used_node = utils_test.libvirt.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            if not status_error:
                for i in used_node:
                    if i > max(node_list):
                        raise error.TestNAError("nodeset %s out of range" %
                                                numa_memory['nodeset'])
        # Start numad
        utils.run("service numad start")

        # Start vm and do vcpupin
        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        vmxml.placement = vcpu_placement
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()
        vm.start()
        vm.wait_for_login()

        host_cpu_count = utils.count_cpus()
        for i in range(host_cpu_count):
            ret = virsh.vcpupin(vm_name, 0, i, debug=True, ignore_status=True)
            if ret.exit_status:
                raise error.TestFail("vcpupin failed while numad running, %s"
                                     % bug_url)
            virsh.vcpuinfo(vm_name, debug=True)
    finally:
        utils.run("service numad stop")
        libvirtd.restart()
        backup_xml.sync()
Example 6
def prepare_host_for_test(params, test):
    """
    Setup host and return constants used by other functions

    :param params: dictionary of test parameters
    :param test: test object
    :return: The following constants are returned by this function:
    numa_memory - dictionary for numa memory setup
    oversize - memory of the main node plus 50% of the neighbour node's memory
    undersize - 25% of the main node's memory
    memory_to_eat - memory to be consumed by the process: the whole main node
    size plus 10% of the neighbour node's size
    neighbour - node number of the neighbour to be used for the test
    nodeset_string - nodeset string to be used for numatune (built from the
    main node number and the neighbour node number)
    """
    # Create a NumaInfo object to get NUMA related information
    numa_info = utils_misc.NumaInfo()
    online_nodes = numa_info.get_online_nodes_withmem()
    if len(online_nodes) < 2:
        test.cancel("This test needs at least 2 available numa nodes")
    numa_memory = {
        'mode': params.get('memory_mode', 'strict'),
        # If nodeset is not defined in config, take the first node with memory.
        'nodeset': params.get('memory_nodeset', str(online_nodes[0]))
    }
    # Get the size of a main node
    nodeset_size = int(
        numa_info.read_from_node_meminfo(int(numa_memory['nodeset']),
                                         'MemTotal'))
    # Get the size of a first neighbour with memory
    for node in online_nodes:
        if str(node) != numa_memory['nodeset']:
            neighbour = node
            break
    nodeset_nb_size = int(
        numa_info.read_from_node_meminfo(int(neighbour), 'MemTotal'))
    logging.debug('Memory available on a main node {} is {}'.format(
        numa_memory['nodeset'], nodeset_size))
    logging.debug('Memory available on a neighbour node {} is {}'.format(
        neighbour, nodeset_nb_size))
    # Oversize is the main node size plus 50% of the neighbour node size
    oversize = int(nodeset_size + 0.5 * nodeset_nb_size)
    # Undersize is 25% of the main node size
    undersize = int(nodeset_size * 0.25)
    # Memory to eat is the whole main node size plus 10% of the neighbour size
    memory_to_eat = int(nodeset_size + 0.1 * nodeset_nb_size)
    nodeset_string = '{},{}'.format(online_nodes[0], neighbour)
    process.run("swapoff -a", shell=True)
    if not utils_package.package_install('libcgroup-tools'):
        test.fail("Failed to install package libcgroup-tools on host.")

    return numa_memory, oversize, undersize, memory_to_eat, neighbour, nodeset_string
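A hedged sketch of how the returned constants might be consumed: cap a memory hog at oversize KiB through a cgroup (using the libcgroup-tools commands installed above) and let it allocate memory_to_eat KiB so allocations spill from the main node to the neighbour. The cgroup name, the v1 controller file, and the inline allocator are illustrative assumptions, not the original test body:

# Hedged usage sketch. Assumes the cgroup v1 memory controller;
# cgcreate/cgset/cgexec come from libcgroup-tools.
from avocado.utils import process


def run_limited_memhog(oversize, memory_to_eat, group='numa_spread'):
    process.run('cgcreate -g memory:{}'.format(group), shell=True)
    process.run('cgset -r memory.limit_in_bytes={} {}'.format(
        oversize * 1024, group), shell=True)
    eater = 'python3 -c "bytearray({} * 1024)"'.format(memory_to_eat)
    return process.run('cgexec -g memory:{} {}'.format(group, eater),
                       shell=True, ignore_status=True)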
Example 7
def run(test, params, env):
    """
    Qemu numa basic test:
    1) Get host numa topological structure
    2) Start a guest and bind it on the cpus of one node
    3) Check the memory status of qemu process. It should mainly use the
       memory in the same node.
    4) Destroy the guest
    5) Repeat step 2 ~ 4 on every node in host

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    error_context.context("Get host numa topological structure", logging.info)
    timeout = float(params.get("login_timeout", 240))
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes
    for node_id in node_list:
        error_context.base_context("Bind qemu process to numa node %s" % node_id,
                                   logging.info)
        vm = "vm_bind_to_%s" % node_id
        params['qemu_command_prefix'] = "numactl --cpunodebind=%s" % node_id
        utils_memory.drop_caches()
        node_MemFree = int(host_numa_node.read_from_node_meminfo(node_id,
                                                                 "MemFree"))
        if node_MemFree < int(params["mem"]) * 1024:
            test.cancel("Not enough free memory in node %d." % node_id)
        env_process.preprocess_vm(test, params, env, vm)
        vm = env.get_vm(vm)
        vm.verify_alive()
        session = vm.wait_for_login(timeout=timeout)
        session.close()

        error_context.context("Check the memory use status of qemu process",
                              logging.info)
        memory_status, _ = utils_test.qemu.get_numa_status(host_numa_node,
                                                           vm.get_pid())
        node_used_most = 0
        memory_sz_used_most = 0
        for index in range(len(node_list)):
            if memory_sz_used_most < memory_status[index]:
                memory_sz_used_most = memory_status[index]
                node_used_most = node_list[index]
            logging.debug("Qemu used %s pages in node"
                          " %s" % (memory_status[index], node_list[index]))
        if node_used_most != node_id:
            test.fail("Qemu still uses memory from another node. "
                      "Expected: %s, used: %s" % (node_id, node_used_most))
        error_context.context("Destroy guest.", logging.info)
        vm.destroy()
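utils_test.qemu.get_numa_status() reports how many pages the qemu process has on each node. A rough, illustrative equivalent (not the library code) sums the N&lt;node&gt;=&lt;pages&gt; counters from /proc/&lt;pid&gt;/numa_maps:

# Illustrative re-implementation of the per-node page accounting relied on
# above; the real get_numa_status() lives in virttest.utils_test.qemu.
import re


def pages_per_node(pid, node_list):
    counters = {node: 0 for node in node_list}
    with open('/proc/%s/numa_maps' % pid) as numa_maps:
        for line in numa_maps:
            for node, pages in re.findall(r'N(\d+)=(\d+)', line):
                if int(node) in counters:
                    counters[int(node)] += int(pages)
    return [counters[node] for node in node_list]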
Example 8
def pin_vcpus2host_cpus():
    """ Pin the vcpus to the host cpus. """
    error_context.context("Pin vcpus to host cpus.", logging.info)
    host_numa_nodes = utils_misc.NumaInfo()
    vcpu_num = 0
    for numa_node_id in host_numa_nodes.nodes:
        numa_node = host_numa_nodes.nodes[numa_node_id]
        for _ in range(len(numa_node.cpus)):
            if vcpu_num >= len(vm.vcpu_threads):
                break
            vcpu_tid = vm.vcpu_threads[vcpu_num]
            logging.debug("pin vcpu thread(%s) to cpu(%s)", vcpu_tid,
                          numa_node.pin_cpu(vcpu_tid))
            vcpu_num += 1
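NumaNode.pin_cpu() is relied on above to bind a thread to one CPU of the node and report which CPU was used. A hedged, standalone approximation with taskset (the real avocado-vt implementation also tracks already-used CPUs internally):

# Hedged approximation of the pinning step, not the avocado-vt code itself.
from avocado.utils import process


def pin_thread_to_node_cpu(thread_id, node_cpus, used_cpus):
    """Bind thread_id to the first free CPU of the node and return it."""
    for cpu in node_cpus:
        if cpu not in used_cpus:
            process.run('taskset -pc {} {}'.format(cpu, thread_id), shell=True)
            used_cpus.add(cpu)
            return cpu
    return None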
Example 9
def check_to_skip_case(params, test):
    """
    Check if the case should be skipped

    :param params: the dict the cases use
    :param test: test object
    :raises: test.cancel if skip criteria are matched
    """
    need_2_numa_node = "yes" == params.get("need_2_numa_node", "no")
    if need_2_numa_node:
        host_numa_node = utils_misc.NumaInfo()
        node_list = host_numa_node.online_nodes_withmem
        logging.debug("host online nodes with memory %s", node_list)
        if len(node_list) <= 1:
            test.cancel("This case requires at least 2 numa host nodes, "
                        "but found '%s' numa host node" % len(node_list))
Example 10
def run(test, params, env):
    """
    Test capabilities with host numa node topology
    """
    libvirtd = utils_libvirtd.Libvirtd()
    libvirtd.start()
    try:
        new_cap = libvirt_xml.CapabilityXML()
        if not libvirtd.is_running():
            raise error.TestFail("Libvirtd is not running")
        topo = new_cap.cells_topology
        logging.debug("topo xml is %s", topo.xmltreefile)
        cell_list = topo.get_cell()
        numa_info = utils_misc.NumaInfo()
        node_list = numa_info.online_nodes

        for cell_num in range(len(cell_list)):
            # check node distances
            node_num = node_list[cell_num]
            cell_distance = cell_list[cell_num].sibling
            if cell_distance:
                logging.debug("cell %s distance is %s", node_num,
                              cell_distance)
                node_distance = numa_info.distances[node_num]
                for j in range(len(cell_list)):
                    if cell_distance[j]['value'] != node_distance[j]:
                        raise error.TestFail("cell distance value not "
                                             "expected.")
            # check node cell cpu
            cell_xml = cell_list[cell_num]
            cpu_list_from_xml = cell_xml.cpu
            node_ = numa_info.nodes[node_num]
            cpu_list = node_.cpus
            logging.debug("cell %s cpu list is %s", node_num, cpu_list)
            cpu_topo_list = []
            for cpu_id in cpu_list:
                cpu_dict = node_.get_cpu_topology(cpu_id)
                cpu_topo_list.append(cpu_dict)
            logging.debug("cpu topology list from capabilities xml is %s",
                          cpu_list_from_xml)
            if cpu_list_from_xml != cpu_topo_list:
                raise error.TestFail("cpu list %s from capabilities xml not "
                                     "expected.")
    finally:
        libvirtd.restart()
Example 11
def get_cpu_range():
    """
    Get the range of all CPUs available on the system as a string

    :return: range of CPUs available as a string, for example: '1-63'
    """
    numa_info = utils_misc.NumaInfo()
    cpus_dict = numa_info.get_all_node_cpus()
    cpus = []
    for key in cpus_dict:
        cpus_node_list = [int(cpu) for cpu in cpus_dict[key].split(' ')
                          if cpu.isnumeric()]
        logging.debug('Following cpus found for node {}: {}'.
                      format(key, cpus_node_list))
        cpus += cpus_node_list
    cpu_range = '{}-{}'.format(min(cpus), max(cpus))
    logging.debug('The expected available cpu range is {}'.format(cpu_range))
    return cpu_range
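A minimal usage sketch for get_cpu_range(): with automatic placement, virsh vcpupin is expected to report the full host range, so the returned string can be matched against the command output (compare check_vcpupin() in Example 15 below). vm_name is whatever guest the test operates on:

# Usage sketch only; not part of the original module.
from virttest import virsh


def assert_auto_affinity(vm_name):
    cpu_range = get_cpu_range()
    result = virsh.vcpupin(vm_name, debug=True, ignore_status=False)
    if cpu_range not in result.stdout_text:
        raise AssertionError('Expected affinity %s not reported by vcpupin'
                             % cpu_range)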
Example 12
    def prepare_vmxml_mem(vmxml):
        """
        Prepare memory and numa settings in vmxml before hotplug dimm

        :param vmxml: guest current xml
        """
        # Prepare memory settings
        vmxml.max_mem_rt = int(params.get("max_mem"))
        vmxml.max_mem_rt_slots = int(params.get("maxmem_slots"))
        mem_unit = params.get("mem_unit")
        vmxml.max_mem_rt_unit = mem_unit
        current_mem = int(params.get("current_mem"))
        vmxml.current_mem = current_mem
        vmxml.current_mem_unit = mem_unit
        vmxml.memory = int(params.get("memory"))
        # Prepare numa settings in <cpu>
        host_numa_node = utils_misc.NumaInfo()
        host_numa_node_list = host_numa_node.online_nodes
        numa_nodes = len(host_numa_node_list)
        if numa_nodes == 0:
            test.cancel("No host numa node available")
        numa_dict = {}
        numa_dict_list = []
        cpu_idx = 0
        for index in range(numa_nodes):
            numa_dict['id'] = str(index)
            numa_dict['memory'] = str(current_mem // numa_nodes)
            numa_dict['unit'] = mem_unit
            numa_dict['cpus'] = "%s-%s" % (str(cpu_idx), str(cpu_idx + 1))
            cpu_idx += 2
            numa_dict_list.append(numa_dict)
            numa_dict = {}
        if vmxml.xmltreefile.find('cpu'):
            vmxml_cpu = vmxml.cpu
            logging.debug("Existing cpu configuration in guest xml:\n%s", vmxml_cpu)
            vmxml_cpu.mode = 'host-model'
        else:
            vmxml_cpu = vm_xml.VMCPUXML()
            vmxml_cpu.xml = "<cpu mode='host-model'><numa/></cpu>"
        vmxml.vcpu = numa_nodes * 2
        vmxml_cpu.numa_cell = vmxml_cpu.dicts_to_cells(numa_dict_list)
        logging.debug(vmxml_cpu.numa_cell)
        vmxml.cpu = vmxml_cpu
        vmxml.sync()
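Once prepare_vmxml_mem() has set maxMemory and a guest NUMA topology, a DIMM can be hot-plugged into one of the guest nodes. A hedged sketch using standard libvirt memory-device XML (the size and target node are illustrative, not taken from the original test):

# Hedged sketch of the follow-up hotplug step; not the original test code.
import os

from virttest import data_dir, virsh


def hotplug_dimm(vm_name, size_kib=524288, target_node=0):
    dimm_xml = ("<memory model='dimm'><target>"
                "<size unit='KiB'>{}</size><node>{}</node>"
                "</target></memory>".format(size_kib, target_node))
    xml_path = os.path.join(data_dir.get_tmp_dir(), 'dimm.xml')
    with open(xml_path, 'w') as xml_file:
        xml_file.write(dimm_xml)
    return virsh.attach_device(vm_name, xml_path, debug=True,
                               ignore_status=True)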
Example 13
def run(test, params, env):
    """
    Start VM with numatune when set host cpu in unrelated numa node is offline.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
    turned_off_cpu = ''
    cpu_index = params.get('cpu_index', "1")

    try:
        numa_info = utils_misc.NumaInfo()
        # Find a suitable cpu on first node with memory
        online_nodes = numa_info.get_online_nodes_withmem()
        node_0_cpus = numa_info.get_all_node_cpus()[online_nodes[0]].strip().split(' ')
        turned_off_cpu = node_0_cpus[int(cpu_index)]
        change_cpu_state_and_check(turned_off_cpu, 'off')

        if vm.is_alive():
            vm.destroy()
        if len(online_nodes) < 2:
            test.cancel("Cannot proceed with the test as there is not enough"
                        "NUMA nodes(>= 2) with memory.")
        memory_mode = params.get("memory_mode", "strict")
        # Update the numatune for second node with memory
        numa_memory = {'mode': memory_mode,
                       'nodeset': online_nodes[1]}
        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()
        vm.start()
        vm.wait_for_login().close()
    except (exceptions.TestFail, exceptions.TestCancel):
        raise
    except Exception as e:
        test.error("Unexpected failure: {}.".format(e))
    finally:
        backup_xml.sync()
        if turned_off_cpu:
            change_cpu_state_and_check(turned_off_cpu, 'on')
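change_cpu_state_and_check() is referenced above but not shown. A hypothetical sketch that toggles a host CPU through sysfs and verifies the result (the real helper may rely on avocado's cpu utilities instead):

# Hypothetical sketch of the helper used above; the sysfs layout is standard.
from avocado.utils import process


def change_cpu_state_and_check(cpu_id, state):
    value = '1' if state == 'on' else '0'
    online_path = '/sys/devices/system/cpu/cpu{}/online'.format(cpu_id)
    process.run('echo {} > {}'.format(value, online_path), shell=True)
    with open(online_path) as online_file:
        if online_file.read().strip() != value:
            raise RuntimeError('CPU {} did not switch {}'.format(cpu_id, state))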
Example 14
    def prepare_vmxml_mem(vmxml):
        """
        Prepare memory and numa settings in vmxml before hotplug dimm

        :param vmxml: guest current xml
        """
        # Prepare memory settings
        vmxml.max_mem_rt = int(params.get("max_mem"))
        vmxml.max_mem_rt_slots = int(params.get("maxmem_slots"))
        mem_unit = params.get("mem_unit")
        vmxml.max_mem_rt_unit = mem_unit
        current_mem = int(params.get("current_mem"))
        vmxml.current_mem = current_mem
        vmxml.current_mem_unit = mem_unit
        vmxml.memory = int(params.get("memory"))
        vmxml.sync()
        # Prepare numa settings in <cpu>
        host_numa_node = utils_misc.NumaInfo()
        host_numa_node_list = host_numa_node.online_nodes
        numa_nodes = len(host_numa_node_list)
        if numa_nodes == 0:
            test.cancel("No host numa node available")
        numa_dict = {}
        numa_dict_list = []
        cpu_idx = 0
        for each_node in host_numa_node_list:
            numa_dict['id'] = str(each_node)
            numa_dict['memory'] = str(current_mem // numa_nodes)
            numa_dict['unit'] = mem_unit
            numa_dict['cpus'] = "%s-%s" % (str(cpu_idx), str(cpu_idx + 1))
            cpu_idx += 2
            numa_dict_list.append(numa_dict)
            numa_dict = {}
        vmxml.vcpu = numa_nodes * 2
        vmxml_cpu = vm_xml.VMCPUXML()
        vmxml_cpu.xml = "<cpu><numa/></cpu>"
        vmxml_cpu.numa_cell = numa_dict_list
        logging.debug(vmxml_cpu.numa_cell)
        vmxml.cpu = vmxml_cpu
        vmxml.sync()
Example 15
def check_vcpupin(vm_name, cpu_range, config=''):
    """
    Check the output of the vcpupin command with auto placement.

    :param vm_name: name of the VM to be executed on
    :param cpu_range: range of CPUs available as a string
    :param config: config parameter as a string, empty by default
    """
    numa_info = utils_misc.NumaInfo()
    result = virsh.vcpupin(vm_name, options=config, debug=True,
                           ignore_status=False)
    range_found = False
    for node in numa_info.get_online_nodes_withcpu():
        if re.search(r'{}\s*{}'.format(node, cpu_range), result.stdout_text):
            logging.debug('Expected cpu range: {} found in stdout for '
                          'node: {}.'.format(cpu_range, node))
            range_found = True
        else:
            logging.debug('Node {} has no cpu range'.format(node))
    if not range_found:
        raise TestFail('Expected cpu range: {} not found in stdout of '
                       'vcpupin command.'.format(cpu_range))
Example 16
def update_xml(vm_name, params):
    numa_info = utils_misc.NumaInfo()
    online_nodes = numa_info.get_all_nodes()
    cpu_mode = params.get('cpu_mode', 'host-model')
    # Prepare a memnode list
    numa_memnode = [{
        'mode':
        params.get('memory_mode', 'strict'),
        'cellid':
        params.get('cellid', '0'),
        # Take the first node number above the available nodes (invalid on purpose).
        'nodeset':
        params.get('memory_nodeset', str(int(online_nodes[-1]) + 1))
    }]
    # Prepare a numa cells list
    numa_cells = [{
        'id': '0',
        # cpus attribute is optional and if omitted a CPU-less NUMA node is
        # created as per libvirt.org documentation.
        'memory': '512000',
        'unit': 'KiB'
    }]
    vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
    del vmxml.numa_memory

    vmxml.setup_attrs(
        **{
            'numa_memnode': numa_memnode,
            'cpu': {
                'reset_all': True,
                'mode': cpu_mode,
                'numa_cell': numa_cells
            }
        })

    logging.debug("vm xml is %s", vmxml)
    vmxml.sync()
Example 17
def set_numa_parameter(params, cgstop):
    """
    Set the numa parameters

    :param params: the parameter dictionary
    :param cgstop: whether control groups were stopped before this call
    """
    vm_name = params.get("main_vm")
    mode = params.get("numa_mode")
    nodeset = params.get("numa_nodeset")
    options = params.get("options", None)
    start_vm = params.get("start_vm", "yes")

    # Get host numa node list
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes
    logging.debug("host node list is %s", node_list)

    # Don't use libvirt_xml here because testing numatune command
    result = virsh.numatune(vm_name, mode, nodeset, options, debug=True)
    status = result.exit_status

    # Check status_error
    status_error = params.get("status_error", "no")

    # For a running domain, the mode can't be changed, and the nodeset can
    # be changed only if the domain was started with a mode of 'strict'
    if mode == "strict" and start_vm == "yes":
        status_error = "no"

    # TODO: the '--config' option affects the next boot, and for a shut-off
    # guest the '--current' option is equivalent to '--config'. If a nodeset
    # larger than the host NUMA nodes is set with '--config' or '--current'
    # on a shut-off guest, virsh numatune returns 0 rather than 1, because
    # libvirt only validates the nodeset when the guest is started; the
    # current virsh.start() handling does not cover that case.

    if status_error == "yes":
        if status:
            logging.info("It's an expected error")
        else:
            # If we stopped control groups, then we expect a different
            # result in this failure case; however, if there were no
            # control groups to stop, then don't error needlessly
            if cgstop:
                raise error.TestFail("Unexpected return code %d" % status)
            else:
                logging.info("Control groups stopped, thus expected success")
    elif status_error == "no":
        if status:
            used_node = cpus_parser(nodeset)
            if not set(used_node).issubset(node_list):
                raise error.TestNAError("Host does not support requested"
                                        " nodeset %s" % used_node)
            else:
                raise error.TestFail(result.stderr)
        else:
            if check_numatune_xml(params):
                logging.info(result.stdout)
            else:
                raise error.TestFail("The 'mode' or/and 'nodeset' are"
                                     " inconsistent with numatune XML")
Example 18
def run(test, params, env):
    """
    Qemu numa stress test:
    1) Boot up a guest and find the node it used
    2) Try to allocate memory in that node
    3) Run memory heavy stress inside guest
    4) Check the memory use status of qemu process
    5) Repeat step 2 ~ 4 several times


    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    host_numa_node = utils_misc.NumaInfo()
    if len(host_numa_node.online_nodes) < 2:
        test.cancel("Host only has one NUMA node, skipping test...")

    timeout = float(params.get("login_timeout", 240))
    test_count = int(params.get("test_count", 4))
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=timeout)

    qemu_pid = vm.get_pid()

    if test_count < len(host_numa_node.online_nodes):
        test_count = len(host_numa_node.online_nodes)

    tmpfs_size = params.get_numeric("tmpfs_size")
    for node in host_numa_node.nodes:
        node_mem = int(host_numa_node.read_from_node_meminfo(node, "MemTotal"))
        if tmpfs_size == 0:
            tmpfs_size = node_mem
    tmpfs_path = params.get("tmpfs_path", "tmpfs_numa_test")
    tmpfs_path = utils_misc.get_path(data_dir.get_tmp_dir(), tmpfs_path)
    tmpfs_write_speed = get_tmpfs_write_speed()
    dd_timeout = tmpfs_size / tmpfs_write_speed * 1.5
    mount_fs_size = "size=%dK" % tmpfs_size
    memory_file = utils_misc.get_path(tmpfs_path, "test")
    dd_cmd = "dd if=/dev/urandom of=%s bs=1k count=%s" % (memory_file,
                                                          tmpfs_size)
    utils_memory.drop_caches()

    if utils_memory.freememtotal() < tmpfs_size:
        test.cancel("Host does not have enough free memory to run the test, "
                    "skipping test...")

    if not os.path.isdir(tmpfs_path):
        os.mkdir(tmpfs_path)

    test_mem = float(params.get("mem")) * float(params.get("mem_ratio", 0.8))
    stress_args = "--cpu 4 --io 4 --vm 2 --vm-bytes %sM" % int(test_mem / 2)
    most_used_node, memory_used = max_mem_map_node(host_numa_node, qemu_pid)

    for test_round in range(test_count):
        if os.path.exists(memory_file):
            os.remove(memory_file)
        utils_memory.drop_caches()
        if utils_memory.freememtotal() < tmpfs_size:
            test.error("Don't have enough memory to execute this "
                       "test after %s round" % test_round)
        error_context.context("Executing stress test round: %s" % test_round,
                              logging.info)
        numa_node_malloc = most_used_node
        numa_dd_cmd = "numactl -m %s %s" % (numa_node_malloc, dd_cmd)
        error_context.context(
            "Try to allocate memory in node %s" % numa_node_malloc,
            logging.info)
        try:
            utils_misc.mount("none", tmpfs_path, "tmpfs", perm=mount_fs_size)
            funcatexit.register(env, params.get("type"), utils_misc.umount,
                                "none", tmpfs_path, "tmpfs")
            process.system(numa_dd_cmd, timeout=dd_timeout, shell=True)
        except Exception as error_msg:
            if "No space" in str(error_msg):
                pass
            else:
                test.fail("Can not allocate memory in node %s."
                          " Error message:%s" %
                          (numa_node_malloc, str(error_msg)))
        error_context.context("Run memory heavy stress in guest", logging.info)
        stress_test = utils_test.VMStress(vm,
                                          "stress",
                                          params,
                                          stress_args=stress_args)
        stress_test.load_stress_tool()
        error_context.context("Get the qemu process memory use status",
                              logging.info)
        node_after, memory_after = max_mem_map_node(host_numa_node, qemu_pid)
        if node_after == most_used_node and memory_after >= memory_used:
            test.fail("Memory still stick in node %s" % numa_node_malloc)
        else:
            most_used_node = node_after
            memory_used = memory_after
        stress_test.unload_stress()
        stress_test.clean()
        utils_misc.umount("none", tmpfs_path, "tmpfs")
        funcatexit.unregister(env, params.get("type"), utils_misc.umount,
                              "none", tmpfs_path, "tmpfs")
        session.cmd("sync; echo 3 > /proc/sys/vm/drop_caches")
        utils_memory.drop_caches()

    session.close()
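max_mem_map_node() and get_tmpfs_write_speed() are referenced above but not shown. A hypothetical sketch of the former, built on the same get_numa_status() call used elsewhere in these examples:

# Hypothetical sketch of max_mem_map_node(); the real helper may differ.
from virttest import utils_test


def max_mem_map_node(host_numa_node, qemu_pid):
    memory_status, _ = utils_test.qemu.get_numa_status(host_numa_node, qemu_pid)
    node_list = host_numa_node.online_nodes
    most_used = max(memory_status)
    return node_list[memory_status.index(most_used)], most_used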
Example 19
def run(test, params, env):
    """
    KVM multi test:
    1) Log into guests
    2) Check all the nics available or not
    3) Ping among guest nic and host
       3.1) Ping with different packet size
       3.2) Flood ping test
       3.3) Final ping test
    4) Transfer files among guest nics and host
       4.1) Create file by dd command in guest
       4.2) Transfer file between nics
       4.3) Compare original file and transferred file
    5) ping among different nics
       5.1) Ping with different packet size
       5.2) Flood ping test
       5.3) Final ping test
    6) Transfer files among different nics
       6.1) Create file by dd command in guest
       6.2) Transfer file between nics
       6.3) Compare original file and transferred file
    7) Repeat step 3 - 6 on every nic.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def ping(session, nic, dst_ip, strict_check, flood_minutes):
        d_packet_size = [
            1, 4, 48, 512, 1440, 1500, 1505, 4054, 4055, 4096, 4192, 8878,
            9000, 32767, 65507
        ]
        packet_size = params.get("packet_size", "").split() or d_packet_size
        for size in packet_size:
            error_context.context("Ping with packet size %s" % size,
                                  logging.info)
            status, output = utils_test.ping(dst_ip,
                                             10,
                                             interface=nic,
                                             packetsize=size,
                                             timeout=30,
                                             session=session)
            if strict_check == "yes":
                ratio = utils_test.get_loss_ratio(output)
                if ratio != 0:
                    test.fail("Loss ratio is %s for packet size"
                              " %s" % (ratio, size))
            else:
                if status != 0:
                    test.fail("Ping returns non-zero value %s" % output)

        error_context.context("Flood ping test", logging.info)
        utils_test.ping(dst_ip,
                        None,
                        interface=nic,
                        flood=True,
                        output_func=None,
                        timeout=flood_minutes * 60,
                        session=session)
        error_context.context("Final ping test", logging.info)
        counts = params.get("ping_counts", 100)
        status, output = utils_test.ping(dst_ip,
                                         counts,
                                         interface=nic,
                                         timeout=float(counts) * 1.5,
                                         session=session)
        if strick_check == "yes":
            ratio = utils_test.get_loss_ratio(output)
            if ratio != 0:
                test.fail("Packet loss ratio is %s after flood" % ratio)
        else:
            if status != 0:
                test.fail("Ping returns non-zero value %s" % output)

    def file_transfer(session, src, dst):
        username = params.get("username", "")
        password = params.get("password", "")
        src_path = "/tmp/1"
        dst_path = "/tmp/2"
        port = int(params["file_transfer_port"])

        cmd = "dd if=/dev/urandom of=%s bs=100M count=1" % src_path
        cmd = params.get("file_create_cmd", cmd)

        error_context.context("Create file by dd command, cmd: %s" % cmd,
                              logging.info)
        session.cmd(cmd)

        transfer_timeout = int(params.get("transfer_timeout"))
        log_filename = "scp-from-%s-to-%s.log" % (src, dst)
        error_context.context("Transfer file from %s to %s" % (src, dst),
                              logging.info)
        remote.scp_between_remotes(src,
                                   dst,
                                   port,
                                   password,
                                   password,
                                   username,
                                   username,
                                   src_path,
                                   dst_path,
                                   log_filename=log_filename,
                                   timeout=transfer_timeout)
        src_path = dst_path
        dst_path = "/tmp/3"
        log_filename = "scp-from-%s-to-%s.log" % (dst, src)
        error_context.context("Transfer file from %s to %s" % (dst, src),
                              logging.info)
        remote.scp_between_remotes(dst,
                                   src,
                                   port,
                                   password,
                                   password,
                                   username,
                                   username,
                                   src_path,
                                   dst_path,
                                   log_filename=log_filename,
                                   timeout=transfer_timeout)
        error_context.context("Compare original file and transferred file",
                              logging.info)

        cmd1 = "md5sum /tmp/1"
        cmd2 = "md5sum /tmp/3"
        md5sum1 = session.cmd(cmd1).split()[0]
        md5sum2 = session.cmd(cmd2).split()[0]
        if md5sum1 != md5sum2:
            test.error("File changed after transfer")

    nic_interface_list = []
    check_irqbalance_cmd = params.get("check_irqbalance_cmd")
    stop_irqbalance_cmd = params.get("stop_irqbalance_cmd")
    start_irqbalance_cmd = params.get("start_irqbalance_cmd")
    status_irqbalance = params.get("status_irqbalance")
    vms = params["vms"].split()
    host_mem = utils_memory.memtotal() / (1024 * 1024)
    host_cpu_count = len(utils_misc.get_cpu_processors())
    vhost_count = 0
    if params.get("vhost"):
        vhost_count = 1
    if host_cpu_count < (1 + vhost_count) * len(vms):
        test.error("The host doesn't have enough cpus to start the guests. "
                   "cpus: %d, minimum of vcpus and vhost: %d" %
                   (host_cpu_count, (1 + vhost_count) * len(vms)))
    params['mem'] = int(host_mem / len(vms) * 1024)
    params['smp'] = host_cpu_count // len(vms) - vhost_count
    if params['smp'] % 2 != 0:
        params['vcpu_sockets'] = 1
    params["start_vm"] = "yes"
    for vm_name in vms:
        env_process.preprocess_vm(test, params, env, vm_name)
    timeout = float(params.get("login_timeout", 360))
    strict_check = params.get("strick_check", "no")
    host_ip = utils_net.get_ip_address_by_interface(params.get("netdst"))
    host_ip = params.get("srchost", host_ip)
    flood_minutes = float(params["flood_minutes"])
    error_context.context("Check irqbalance service status", logging.info)
    o = process.system_output(check_irqbalance_cmd,
                              ignore_status=True,
                              shell=True)
    check_stop_irqbalance = False
    if re.findall(status_irqbalance, o):
        logging.debug("stop irqbalance")
        process.run(stop_irqbalance_cmd, shell=True)
        check_stop_irqbalance = True
        o = process.system_output(check_irqbalance_cmd,
                                  ignore_status=True,
                                  shell=True)
        if re.findall(status_irqbalance, o):
            test.error("Can not stop irqbalance")
    thread_list = []
    nic_interface = []
    for vm_name in vms:
        guest_ifname = ""
        guest_ip = ""
        vm = env.get_vm(vm_name)
        session = vm.wait_for_login(timeout=timeout)
        thread_list.extend(vm.vcpu_threads)
        thread_list.extend(vm.vhost_threads)
        error_context.context("Check all the nics available or not",
                              logging.info)
        for index, nic in enumerate(vm.virtnet):
            guest_ifname = utils_net.get_linux_ifname(session, nic.mac)
            guest_ip = vm.get_address(index)
            if not (guest_ifname and guest_ip):
                err_log = "vms %s get ip or ifname failed." % vm_name
                err_log = "ifname: %s, ip: %s." % (guest_ifname, guest_ip)
                test.fail(err_log)
            nic_interface = [guest_ifname, guest_ip, session]
            nic_interface_list.append(nic_interface)
    error_context.context("Pin vcpus and vhosts to host cpus", logging.info)
    host_numa_nodes = utils_misc.NumaInfo()
    vthread_num = 0
    for numa_node_id in host_numa_nodes.nodes:
        numa_node = host_numa_nodes.nodes[numa_node_id]
        for _ in range(len(numa_node.cpus)):
            if vthread_num >= len(thread_list):
                break
            vcpu_tid = thread_list[vthread_num]
            logging.debug("pin vcpu/vhost thread(%s) to cpu(%s)" %
                          (vcpu_tid, numa_node.pin_cpu(vcpu_tid)))
            vthread_num += 1

    nic_interface_list_len = len(nic_interface_list)
    # ping and file transfer test
    for src_ip_index in range(nic_interface_list_len):
        error_context.context("Ping test from guest to host", logging.info)
        src_ip_info = nic_interface_list[src_ip_index]
        ping(src_ip_info[2], src_ip_info[0], host_ip, strict_check,
             flood_minutes)
        error_context.context("File transfer test between guest and host",
                              logging.info)
        file_transfer(src_ip_info[2], src_ip_info[1], host_ip)
        for dst_ip in nic_interface_list[src_ip_index:]:
            if src_ip_info[1] == dst_ip[1]:
                continue
            txt = "Ping test between %s and %s" % (src_ip_info[1], dst_ip[1])
            error_context.context(txt, logging.info)
            ping(src_ip_info[2], src_ip_info[0], dst_ip[1], strict_check,
                 flood_minutes)
            txt = "File transfer test between %s " % src_ip_info[1]
            txt += "and %s" % dst_ip[1]
            error_context.context(txt, logging.info)
            file_transfer(src_ip_info[2], src_ip_info[1], dst_ip[1])
    if check_stop_irqbalance:
        process.run(start_irqbalance_cmd, shell=True)
Example 20
def run(test, params, env):
    """
    Test numa tuning with memory
    """
    numad_log = []
    memory_status = []

    def _logger(line):
        """
        Callback function to log libvirtd output.
        """
        numad_log.append(line)

    def mem_compare(used_node, left_node):
        """
        Memory used in the used nodes should be greater than in the remaining nodes

        :param used_node: used node list
        :param left_node: left node list
        """
        used_mem_total = 0
        left_node_mem_total = 0
        for i in used_node:
            used_mem_total += int(memory_status[i])
        for i in left_node:
            left_node_mem_total += int(memory_status[i])
        if left_node_mem_total > used_mem_total:
            test.fail("nodes memory usage not expected.")

    def format_affinity_str(cpu_list):
        """
        Format affinity str

        :param cpu_list: list of cpu number
        :return: cpu affinity string
        """
        cmd = "lscpu | grep '^CPU(s):'"
        ret = process.run(cmd, shell=True)
        cpu_num = int(ret.stdout_text.split(':')[1].strip())
        cpu_affinity_str = ""
        for i in range(cpu_num):
            if i in cpu_list:
                cpu_affinity_str += "y"
            else:
                cpu_affinity_str += "-"
        return cpu_affinity_str

    def cpu_affinity_check(cpuset=None, node=None):
        """
        Check vcpuinfo cpu affinity

        :param cpuset: cpuset list
        :param node: node number list
        """
        result = virsh.vcpuinfo(vm_name, debug=True)
        output = result.stdout.strip().splitlines()[-1]
        cpu_affinity = output.split(":")[-1].strip()
        if node:
            tmp_list = []
            for node_num in node:
                host_node = utils_misc.NumaNode(i=node_num + 1)
                logging.debug("node %s cpu list is %s" %
                              (node_num, host_node.cpus))
                tmp_list += host_node.cpus
            cpu_list = [int(i) for i in tmp_list]
        if cpuset:
            cpu_list = cpuset
        ret = format_affinity_str(cpu_list)
        logging.debug("expect cpu affinity is %s", ret)
        if cpu_affinity != ret:
            test.fail("vcpuinfo cpu affinity not expected")

    def numa_mode_check(mode_nodeset):
        """
        when the mode = 'preferred' or 'interleave', it is better to check
        numa_maps.
        """
        vm_pid = vm.get_pid()
        numa_map = '/proc/%s/numa_maps' % vm_pid
        # Open a file
        with open(numa_map) as file:
            for line in file.readlines():
                if line.split()[1] != mode_nodeset:
                    test.fail("numa node and nodeset %s is "
                              "not expected" % mode_nodeset)

    vcpu_placement = params.get("vcpu_placement")
    vcpu_cpuset = params.get("vcpu_cpuset")
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Get host numa node list
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes_withmem
    logging.debug("host node list is %s", " ".join(map(str, node_list)))

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value
    arch = platform.machine()
    if 'ppc64' in arch:
        try:
            ppc_memory_nodeset = ""
            nodes = numa_memory["nodeset"]
            if '-' in nodes:
                for nnode in range(int(nodes.split('-')[0]),
                                   int(nodes.split('-')[1]) + 1):
                    ppc_memory_nodeset += str(node_list[nnode]) + ','
            else:
                node_lst = nodes.split(',')
                for nnode in range(len(node_lst)):
                    ppc_memory_nodeset += str(node_list[int(
                        node_lst[nnode])]) + ','
            numa_memory["nodeset"] = ppc_memory_nodeset[:-1]
        except (KeyError, IndexError):
            pass

    # Prepare libvirtd session with log level as 1
    config_path = os.path.join(data_dir.get_tmp_dir(), "virt-test.conf")
    open(config_path, 'a').close()
    config = utils_config.LibvirtdConfig(config_path)
    config.log_level = 1
    arg_str = "--config %s" % config_path
    numad_reg = ".*numad"
    libvirtd = utils_libvirtd.LibvirtdSession(logging_handler=_logger,
                                              logging_pattern=numad_reg)

    try:
        libvirtd.start(arg_str=arg_str)
        # As libvirtd is started as a session under root, we need to stop the
        # virtlogd service and start it as a daemon to avoid an SELinux denial
        try:
            path.find_command('virtlogd')
            process.run("service virtlogd stop", ignore_status=True)
            process.run("virtlogd -d")
        except path.CmdNotFoundError:
            pass

        # Allow more time for libvirtd to restart successfully.
        ret = utils_misc.wait_for(lambda: libvirtd.is_working(),
                                  timeout=240,
                                  step=1)
        if not ret:
            test.fail("Libvirtd hang after restarted")

        # Get host cpu list
        tmp_list = []
        for node_num in node_list:
            host_node = utils_misc.NumaNode(i=node_num + 1)
            logging.debug("node %s cpu list is %s" %
                          (node_num, host_node.cpus))
            tmp_list += host_node.cpus
        cpu_list = [int(i) for i in tmp_list]

        dynamic_parameters = params.get('can_be_dynamic', 'no') == 'yes'

        if numa_memory.get('nodeset'):
            used_node = cpu.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            if not status_error:
                if not set(used_node).issubset(node_list):
                    if not dynamic_parameters:
                        test.cancel("nodeset %s out of range" %
                                    numa_memory['nodeset'])
                    else:
                        if '-' in numa_memory['nodeset']:
                            nodes_size = len(numa_memory['nodeset'].split('-'))
                        else:
                            nodes_size = len(numa_memory['nodeset'].split(','))
                        if nodes_size > len(node_list):
                            test.cancel("nodeset %s out of range" %
                                        numa_memory['nodeset'])
                        else:
                            numa_memory['nodeset'] = node_list[:nodes_size]

        if vcpu_cpuset:
            pre_cpuset = cpu.cpus_parser(vcpu_cpuset)
            logging.debug("Parsed cpuset list is %s", pre_cpuset)
            if not set(pre_cpuset).issubset(cpu_list):
                if not dynamic_parameters:
                    test.cancel("cpuset %s out of range" % vcpu_cpuset)
                else:
                    random_cpus = []
                    # Choose random cpus from the list of CPUs available on
                    # the system, making sure no cpu is added twice and the
                    # selected list has the required length
                    for i in range(
                            len([int(i) for i in vcpu_cpuset.split(',')])):
                        rand_cpu = random.randint(min(cpu_list), max(cpu_list))
                        while rand_cpu in random_cpus:
                            rand_cpu = random.randint(min(cpu_list),
                                                      max(cpu_list))
                        random_cpus.append(rand_cpu)
                    random_cpus.sort()
                    vcpu_cpuset = (','.join(
                        [str(cpu_num) for cpu_num in random_cpus]))
                    pre_cpuset = cpu.cpus_parser(vcpu_cpuset)

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        vcpu_num = vmxml.vcpu
        max_mem = vmxml.max_mem
        if vmxml.xmltreefile.find('cputune'):
            vmxml.xmltreefile.remove_by_xpath('/cputune')
        else:
            logging.debug('No vcpupin found')
        if vcpu_placement:
            vmxml.placement = vcpu_placement
        if vcpu_cpuset:
            vmxml.cpuset = vcpu_cpuset
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()
        numad_cmd_opt = "-w %s:%s" % (vcpu_num, max_mem // 1024)

        try:
            vm.start()
            vm.wait_for_login()
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            numa_memory_new = vmxml_new.numa_memory
            logging.debug("Current memory config dict is %s" % numa_memory_new)

            # Check xml config
            if numa_memory.get('placement') == 'static':
                pre_numa_memory = numa_memory.copy()
                del pre_numa_memory['placement']
            else:
                pre_numa_memory = numa_memory

            if pre_numa_memory != numa_memory_new:
                test.fail("memory config %s not expected "
                          "after domain start" % numa_memory_new)

            pos_vcpu_placement = vmxml_new.placement
            logging.debug("vcpu placement after domain start is %s",
                          pos_vcpu_placement)
            try:
                pos_cpuset = vmxml_new.cpuset
                logging.debug("vcpu cpuset after vm start is %s", pos_cpuset)
            except libvirt_xml.xcepts.LibvirtXMLNotFoundError:
                if vcpu_cpuset and vcpu_placement != 'auto':
                    test.fail("cpuset not found in domain xml.")

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if status_error:
                return
            else:
                test.fail("Test failed in positive case.\n "
                          "error: %s\n%s" % (e, bug_url))

        # Check qemu process numa memory usage
        memory_status, qemu_cpu = utils_test.qemu.get_numa_status(
            host_numa_node, vm.get_pid())
        logging.debug("The memory status is %s", memory_status)
        logging.debug("The cpu usage is %s", qemu_cpu)

        if vcpu_cpuset:
            total_cpu = []
            for node_cpu in qemu_cpu:
                total_cpu += node_cpu
            for i in total_cpu:
                if int(i) not in pre_cpuset:
                    test.fail("cpu %s is not expected" % i)
            cpu_affinity_check(cpuset=pre_cpuset)
        if numa_memory.get('nodeset'):
            # If there are inconsistent node numbers on host,
            # convert it into sequence number so that it can be used
            # in mem_compare
            if numa_memory.get('mode') == 'strict':
                left_node = [
                    node_list.index(i) for i in node_list if i not in used_node
                ]
                used_node = [node_list.index(i) for i in used_node]
                mem_compare(used_node, left_node)
            elif numa_memory.get('mode') == 'preferred':
                mode_nodeset = 'prefer:' + numa_memory.get('nodeset')
                numa_mode_check(mode_nodeset)
            else:
                mode_nodeset = numa_memory.get('mode') + ':' + numa_memory.get(
                    'nodeset')
                numa_mode_check(mode_nodeset)
        logging.debug("numad log list is %s", numad_log)
        if vcpu_placement == 'auto' or numa_memory.get('placement') == 'auto':
            if not numad_log:
                test.fail("numad usage not found in libvirtd log")
            if numad_log[0].split("numad ")[-1] != numad_cmd_opt:
                logging.warning('numa log:\n%s\n' %
                                numad_log[0].split("numad ")[-1])
                logging.warning('numa cmd opt:\n%s\n' % numad_cmd_opt)
                test.fail("numad command not expected in log")
            numad_ret = numad_log[1].split("numad: ")[-1]
            numad_node = cpu.cpus_parser(numad_ret)
            left_node = [
                node_list.index(i) for i in node_list if i not in numad_node
            ]
            numad_node_seq = [node_list.index(i) for i in numad_node]
            logging.debug("numad nodes are %s", numad_node)
            if numa_memory.get('placement') == 'auto':
                if numa_memory.get('mode') == 'strict':
                    mem_compare(numad_node_seq, left_node)
                elif numa_memory.get('mode') == 'preferred':
                    mode_nodeset = 'prefer:' + numad_ret
                    numa_mode_check(mode_nodeset)
                else:
                    mode_nodeset = numa_memory.get('mode') + ':' + numad_ret
                    numa_mode_check(mode_nodeset)
            if vcpu_placement == 'auto':
                for i in left_node:
                    if qemu_cpu[i]:
                        test.fail("cpu usage in node %s is not expected" % i)
                cpu_affinity_check(node=numad_node)

    finally:
        try:
            path.find_command('virtlogd')
            process.run('pkill virtlogd', ignore_status=True)
            process.run('systemctl restart virtlogd.socket',
                        ignore_status=True)
        except path.CmdNotFoundError:
            pass
        libvirtd.exit()
        if config_path:
            config.restore()
            if os.path.exists(config_path):
                os.remove(config_path)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
Example 21
def run(test, params, env):
    """
    Test numa memory migrate with live numa tuning
    """
    numad_log = []
    memory_status = []

    def _logger(line):
        """
        Callback function to log libvirtd output.
        """
        numad_log.append(line)

    def mem_compare(used_node, left_node):
        """
        Memory used in the used nodes should be greater than in the remaining nodes

        :param used_node: used node list
        :param left_node: left node list
        """
        used_mem_total = 0
        left_node_mem_total = 0
        for i in used_node:
            used_mem_total += int(memory_status[i])
        for i in left_node:
            left_node_mem_total += int(memory_status[i])
        if left_node_mem_total > used_mem_total:
            raise exceptions.TestFail("nodes memory usage not expected.")

    vm_name = params.get("main_vm")
    options = params.get("options", "live")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Get host numa node list
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes_withmem
    logging.debug("host node list is %s", node_list)
    if len(node_list) < 2:
        raise exceptions.TestSkipError("At least 2 numa nodes are needed on"
                                       " host")

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    # Prepare libvirtd session with log level as 1
    config_path = os.path.join(data_dir.get_tmp_dir(), "virt-test.conf")
    with open(config_path, 'a') as f:
        pass
    config = utils_config.LibvirtdConfig(config_path)
    config.log_level = 1
    arg_str = "--config %s" % config_path
    numad_reg = ".*numad"
    libvirtd = utils_libvirtd.LibvirtdSession(logging_handler=_logger,
                                              logging_pattern=numad_reg)

    try:
        libvirtd.start(arg_str=arg_str)
        # As libvirtd is started as a session under root, we need to stop the
        # virtlogd service and start it as a daemon to avoid an SELinux denial
        try:
            path.find_command('virtlogd')
            process.run("service virtlogd stop",
                        ignore_status=True,
                        shell=True)
            process.run("virtlogd -d", shell=True)
        except path.CmdNotFoundError:
            pass

        if numa_memory.get('nodeset'):
            used_node = utils_test.libvirt.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            for i in used_node:
                if i not in node_list:
                    raise exceptions.TestSkipError("nodeset %s out of range" %
                                                   numa_memory['nodeset'])

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            vm.wait_for_login()
        except virt_vm.VMStartError as e:
            raise exceptions.TestFail("Test failed in positive case.\n "
                                      "error: %s" % e)

        # Get the remaining nodes besides the ones currently in use
        if numa_memory.get('placement') == 'auto':
            if not numad_log:
                raise exceptions.TestFail("numad usage not found in libvirtd"
                                          " log")
            logging.debug("numad log list is %s", numad_log)
            numad_ret = numad_log[1].split("numad: ")[-1]
            used_node = utils_test.libvirt.cpus_parser(numad_ret)
            logging.debug("numad nodes are %s", used_node)

        left_node = [i for i in node_list if i not in used_node]

        # run numatune live change numa memory config
        for node in left_node:
            virsh.numatune(vm_name,
                           'strict',
                           str(node),
                           options,
                           debug=True,
                           ignore_status=False)

            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            numa_memory_new = vmxml_new.numa_memory
            logging.debug("Current memory config dict is %s" % numa_memory_new)

            # Check xml config
            pos_numa_memory = numa_memory.copy()
            pos_numa_memory['nodeset'] = str(node)
            del pos_numa_memory['placement']
            logging.debug("Expect numa memory config is %s", pos_numa_memory)
            if pos_numa_memory != numa_memory_new:
                raise exceptions.TestFail("numa memory config %s not expected"
                                          " after live update" %
                                          numa_memory_new)

            # Check qemu process numa memory usage
            host_numa_node = utils_misc.NumaInfo()
            memory_status, qemu_cpu = utils_test.qemu.get_numa_status(
                host_numa_node, vm.get_pid())
            logging.debug("The memory status is %s", memory_status)
            # Host node numbers may not be consecutive, so convert them into
            # sequence indices that can be used in mem_compare.
            # memory_status covers every online numa node, so node_list may
            # not match its length.
            total_online_node_list = host_numa_node.online_nodes
            left_node_new = [
                total_online_node_list.index(i) for i in total_online_node_list
                if i != node
            ]
            used_node = [total_online_node_list.index(node)]

            mem_compare(used_node, left_node_new)

    finally:
        try:
            path.find_command('virtlogd')
            process.run('pkill virtlogd', ignore_status=True, shell=True)
            process.run('systemctl restart virtlogd.socket',
                        ignore_status=True,
                        shell=True)
        except path.CmdNotFoundError:
            pass
        libvirtd.exit()
        if config_path:
            config.restore()
            if os.path.exists(config_path):
                os.remove(config_path)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
Example 22
def run(test, params, env):
    """
    Test virsh command virsh freepages
    """
    def get_freepages_number_of_node(node, pagesize):
        """
        Get the number of free pages of the given size on a node

        :param node: node number
        :param pagesize: page size, for example: '4', '2048', '1048576'
            For pagesize '4', which is not a hugepage size, get free memory
                from
                `grep MemFree /sys/devices/system/node/node${node}/meminfo`
            For pagesize '2048' or '1048576', get the free page count from
                /sys/devices/system/node/node${node}/hugepages/
                hugepages-${pagesize}kB/free_hugepages
        :return: integer number of free pages of that size
        """
        if pagesize == '4':
            node_meminfo = host_numa_node.read_from_node_meminfo(
                node, 'MemFree')
            return int(node_meminfo) // 4
        else:
            return hp_cl.get_free_hugepages(node, pagesize)

    def check_hugepages_allocated_status(is_type_check, pagesize, quantity):
        """
        Allocate a specific type of hugepages and check that the total number
        allocated by the kernel equals the sum allocated across all nodes, and
        that the number of free pages on each node equals the number the
        kernel allocated for that node. If yes, return True, else False.

        :param is_type_check: flag to determine whether to do this check
        :param pagesize: hugepage size to check
        :param quantity: number of pages to allocate
        :return: True if the current system supports this pagesize and the
                 pages are allocated successfully, otherwise False
        """
        if not is_type_check or pagesize not in supported_hp_size:
            return False
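        # Request the given number of hugepages of this size from the kernel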
        hp_cl.set_kernel_hugepages(pagesize, quantity)
        # The kernel needs some time to prepare hugepages because of possible
        # memory fragmentation.
        time.sleep(1)
        # Check hugepages allocated by kernel is equal to the sum of all nodes
        actual_quantity = hp_cl.get_kernel_hugepages(pagesize)
        if 0 == actual_quantity:
            # Not enough free memory to support the current hugepage type.
            logging.debug('%s is not available.' % pagesize)
            return False
        else:
            hp_sum = 0
            # Iterate over a copy since failing nodes are removed from
            # node_list below
            for node in node_list[:]:
                try:
                    allocate_pages = hp_cl.get_node_num_huge_pages(
                        node, pagesize)
                    free_pages = hp_cl.get_free_hugepages(node, pagesize)
                    hp_sum += allocate_pages
                except ValueError as exc:
                    logging.warning("%s", str(exc))
                    logging.debug('move node %s out of testing node list',
                                  node)
                    node_list.remove(node)
                    oth_node.append(node)
                    continue
                if allocate_pages != free_pages:
                    # The allocated hugepages should equal the free hugepages
                    # on each node
                    test.error("allocated %sKiB hugepages is not equal to "
                               "free pages on node%s." % (pagesize, node))
            if hp_sum != actual_quantity:
                test.error("the sum of %sKiB hugepages of all nodes is not "
                           "equal to kernel allocated totally." % pagesize)
            return True

    option = params.get("freepages_option", "")
    status_error = "yes" == params.get("status_error", "no")
    cellno = params.get("freepages_cellno")
    pagesize = params.get("freepages_pagesize")

    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes
    oth_node = []

    hp_cl = test_setup.HugePageConfig(params)
    supported_hp_size = hp_cl.get_multi_supported_hugepage_size()

    cellno_list = []
    if cellno == "EACH":
        cellno_list = node_list
    elif cellno == "OUT_OF_RANGE":
        cellno_list.append(max(node_list) + 1)
    else:
        cellno_list.append(cellno)

    # Add 4K pagesize
    supported_hp_size.insert(0, '4')
    pagesize_list = []
    if pagesize == "EACH":
        pagesize_list = supported_hp_size
    else:
        pagesize_list.append(pagesize)

    if not status_error:
        # Check whether the CPU supports 2M hugepages
        check_2M = vm_xml.VMCPUXML.check_feature_name('pse')
        # Check whether the CPU supports 1G hugepages
        check_1G = vm_xml.VMCPUXML.check_feature_name('pdpe1gb')
        if not check_2M and not check_1G:
            test.cancel("pse and pdpe1gb flags are not supported by CPU.")

        num_1G = str(1024 * 1024)
        num_2M = str(2048)

        # Let kernel allocate 64 of 2M hugepages at runtime
        quantity_2M = 64
        # Let kernel allocate 2 of 1G hugepages at runtime
        quantity_1G = 2
        check_2M = check_hugepages_allocated_status(check_2M, num_2M,
                                                    quantity_2M)
        check_1G = check_hugepages_allocated_status(check_1G, num_1G,
                                                    quantity_1G)

        if not check_2M and not check_1G:
            test.cancel("Not enough free memory to support huge pages "
                        "in current system.")

    # Run test
    for cell in cellno_list:
        for page in pagesize_list:
            result = virsh.freepages(cellno=cell,
                                     pagesize=page,
                                     options=option,
                                     debug=True)
            # Unify pagesize unit to KiB
            if page is not None:
                if page.upper() in ['2M', '2048K', '2048KIB']:
                    page = num_2M
                elif page.upper() in ['1G', '1048576K', '1048576KIB']:
                    page = num_1G
            # A node without memory will fail, and that is expected
            if cell is not None and page is None and not status_error:
                status_error = True

            utlv.check_exit_status(result, status_error)
            expect_result_list = []
            # Check huge pages
            if not status_error:
                if '--all' == option:
                    # For --all
                    for i in node_list:
                        node_dict = {}
                        node_dict['Node'] = "Node %s" % i
                        if page is None:
                            # For --all (no specific pagesize given)
                            for page_t in supported_hp_size:
                                page_k = "%sKiB" % page_t
                                node_dict[page_k] = str(
                                    get_freepages_number_of_node(i, page_t))
                        else:
                            # For --all --pagesize
                            page_k = "%sKiB" % page
                            node_dict[page_k] = str(
                                get_freepages_number_of_node(i, page))
                        expect_result_list.append(node_dict)
                else:
                    # For --cellno
                    if page is not None:
                        # For --cellno --pagesize
                        node_dict = {}
                        page_k = "%sKiB" % page
                        node_dict[page_k] = str(
                            get_freepages_number_of_node(cell, page))
                        expect_result_list = [node_dict]

                if check_freepages(result.stdout.strip(), expect_result_list):
                    logging.info("Huge page freepages check pass")
                else:
                    test.fail("Huge page freepages check failed, "
                              "expect result is %s" % expect_result_list)
Example 23
def run(test, params, env):
    """
    Re-assign nr-hugepages

    1. Set up hugepages with 1G page size and hugepages=8
    2. Boot up the guest using /mnt/kvm_hugepage as the backend in the QEMU
       command line
    3. Change nr_hugepages after the guest boots up (6 and 10)
    4. Run the stress test **inside guest**
    5. Change nr_hugepages after the stress test (6 and 10)

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """

    def set_hugepage():
        """Set nr_hugepages"""
        for h_size in (origin_nr - 2, origin_nr + 2):
            hp_config.target_hugepages = h_size
            hp_config.set_hugepages()
            if params.get('on_numa_node'):
                logging.info('Set hugepage number %s on target node %s' % (
                    h_size, target_node))
                hp_config.set_node_num_huge_pages(h_size, target_node,
                                                  hugepage_size)

    origin_nr = int(params['origin_nr'])
    host_numa_node = utils_misc.NumaInfo()
    mem = int(float(utils_misc.normalize_data_size("%sM" % params["mem"])))
    if params.get('on_numa_node'):
        for target_node in host_numa_node.get_online_nodes_withmem():
            node_mem_free = host_numa_node.read_from_node_meminfo(
                target_node, 'MemFree')
            if int(node_mem_free) > mem:
                params['target_nodes'] = target_node
                params["qemu_command_prefix"] = ("numactl --membind=%s" %
                                                 target_node)
                params['target_num_node%s' % target_node] = origin_nr
                break
            logging.info(
                'The free memory of node %s is %s, which is not enough for'
                ' guest memory: %s' % (target_node, node_mem_free, mem))
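        # for/else: reached only when no online node has enough free memory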
        else:
            test.cancel("No node on your host has sufficient free memory for "
                        "this test.")
    hp_config = test_setup.HugePageConfig(params)
    hp_config.target_hugepages = origin_nr
    logging.info('Setup hugepage number to %s' % origin_nr)
    hp_config.setup()
    hugepage_size = utils_memory.get_huge_page_size()

    params['start_vm'] = "yes"
    vm_name = params['main_vm']
    env_process.preprocess_vm(test, params, env, vm_name)
    vm = env.get_vm(vm_name)
    params['stress_args'] = '--vm %s --vm-bytes 256M --timeout 30s' % (
            mem // 512)
    logging.info('Loading stress on guest.')
    stress = utils_test.VMStress(vm, 'stress', params)
    stress.load_stress_tool()
    time.sleep(30)
    stress.unload_stress()
    set_hugepage()
    hp_config.cleanup()
    vm.verify_kernel_crash()
Example 24
def run(test, params, env):
    """
    Test memory device attach/detach.

    1. Prepare the test environment, destroy or suspend a VM.
    2. Prepare the memory device xml.
    3. Edit the domain xml and start the domain.
    4. Perform attach/detach operations and related checks.
    5. Recover the test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    # Global variable to store max/current memory,
    # it may change after attach/detach
    new_max_mem = None
    new_cur_mem = None

    def consume_vm_mem(size=1000, timeout=360):
        """
        To consume guest memory, default size is 1000M
        """
        session = vm.wait_for_login()
        # Mount tmpfs on /mnt and write a file on it;
        # this is what consumes the guest memory
        sh_cmd = ("swapoff -a; mount -t tmpfs -o size={0}M tmpfs "
                  "/mnt; dd if=/dev/urandom of=/mnt/test bs=1M"
                  " count={0}".format(size))
        session.cmd(sh_cmd, timeout=timeout)
        session.close()

    def mount_hugepages(page_size):
        """
        To mount hugepages

        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        """
        if page_size == 4:
            perm = ""
        else:
            perm = "pagesize=%dK" % page_size

        tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages",
                                             "hugetlbfs")
        if tlbfs_status:
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
        utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs", perm)

    def setup_hugepages(page_size=2048, shp_num=2000):
        """
        To setup hugepages

        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        :param shp_num: number of hugepages
        """
        mount_hugepages(page_size)
        utils_memory.set_num_huge_pages(shp_num)
        config.hugetlbfs_mount = ["/dev/hugepages"]
        utils_libvirtd.libvirtd_restart()

    def restore_hugepages(page_size=4):
        """
        To recover hugepages
        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        """
        mount_hugepages(page_size)
        config.restore()
        utils_libvirtd.libvirtd_restart()

    def check_qemu_cmd(max_mem_rt, tg_size):
        """
        Check qemu command line options.
        :param max_mem_rt: size of max memory
        :param tg_size: Target hotplug memory size
        :return: None
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if discard:
            if libvirt_version.version_compare(7, 3, 0):
                cmd = cmd + " | grep " + '\\"discard-data\\":true'
            else:
                cmd += " | grep 'discard-data=yes'"
        elif max_mem_rt:
            cmd += (" | grep 'slots=%s,maxmem=%sk'" %
                    (max_mem_slots, max_mem_rt))
            if tg_size:
                size = int(tg_size) * 1024
                if huge_pages or discard or cold_plug_discard:
                    cmd_str = 'memdimm.\|memory-backend-file,id=ram-node.'
                    cmd += (
                        " | grep 'memory-backend-file,id=%s' | grep 'size=%s" %
                        (cmd_str, size))
                else:
                    cmd_str = 'mem.\|memory-backend-ram,id=ram-node.'
                    cmd += (
                        " | grep 'memory-backend-ram,id=%s' | grep 'size=%s" %
                        (cmd_str, size))

                if pg_size:
                    cmd += ",host-nodes=%s" % node_mask
                    if numa_memnode:
                        for node in numa_memnode:
                            if ('nodeset' in node
                                    and node['nodeset'] in node_mask):
                                cmd += ",policy=%s" % node['mode']
                    cmd += ".*pc-dimm,node=%s" % tg_node
                if mem_addr:
                    cmd += (".*slot=%s" % (mem_addr['slot']))
                cmd += "'"
            if cold_plug_discard:
                cmd += " | grep 'discard-data=yes'"

        # Run the command
        result = process.run(cmd, shell=True, verbose=True, ignore_status=True)
        if result.exit_status:
            test.fail('Qemu command check fail.')

    def check_guest_meminfo(old_mem, check_option):
        """
        Check meminfo on guest.
        """
        assert old_mem is not None
        session = vm.wait_for_login()
        # Hot-plugged memory should be online by udev rules
        udev_file = "/lib/udev/rules.d/80-hotplug-cpu-mem.rules"
        udev_rules = ('SUBSYSTEM=="memory", ACTION=="add", TEST=="state",'
                      ' ATTR{state}=="offline", ATTR{state}="online"')
        cmd = ("grep memory %s || echo '%s' >> %s" %
               (udev_file, udev_rules, udev_file))
        session.cmd(cmd)
        # Wait a while for new memory to be detected.
        utils_misc.wait_for(
            lambda: vm.get_totalmem_sys(online) != int(old_mem),
            30,
            first=20.0)
        new_mem = vm.get_totalmem_sys(online)
        session.close()
        logging.debug("Memtotal on guest: %s", new_mem)
        no_of_times = 1
        if at_times:
            no_of_times = at_times
        if check_option == "attach":
            if new_mem != int(old_mem) + (int(tg_size) * no_of_times):
                test.fail("Total memory on guest couldn't changed after "
                          "attach memory device")

        if check_option == "detach":
            if new_mem != int(old_mem) - (int(tg_size) * no_of_times):
                test.fail("Total memory on guest couldn't changed after "
                          "detach memory device")

    def check_dom_xml(at_mem=False, dt_mem=False):
        """
        Check domain xml options.
        """
        # Global variable to store max/current memory
        global new_max_mem
        global new_cur_mem
        if attach_option.count("config"):
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        else:
            dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        try:
            xml_max_mem_rt = int(dom_xml.max_mem_rt)
            xml_max_mem = int(dom_xml.max_mem)
            xml_cur_mem = int(dom_xml.current_mem)
            assert int(max_mem_rt) == xml_max_mem_rt

            # Check attached/detached memory
            logging.info("at_mem=%s,dt_mem=%s", at_mem, dt_mem)
            logging.info("detach_device is %s", detach_device)
            if at_mem:
                if at_times:
                    assert int(max_mem) + (int(tg_size) *
                                           at_times) == xml_max_mem
                else:
                    assert int(max_mem) + int(tg_size) == xml_max_mem
                # Bug 1220702, skip the check for current memory
                if at_times:
                    assert int(cur_mem) + (int(tg_size) *
                                           at_times) == xml_cur_mem
                else:
                    assert int(cur_mem) + int(tg_size) == xml_cur_mem
                new_max_mem = xml_max_mem
                new_cur_mem = xml_cur_mem
                mem_dev = dom_xml.get_devices("memory")
                memory_devices = 1
                if at_times:
                    memory_devices = at_times
                if len(mem_dev) != memory_devices:
                    test.fail("Found wrong number of memory device")
                assert int(tg_size) == int(mem_dev[0].target.size)
                assert int(tg_node) == int(mem_dev[0].target.node)
            elif dt_mem:
                if at_times:
                    assert int(new_max_mem) - (int(tg_size) *
                                               at_times) == xml_max_mem
                    assert int(new_cur_mem) - (int(tg_size) *
                                               at_times) == xml_cur_mem
                else:
                    assert int(new_max_mem) - int(tg_size) == xml_max_mem
                    # Bug 1220702, skip the check for current memory
                    assert int(new_cur_mem) - int(tg_size) == xml_cur_mem
        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Found unmatched memory setting from domain xml")

    def check_mem_align():
        """
        Check if the memory settings are aligned to 256 MiB
        """
        dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        dom_mem = {}
        dom_mem['maxMemory'] = int(dom_xml.max_mem_rt)
        dom_mem['memory'] = int(dom_xml.memory)
        dom_mem['currentMemory'] = int(dom_xml.current_mem)

        cpuxml = dom_xml.cpu
        numa_cell = cpuxml.numa_cell
        dom_mem['numacellMemory'] = int(numa_cell[0]['memory'])
        sum_numa_mem = sum([int(cell['memory']) for cell in numa_cell])

        attached_mem = dom_xml.get_devices(device_type='memory')[0]
        dom_mem['attached_mem'] = attached_mem.target.size

        all_align = True
        for key in dom_mem:
            logging.info('%-20s:%15d', key, dom_mem[key])
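            # 262144 KiB == 256 MiB, the expected alignment unit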
            if dom_mem[key] % 262144:
                logging.error('%s is not aligned to 256 MiB', key)
                if key == 'currentMemory':
                    continue
                all_align = False

        if not all_align:
            test.fail('Memory not aligned to 256 MiB')

        if dom_mem['memory'] == sum_numa_mem + dom_mem['attached_mem']:
            logging.info(
                'Check Pass: Memory is equal to (all numa memory + memory device)'
            )
        else:
            test.fail(
                'Memory is not equal to (all numa memory + memory device)')

        return dom_mem

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        def _wait_for_restore():
            try:
                virsh.restore(save_file, debug=True, ignore_status=False)
                return True
            except Exception as e:
                logging.error(e)

        utils_misc.wait_for(_wait_for_restore, 30, step=5)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def add_device(dev_xml, attach, at_error=False):
        """
        Add memory device by attachment or modify domain xml.
        """
        if attach:
            ret = virsh.attach_device(vm_name,
                                      dev_xml.xml,
                                      flagstr=attach_option,
                                      debug=True)
            libvirt.check_exit_status(ret, at_error)
        else:
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
            if numa_cells:
                del vmxml.max_mem
                del vmxml.current_mem
            vmxml.add_device(dev_xml)
            vmxml.sync()

    def modify_domain_xml():
        """
        Modify domain xml and define it.
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        mem_unit = params.get("mem_unit", "KiB")
        vcpu = params.get("vcpu", "4")
        if max_mem_rt:
            vmxml.max_mem_rt = int(max_mem_rt)
            vmxml.max_mem_rt_slots = max_mem_slots
            vmxml.max_mem_rt_unit = mem_unit
        if max_mem:
            vmxml.max_mem = int(max_mem)
        if cur_mem:
            vmxml.current_mem = int(cur_mem)
        if memory_val:
            vmxml.memory = int(memory_val)
        if vcpu:
            vmxml.vcpu = int(vcpu)
            vcpu_placement = params.get("vcpu_placement", "static")
            vmxml.placement = vcpu_placement
        if numa_memnode:
            vmxml.numa_memory = {}
            vmxml.numa_memnode = numa_memnode
        else:
            try:
                del vmxml.numa_memory
                del vmxml.numa_memnode
            except Exception:
                # The elements do not exist
                pass

        if numa_cells:
            cells = [ast.literal_eval(x) for x in numa_cells]
            # Rounding the numa memory values
            if align_mem_values:
                for cell in range(cells.__len__()):
                    memory_value = str(
                        utils_numeric.align_value(cells[cell]["memory"],
                                                  align_to_value))
                    cells[cell]["memory"] = memory_value
            cpu_xml = vm_xml.VMCPUXML()
            cpu_xml.xml = "<cpu mode='host-model'><numa/></cpu>"
            cpu_mode = params.get("cpu_mode")
            model_fallback = params.get("model_fallback")
            if cpu_mode:
                cpu_xml.mode = cpu_mode
            if model_fallback:
                cpu_xml.fallback = model_fallback
            cpu_xml.numa_cell = cpu_xml.dicts_to_cells(cells)
            vmxml.cpu = cpu_xml
            # Delete the memory and currentMemory tags;
            # libvirt will fill them in automatically
            del vmxml.max_mem
            del vmxml.current_mem

        # hugepages setting
        if huge_pages or discard or cold_plug_discard:
            membacking = vm_xml.VMMemBackingXML()
            membacking.discard = True
            membacking.source = ''
            membacking.source_type = 'file'
            if huge_pages:
                hugepages = vm_xml.VMHugepagesXML()
                pagexml_list = []
                for i in range(len(huge_pages)):
                    pagexml = hugepages.PageXML()
                    pagexml.update(huge_pages[i])
                    pagexml_list.append(pagexml)
                hugepages.pages = pagexml_list
                membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml: %s", vmxml)
        vmxml.sync()

    pre_vm_state = params.get("pre_vm_state", "running")
    attach_device = "yes" == params.get("attach_device", "no")
    detach_device = "yes" == params.get("detach_device", "no")
    detach_alias = "yes" == params.get("detach_alias", "no")
    detach_alias_options = params.get("detach_alias_options")
    attach_error = "yes" == params.get("attach_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    detach_error = "yes" == params.get("detach_error", "no")
    maxmem_error = "yes" == params.get("maxmem_error", "no")
    attach_option = params.get("attach_option", "")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    wait_before_save_secs = int(params.get("wait_before_save_secs", 0))
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_mem_binding = "yes" == params.get("test_mem_binding", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    add_mem_device = "yes" == params.get("add_mem_device", "no")
    test_dom_xml = "yes" == params.get("test_dom_xml", "no")
    max_mem = params.get("max_mem")
    max_mem_rt = params.get("max_mem_rt")
    max_mem_slots = params.get("max_mem_slots", "16")
    memory_val = params.get('memory_val', '')
    mem_align = 'yes' == params.get('mem_align', 'no')
    hot_plug = 'yes' == params.get('hot_plug', 'no')
    cur_mem = params.get("current_mem")
    numa_cells = params.get("numa_cells", "").split()
    set_max_mem = params.get("set_max_mem")
    align_mem_values = "yes" == params.get("align_mem_values", "no")
    align_to_value = int(params.get("align_to_value", "65536"))
    hot_reboot = "yes" == params.get("hot_reboot", "no")
    rand_reboot = "yes" == params.get("rand_reboot", "no")
    guest_known_unplug_errors = []
    guest_known_unplug_errors.append(params.get("guest_known_unplug_errors"))
    host_known_unplug_errors = []
    host_known_unplug_errors.append(params.get("host_known_unplug_errors"))
    discard = "yes" == params.get("discard", "no")
    cold_plug_discard = "yes" == params.get("cold_plug_discard", "no")
    if cold_plug_discard or discard:
        mem_discard = 'yes'
    else:
        mem_discard = 'no'

    # params for attached device
    mem_model = params.get("mem_model", "dimm")
    tg_size = params.get("tg_size")
    tg_sizeunit = params.get("tg_sizeunit", 'KiB')
    tg_node = params.get("tg_node", 0)
    pg_size = params.get("page_size")
    pg_unit = params.get("page_unit", "KiB")
    huge_page_num = int(params.get('huge_page_num', 2000))
    node_mask = params.get("node_mask", "0")
    mem_addr = ast.literal_eval(params.get("memory_addr", "{}"))
    huge_pages = [
        ast.literal_eval(x) for x in params.get("huge_pages", "").split()
    ]
    numa_memnode = [
        ast.literal_eval(x) for x in params.get("numa_memnode", "").split()
    ]
    at_times = int(params.get("attach_times", 1))
    online = params.get("mem_online", "no")

    config = utils_config.LibvirtQemuConfig()
    setup_hugepages_flag = params.get("setup_hugepages")
    if (setup_hugepages_flag == "yes"):
        cpu_arch = cpu_util.get_family() if hasattr(cpu_util, 'get_family')\
            else cpu_util.get_cpu_arch()
        if cpu_arch == 'power8':
            pg_size = '16384'
            huge_page_num = 200
        elif cpu_arch == 'power9':
            pg_size = '2048'
            huge_page_num = 2000
        for x in huge_pages:
            x.update({'size': pg_size})
        setup_hugepages(int(pg_size), shp_num=huge_page_num)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    if not libvirt_version.version_compare(1, 2, 14):
        test.cancel("Memory hotplug not supported in current libvirt version.")

    if 'align_256m' in params.get('name', ''):
        arch = platform.machine()
        if arch.lower() != 'ppc64le':
            test.cancel('This case is for ppc64le only.')

    if align_mem_values:
        # Rounding the following values to 'align'
        max_mem = utils_numeric.align_value(max_mem, align_to_value)
        max_mem_rt = utils_numeric.align_value(max_mem_rt, align_to_value)
        cur_mem = utils_numeric.align_value(cur_mem, align_to_value)
        tg_size = utils_numeric.align_value(tg_size, align_to_value)

    try:
        # Drop caches first so the host has enough free memory
        drop_caches()
        # Destroy domain first
        if vm.is_alive():
            vm.destroy(gracefully=False)
        modify_domain_xml()
        numa_info = utils_misc.NumaInfo()
        logging.debug(numa_info.get_all_node_meminfo())

        # Start the domain anyway if attaching a memory device
        old_mem_total = None
        if attach_device:
            vm.start()
            session = vm.wait_for_login()
            old_mem_total = vm.get_totalmem_sys(online)
            logging.debug("Memtotal on guest: %s", old_mem_total)
            session.close()
        elif discard:
            vm.start()
            session = vm.wait_for_login()
            check_qemu_cmd(max_mem_rt, tg_size)
        dev_xml = None

        # To attach the memory device.
        if (add_mem_device and not hot_plug) or cold_plug_discard:
            at_times = int(params.get("attach_times", 1))
            randvar = 0
            if rand_reboot:
                rand_value = random.randint(15, 25)
                logging.debug("reboots at %s", rand_value)
            for x in xrange(at_times):
                # If any error is expected, the command error status should be
                # checked on the last iteration
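                # User-defined device aliases use the 'ua-' prefix, so the
                # device can later be detached by alias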
                device_alias = "ua-" + str(uuid.uuid4())
                dev_xml = utils_hotplug.create_mem_xml(
                    tg_size, pg_size, mem_addr, tg_sizeunit, pg_unit, tg_node,
                    node_mask, mem_model, mem_discard, device_alias)
                randvar = randvar + 1
                logging.debug("attaching device count = %s", x)
                if x == at_times - 1:
                    add_device(dev_xml, attach_device, attach_error)
                else:
                    add_device(dev_xml, attach_device)
                if hot_reboot:
                    vm.reboot()
                    vm.wait_for_login()
                if rand_reboot and randvar == rand_value:
                    vm.reboot()
                    vm.wait_for_login()
                    randvar = 0
                    rand_value = random.randint(15, 25)
                    logging.debug("reboots at %s", rand_value)

        # Check domain xml after attach device.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        # Set domain state
        if pre_vm_state == "transient":
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.fail("Can't create the domain")
        elif vm.is_dead():
            try:
                vm.start()
                vm.wait_for_login().close()
            except virt_vm.VMStartError as detail:
                if start_error:
                    pass
                else:
                    except_msg = "memory hotplug isn't supported by this QEMU binary"
                    if except_msg in detail.reason:
                        test.cancel(detail)
                    test.fail(detail)

        # Set memory operation
        if set_max_mem:
            max_mem_option = params.get("max_mem_option", "")
            ret = virsh.setmaxmem(vm_name, set_max_mem, flagstr=max_mem_option)
            libvirt.check_exit_status(ret, maxmem_error)

        # Hotplug memory device
        if add_mem_device and hot_plug:
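            # Log the running qemu processes for debugging before the hotplug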
            process.run('ps -ef|grep qemu', shell=True, verbose=True)
            session = vm.wait_for_login()
            original_mem = vm.get_totalmem_sys()
            dev_xml = utils_hotplug.create_mem_xml(tg_size, pg_size, mem_addr,
                                                   tg_sizeunit, pg_unit,
                                                   tg_node, node_mask,
                                                   mem_model)
            add_device(dev_xml, True)
            mem_after = vm.get_totalmem_sys()
            params['delta'] = mem_after - original_mem

        # Check domain xml after start the domain.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        if mem_align:
            dom_mem = check_mem_align()
            check_qemu_cmd(dom_mem['maxMemory'], dom_mem['attached_mem'])
            if hot_plug and params['delta'] != dom_mem['attached_mem']:
                test.fail(
                    'Memory after attach not equal to original mem + attached mem'
                )

        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd(max_mem_rt, tg_size)

        # Check guest meminfo after attachment
        if (attach_device and not attach_option.count("config")
                and not any([attach_error, start_error])):
            check_guest_meminfo(old_mem_total, check_option="attach")

        # Consuming memory on guest,
        # to verify memory changes by numastat
        if test_mem_binding:
            pid = vm.get_pid()
            old_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", old_numastat)
            # Increase the memory consumed to 1500 MB
            consume_vm_mem(1500)
            new_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", new_numastat)
            # Only check total memory which is the last element
            if float(new_numastat[-1]) - float(old_numastat[-1]) < 0:
                test.fail("Numa memory can't be consumed on guest")

        # Run managedsave command to check domain xml.
        if test_managedsave:
            # Wait for the vm to be ready before managedsave
            time.sleep(wait_before_save_secs)
            ret = virsh.managedsave(vm_name, **virsh_dargs)
            libvirt.check_exit_status(ret)

            def _wait_for_vm_start():
                try:
                    vm.start()
                    return True
                except Exception as e:
                    logging.error(e)

            utils_misc.wait_for(_wait_for_vm_start, timeout=30, step=5)
            vm.wait_for_login().close()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Run save and restore command to check domain xml
        if test_save_restore:
            # Wait for the vm to be ready before save
            time.sleep(wait_before_save_secs)
            check_save_restore()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Check domain xml after restarting libvirtd
        if restart_libvirtd:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Detach the memory device
        unplug_failed_with_known_error = False
        if detach_device:
            dev_xml = utils_hotplug.create_mem_xml(tg_size, pg_size, mem_addr,
                                                   tg_sizeunit, pg_unit,
                                                   tg_node, node_mask,
                                                   mem_model, mem_discard)
            for x in xrange(at_times):
                if not detach_alias:
                    ret = virsh.detach_device(vm_name,
                                              dev_xml.xml,
                                              flagstr=attach_option,
                                              debug=True)
                else:
                    ret = virsh.detach_device_alias(vm_name,
                                                    device_alias,
                                                    detach_alias_options,
                                                    debug=True)
                if ret.stderr and host_known_unplug_errors:
                    for known_error in host_known_unplug_errors:
                        if (known_error[0] == known_error[-1]) and \
                           known_error.startswith(("'")):
                            known_error = known_error[1:-1]
                        if known_error in ret.stderr:
                            unplug_failed_with_known_error = True
                            logging.debug(
                                "Known error occurred in Host, while"
                                " hot unplug: %s", known_error)
                if unplug_failed_with_known_error:
                    break
                try:
                    libvirt.check_exit_status(ret, detach_error)
                except Exception as detail:
                    dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir())
                    try:
                        session = vm.wait_for_login()
                        utils_misc.verify_dmesg(dmesg_log_file=dmesg_file,
                                                ignore_result=True,
                                                session=session,
                                                level_check=5)
                    except Exception:
                        session.close()
                        test.fail("After memory unplug Unable to connect to VM"
                                  " or unable to collect dmesg")
                    session.close()
                    if os.path.exists(dmesg_file):
                        with open(dmesg_file, 'r') as f:
                            flag = re.findall(
                                r'memory memory\d+?: Offline failed', f.read())
                        if not flag:
                            # The attached memory is in use by the vm and
                            # could not be unplugged. This result is expected.
                            os.remove(dmesg_file)
                            test.fail(detail)
                        unplug_failed_with_known_error = True
                        os.remove(dmesg_file)
            # Check whether a known error occurred or not
            dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir())
            try:
                session = vm.wait_for_login()
                utils_misc.verify_dmesg(dmesg_log_file=dmesg_file,
                                        ignore_result=True,
                                        session=session,
                                        level_check=4)
            except Exception:
                session.close()
                test.fail("After memory unplug Unable to connect to VM"
                          " or unable to collect dmesg")
            session.close()
            if guest_known_unplug_errors and os.path.exists(dmesg_file):
                for known_error in guest_known_unplug_errors:
                    if (known_error[0] == known_error[-1]) and \
                       known_error.startswith(("'")):
                        known_error = known_error[1:-1]
                    with open(dmesg_file, 'r') as f:
                        if known_error in f.read():
                            unplug_failed_with_known_error = True
                            logging.debug(
                                "Known error occurred, while hot"
                                " unplug: %s", known_error)
            if test_dom_xml and not unplug_failed_with_known_error:
                check_dom_xml(dt_mem=detach_device)
                # Remove dmesg temp file
                if os.path.exists(dmesg_file):
                    os.remove(dmesg_file)
    except xcepts.LibvirtXMLError:
        if define_error:
            pass
    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        if (setup_hugepages_flag == "yes"):
            restore_hugepages()
        vmxml_backup.sync()
Example 25
def run(test, params, env):
    """
    Test guest numa setting
    """
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes
    arch = platform.machine()
    if 'ppc64' in arch:
        try:
            ppc_memory_nodeset = ""
            nodes = params['memory_nodeset']
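            # Remap the node indices from the config onto the host's actual
            # online node IDs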
            if '-' in nodes:
                for n in range(int(nodes.split('-')[0]),
                               int(nodes.split('-')[1])):
                    ppc_memory_nodeset += str(node_list[n]) + ','
                ppc_memory_nodeset += str(node_list[int(nodes.split('-')[1])])
            else:
                node_lst = nodes.split(',')
                for n in range(len(node_lst) - 1):
                    ppc_memory_nodeset += str(node_list[int(
                        node_lst[n])]) + ','
                ppc_memory_nodeset += str(node_list[int(node_lst[-1])])
            params['memory_nodeset'] = ppc_memory_nodeset
        except IndexError:
            test.cancel("No of numas in config does not match with no of "
                        "online numas in system")
        except utils_params.ParamNotFound:
            pass
        pkeys = ('memnode_nodeset', 'page_nodenum')
        for pkey in pkeys:
            for key in params.keys():
                if pkey in key:
                    params[key] = str(node_list[int(params[key])])
        # Modify qemu command line
        try:
            if params['qemu_cmdline_mem_backend_1']:
                memory_nodeset = sorted(params['memory_nodeset'].split(','))
                if len(memory_nodeset) > 1:
                    if int(memory_nodeset[1]) - int(memory_nodeset[0]) == 1:
                        qemu_cmdline = "memory-backend-ram,.*?id=ram-node1," \
                                       ".*?host-nodes=%s-%s,policy=bind" % \
                                       (memory_nodeset[0], memory_nodeset[1])
                    else:
                        qemu_cmdline = "memory-backend-ram,.*?id=ram-node1," \
                                       ".*?host-nodes=%s,.*?host-nodes=%s,policy=bind" % \
                                       (memory_nodeset[0], memory_nodeset[1])
                    params['qemu_cmdline_mem_backend_1'] = qemu_cmdline
        except utils_params.ParamNotFound:
            pass
        try:
            if params['qemu_cmdline_mem_backend_0']:
                qemu_cmdline = params['qemu_cmdline_mem_backend_0']
                params['qemu_cmdline_mem_backend_0'] = qemu_cmdline.replace(
                    ".*?host-nodes=1",
                    ".*?host-nodes=%s" % params['memnode_nodeset_0'])
        except utils_params.ParamNotFound:
            pass
    vcpu_num = int(params.get("vcpu_num", 2))
    max_mem = int(params.get("max_mem", 1048576))
    max_mem_unit = params.get("max_mem_unit", 'KiB')
    vcpu_placement = params.get("vcpu_placement", 'static')
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
    mode_dict = {
        'strict': 'bind',
        'preferred': 'prefer',
        'interleave': 'interleave'
    }

    # Prepare numatune memory parameter dict and list
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    memnode_tuple = ('memnode_cellid', 'memnode_mode', 'memnode_nodeset')
    numa_memnode = handle_param(memnode_tuple, params)

    if numa_memnode:
        if not libvirt_version.version_compare(1, 2, 7):
            test.cancel("Setting hugepages more specifically per "
                        "numa node not supported on current "
                        "version")

    # Prepare cpu numa cell parameter
    topology = {}
    topo_tuple = ('sockets', 'cores', 'threads')
    for key in topo_tuple:
        if params.get(key):
            topology[key] = params.get(key)

    cell_tuple = ('cell_id', 'cell_cpus', 'cell_memory')
    numa_cell = handle_param(cell_tuple, params)

    # Prepare qemu cmdline check parameter
    cmdline_tuple = ("qemu_cmdline", )
    cmdline_list = handle_param(cmdline_tuple, params)

    # Prepare hugepages parameter
    backup_list = []
    page_tuple = ('vmpage_size', 'vmpage_unit', 'vmpage_nodeset')
    page_list = handle_param(page_tuple, params)
    nr_pagesize_total = params.get("nr_pagesize_total")
    deallocate = False

    if page_list:
        if not libvirt_version.version_compare(1, 2, 5):
            test.cancel("Setting hugepages more specifically per "
                        "numa node not supported on current "
                        "version")

    hp_cl = test_setup.HugePageConfig(params)
    supported_hp_size = hp_cl.get_multi_supported_hugepage_size()
    mount_path = []
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    qemu_conf_restore = False

    def _update_qemu_conf():
        """
        Mount hugepage path, update qemu conf then restart libvirtd
        """
        size_dict = {'2048': '2M', '1048576': '1G', '16384': '16M'}
        for page in page_list:
            if page['size'] not in supported_hp_size:
                test.cancel("Hugepage size [%s] isn't supported, "
                            "please verify kernel cmdline configuration." %
                            page['size'])
            m_path = "/dev/hugepages%s" % size_dict[page['size']]
            hp_cl.hugepage_size = int(page['size'])
            hp_cl.hugepage_path = m_path
            hp_cl.mount_hugepage_fs()
            mount_path.append(m_path)
        if mount_path:
            qemu_conf.hugetlbfs_mount = mount_path
            libvirtd.restart()

    try:
        # Get host numa node list
        logging.debug("host node list is %s", node_list)
        used_node = []
        if numa_memory.get('nodeset'):
            used_node += cpu.cpus_parser(numa_memory['nodeset'])
        if numa_memnode:
            for i in numa_memnode:
                used_node += cpu.cpus_parser(i['nodeset'])
        if page_list:
            host_page_tuple = ("hugepage_size", "page_num", "page_nodenum")
            h_list = handle_param(host_page_tuple, params)
            h_nodenum = [
                h_list[p_size]['nodenum'] for p_size in range(len(h_list))
            ]
            for i in h_nodenum:
                used_node += cpu.cpus_parser(i)
        if used_node and not status_error:
            logging.debug("set node list is %s", used_node)
            used_node = list(set(used_node))
            for i in used_node:
                if i not in node_list:
                    test.cancel("%s in nodeset out of range" % i)
                mem_size = host_numa_node.read_from_node_meminfo(i, 'MemTotal')
                logging.debug("the memory total in the node %s is %s", i,
                              mem_size)
                if not int(mem_size):
                    test.cancel("node %s memory is empty" % i)

        # set hugepage with qemu.conf and mount path
        _update_qemu_conf()
        qemu_conf_restore = True

        # set hugepage with total number or per-node number
        if nr_pagesize_total:
            # Only set the total number of 2M hugepages, since a runtime
            # update of the total 1G hugepage count is not supported.
            deallocate = True
            hp_cl.target_hugepages = int(nr_pagesize_total)
            hp_cl.set_hugepages()
        if page_list:
            hp_size = [h_list[p_size]['size'] for p_size in range(len(h_list))]
            multi_hp_size = hp_cl.get_multi_supported_hugepage_size()
            for size in hp_size:
                if size not in multi_hp_size:
                    test.cancel("The hugepage size %s not "
                                "supported or not configured under"
                                " current running kernel." % size)
            # backup node page setting and set new value
            for i in h_list:
                node_val = hp_cl.get_node_num_huge_pages(
                    i['nodenum'], i['size'])
                # Set hugepages per node if the current value is not enough;
                # runtime update of the per-node 1G hugepage count is supported
                if int(i['num']) > node_val:
                    node_dict = i.copy()
                    node_dict['num'] = node_val
                    backup_list.append(node_dict)
                    hp_cl.set_node_num_huge_pages(i['num'], i['nodenum'],
                                                  i['size'])
                    node_val_after_set = hp_cl.get_node_num_huge_pages(
                        i['nodenum'], i['size'])
                    if node_val_after_set < int(i['num']):
                        test.cancel("There is not enough memory to allocate.")

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.vcpu = vcpu_num
        vmxml.max_mem = max_mem
        vmxml.max_mem_unit = max_mem_unit
        vmxml.current_mem = max_mem
        vmxml.current_mem_unit = max_mem_unit

        # numatune setting
        if numa_memnode:
            vmxml.numa_memory = numa_memory
            vmxml.numa_memnode = numa_memnode
            del vmxml.numa_memory
        if numa_memory:
            vmxml.numa_memory = numa_memory

        # vcpu placement setting
        vmxml.placement = vcpu_placement

        # guest numa cpu setting
        vmcpuxml = libvirt_xml.vm_xml.VMCPUXML()
        vmcpuxml.xml = "<cpu><numa/></cpu>"
        if topology:
            vmcpuxml.topology = topology
        logging.debug(vmcpuxml.numa_cell)
        vmcpuxml.numa_cell = numa_cell
        logging.debug(vmcpuxml.numa_cell)
        vmxml.cpu = vmcpuxml

        # hugepages setting
        if page_list:
            membacking = libvirt_xml.vm_xml.VMMemBackingXML()
            hugepages = libvirt_xml.vm_xml.VMHugepagesXML()
            pagexml_list = []
            for i in range(len(page_list)):
                pagexml = hugepages.PageXML()
                pagexml.update(page_list[i])
                pagexml_list.append(pagexml)
            hugepages.pages = pagexml_list
            membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            session = vm.wait_for_login()
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug("vm xml after start is %s", vmxml_new)

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if status_error:
                return
            else:
                test.fail("Test failed in positive case.\n error:"
                          " %s\n%s" % (e, bug_url))

        vm_pid = vm.get_pid()
        # numa hugepage check
        if page_list:
            with open("/proc/%s/numa_maps" % vm_pid) as numa_maps:
                numa_map_info = numa_maps.read()
            hugepage_info = re.findall(".*file=\S*hugepages.*", numa_map_info)
            if not hugepage_info:
                test.fail("Can't find hugepages usage info in vm " "numa maps")
            else:
                logging.debug("The hugepage info in numa_maps is %s" %
                              hugepage_info)
                map_dict = {}
                usage_dict = {}
                node_pattern = r"\s(\S+):(\S+)\s.*ram-node(\d+).*\s"
                node_pattern += r"N(\d+)=(\d+)"
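                # Groups: memory policy mode, its nodeset, the guest cell id
                # (ram-nodeN), and the host node number with its page count
                # (N<node>=<pages>)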
                for map_info in hugepage_info:
                    for (mem_mode, mem_num, cell_num, host_node_num,
                         vm_page_num) in re.findall(node_pattern, map_info):
                        usage_dict[mem_mode] = cpu.cpus_parser(mem_num)
                        usage_dict[host_node_num] = vm_page_num
                        map_dict[cell_num] = usage_dict.copy()
                logging.debug("huagepage info in vm numa maps is %s", map_dict)
                memnode_dict = {}
                usage_dict = {}
                if numa_memnode:
                    for i in numa_memnode:
                        node = cpu.cpus_parser(i['nodeset'])
                        mode = mode_dict[i['mode']]
                        usage_dict[mode] = node
                        memnode_dict[i['cellid']] = usage_dict.copy()
                    logging.debug("memnode setting dict is %s", memnode_dict)
                    for k in list(memnode_dict.keys()):
                        for mk in list(memnode_dict[k].keys()):
                            if memnode_dict[k][mk] != map_dict[k][mk]:
                                test.fail("vm pid numa map dict %s"
                                          " not expected" % map_dict)

        # qemu command line check
        with open("/proc/%s/cmdline" % vm_pid) as f_cmdline:
            q_cmdline_list = f_cmdline.read().split("\x00")
        logging.debug("vm qemu cmdline list is %s" % q_cmdline_list)
        for cmd in cmdline_list:
            logging.debug("checking '%s' in qemu cmdline", cmd['cmdline'])
            p_found = False
            for q_cmd in q_cmdline_list:
                if re.search(cmd['cmdline'], q_cmd):
                    p_found = True
                    break
                else:
                    continue
            if not p_found:
                test.fail("%s not found in vm qemu cmdline" % cmd['cmdline'])

        # vm inside check
        vm_cpu_info = cpu.get_cpu_info(session)
        logging.debug("lscpu output dict in vm is %s", vm_cpu_info)
        session.close()
        node_num = int(vm_cpu_info["NUMA node(s)"])
        if node_num != len(numa_cell):
            test.fail("node number %s in vm is not expected" % node_num)
        for i in range(len(numa_cell)):
            cpu_str = vm_cpu_info["NUMA node%s CPU(s)" % i]
            vm_cpu_list = cpu.cpus_parser(cpu_str)
            cpu_list = cpu.cpus_parser(numa_cell[i]["cpus"])
            if vm_cpu_list != cpu_list:
                test.fail("vm node %s cpu list %s not expected" %
                          (i, vm_cpu_list))
        if topology:
            vm_topo_tuple = ("Socket(s)", "Core(s) per socket",
                             "Thread(s) per core")
            for i in range(len(topo_tuple)):
                topo_info = vm_cpu_info[vm_topo_tuple[i]]
                if topo_info != topology[topo_tuple[i]]:
                    test.fail("%s in vm topology not expected." %
                              topo_tuple[i])
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if page_list:
            for i in backup_list:
                hp_cl.set_node_num_huge_pages(i['num'], i['nodenum'],
                                              i['size'])
        if deallocate:
            hp_cl.deallocate = deallocate
            hp_cl.cleanup()
        if qemu_conf_restore:
            qemu_conf.restore()
            libvirtd.restart()
            for mt_path in mount_path:
                try:
                    process.run("umount %s" % mt_path, shell=True)
                except process.CmdError:
                    logging.warning("umount %s failed" % mt_path)
Esempio n. 26
0
def run(test, params, env):
    """
    Test numa tuning with memory
    """
    numad_log = []
    memory_status = []

    def _logger(line):
        """
        Callback function to log libvirtd output.
        """
        numad_log.append(line)

    def mem_compare(used_node, left_node):
        """
        Memory used in the used nodes should be greater than in the remaining nodes

        :param used_node: used node list
        :param left_node: left node list
        """
        used_mem_total = 0
        left_node_mem_total = 0
        for i in used_node:
            used_mem_total += int(memory_status[i])
        for i in left_node:
            left_node_mem_total += int(memory_status[i])
        if left_node_mem_total > used_mem_total:
            raise error.TestFail("nodes memory usage not expected.")

    def format_affinity_str(cpu_list):
        """
        Format affinity str

        :param cpu_list: list of cpu number
        :return: cpu affinity string
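        e.g. cpu_list=[0, 2] on a 4-CPU host yields "y-y-"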
        """
        cmd = "lscpu | grep '^CPU(s):'"
        cpu_num = int(utils.run(cmd).stdout.strip().split(':')[1].strip())
        cpu_affinity_str = ""
        for i in range(cpu_num):
            if i in cpu_list:
                cpu_affinity_str += "y"
            else:
                cpu_affinity_str += "-"
        return cpu_affinity_str

    def cpu_affinity_check(cpuset=None, node=None):
        """
        Check vcpuinfo cpu affinity

        :param cpuset: cpuset list
        :param node: node number list
        """
        result = virsh.vcpuinfo(vm_name, debug=True)
        output = result.stdout.strip().splitlines()[-1]
        cpu_affinity = output.split(":")[-1].strip()
        if node:
            tmp_list = []
            for node_num in node:
                host_node = utils_misc.NumaNode(i=node_num+1)
                logging.debug("node %s cpu list is %s" %
                              (node_num, host_node.cpus))
                tmp_list += host_node.cpus
            cpu_list = [int(i) for i in tmp_list]
        if cpuset:
            cpu_list = cpuset
        ret = format_affinity_str(cpu_list)
        logging.debug("expect cpu affinity is %s", ret)
        if cpu_affinity != ret:
            raise error.TestFail("vcpuinfo cpu affinity not expected")

    vcpu_placement = params.get("vcpu_placement")
    vcpu_cpuset = params.get("vcpu_cpuset")
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("vms")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    # Prepare libvirtd session with log level as 1
    config_path = "/var/tmp/virt-test.conf"
    open(config_path, 'a').close()
    config = utils_config.LibvirtdConfig(config_path)
    config.log_level = 1
    arg_str = "--config %s" % config_path
    numad_reg = ".*numad"
    libvirtd = utils_libvirtd.LibvirtdSession(logging_handler=_logger,
                                              logging_pattern=numad_reg)

    try:
        libvirtd.start(arg_str=arg_str)

        # Get host numa node list
        host_numa_node = utils_misc.NumaInfo()
        node_list = host_numa_node.online_nodes
        logging.debug("host node list is %s", node_list)

        # Get host cpu list
        tmp_list = []
        for node_num in node_list:
            host_node = utils_misc.NumaNode(i=node_num+1)
            logging.debug("node %s cpu list is %s" %
                          (node_num, host_node.cpus))
            tmp_list += host_node.cpus
        cpu_list = [int(i) for i in tmp_list]

        if numa_memory.get('nodeset'):
            used_node = utils_test.libvirt.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            if not status_error:
                if not set(used_node).issubset(node_list):
                    raise error.TestNAError("nodeset %s out of range" %
                                            numa_memory['nodeset'])

        if vcpu_cpuset:
            pre_cpuset = utils_test.libvirt.cpus_parser(vcpu_cpuset)
            logging.debug("Parsed cpuset list is %s", pre_cpuset)
            if not set(pre_cpuset).issubset(cpu_list):
                raise error.TestNAError("cpuset %s out of range" %
                                        vcpu_cpuset)

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        vcpu_num = vmxml.vcpu
        max_mem = vmxml.max_mem
        if vcpu_placement:
            vmxml.placement = vcpu_placement
        if vcpu_cpuset:
            vmxml.cpuset = vcpu_cpuset
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()
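        # When placement is 'auto', libvirtd is expected to consult numad with
        # an invocation like 'numad -w <vcpus>:<memory in MiB>'; build that
        # option string so it can be compared against the captured log below.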
        numad_cmd_opt = "-w %s:%s" % (vcpu_num, max_mem/1024)

        try:
            vm.start()
            vm.wait_for_login()
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            numa_memory_new = vmxml_new.numa_memory
            logging.debug("Current memory config dict is %s" % numa_memory_new)

            # Check xml config
            if numa_memory.get('placement') == 'static':
                pre_numa_memory = numa_memory.copy()
                del pre_numa_memory['placement']
            else:
                pre_numa_memory = numa_memory

            if pre_numa_memory != numa_memory_new:
                raise error.TestFail("memory config %s not expected after "
                                     "domain start" % numa_memory_new)

            pos_vcpu_placement = vmxml_new.placement
            logging.debug("vcpu placement after domain start is %s",
                          pos_vcpu_placement)
            try:
                pos_cpuset = vmxml_new.cpuset
                logging.debug("vcpu cpuset after vm start is %s", pos_cpuset)
            except libvirt_xml.xcepts.LibvirtXMLNotFoundError:
                if vcpu_cpuset and vcpu_placement != 'auto':
                    raise error.TestFail("cpuset not found in domain xml.")

        except virt_vm.VMStartError, e:
            # Starting VM failed.
            if status_error:
                return
            else:
                raise error.TestFail("Test failed in positive case.\n error:"
                                     " %s\n%s" % (e, bug_url))

        # Check qemu process numa memory usage
        memory_status, qemu_cpu = utils_test.qemu.get_numa_status(
            host_numa_node,
            vm.get_pid())
        logging.debug("The memory status is %s", memory_status)
        logging.debug("The cpu usage is %s", qemu_cpu)

        if vcpu_cpuset:
            total_cpu = []
            for node_cpu in qemu_cpu:
                total_cpu += node_cpu
            for i in total_cpu:
                if int(i) not in pre_cpuset:
                    raise error.TestFail("cpu %s is not expected" % i)
            cpu_affinity_check(cpuset=pre_cpuset)
        if numa_memory.get('nodeset'):
            # If the host has non-contiguous node numbers, convert them
            # into positions in node_list so they can be used to index
            # memory_status in mem_compare
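            # e.g. online nodes [0, 2, 3] with nodeset '2' give
            # used_node=[1] and left_node=[0, 2] as positions in node_list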
            left_node = [node_list.index(i) for i in node_list if i not in used_node]
            used_node = [node_list.index(i) for i in used_node]
            mem_compare(used_node, left_node)

        logging.debug("numad log list is %s", numad_log)
        if vcpu_placement == 'auto' or numa_memory.get('placement') == 'auto':
            if not numad_log:
                raise error.TestFail("numad usage not found in libvirtd log")
            if numad_log[0].split("numad ")[-1] != numad_cmd_opt:
                raise error.TestFail("numad command not expected in log")
            numad_ret = numad_log[1].split("numad: ")[-1]
            numad_node = utils_test.libvirt.cpus_parser(numad_ret)
            left_node = [node_list.index(i) for i in node_list if i not in numad_node]
            numad_node_seq = [node_list.index(i) for i in numad_node]
            logging.debug("numad nodes are %s", numad_node)
            if numa_memory.get('placement') == 'auto':
                mem_compare(numad_node_seq, left_node)
            if vcpu_placement == 'auto':
                for i in left_node:
                    if qemu_cpu[i]:
                        raise error.TestFail("cpu usage in node %s is not "
                                             "expected" % i)
                cpu_affinity_check(node=numad_node)
Esempio n. 27
0
    def output_check(nodeinfo_output):
        # Check CPU model
        cpu_model_nodeinfo = _check_nodeinfo(nodeinfo_output, "CPU model", 3)
        cpu_model_os = utils.get_current_kernel_arch()
        if not re.match(cpu_model_nodeinfo, cpu_model_os):
            raise error.TestFail(
                "Virsh nodeinfo output didn't match CPU model")

        # Check number of CPUs, nodeinfo CPUs represent online threads in the
        # system, check all online cpus in sysfs
        cpus_nodeinfo = _check_nodeinfo(nodeinfo_output, "CPU(s)", 2)
        cmd = "cat /sys/devices/system/cpu/cpu*/online | grep 1 | wc -l"
        cpus_online = utils.run(cmd, ignore_status=True).stdout.strip()
        cmd = "cat /sys/devices/system/cpu/cpu*/online | wc -l"
        cpus_total = utils.run(cmd, ignore_status=True).stdout.strip()
        if not os.path.exists('/sys/devices/system/cpu/cpu0/online'):
            cpus_online = str(int(cpus_online) + 1)
            cpus_total = str(int(cpus_total) + 1)

        logging.debug("host online cpus are %s", cpus_online)
        logging.debug("host total cpus are %s", cpus_total)

        if cpus_nodeinfo != cpus_online:
            if 'power' in cpu_util.get_cpu_arch():
                if cpus_nodeinfo != cpus_total:
                    raise error.TestFail("Virsh nodeinfo output of CPU(s) on"
                                         " ppc did not match all threads in "
                                         "the system")
            else:
                raise error.TestFail("Virsh nodeinfo output didn't match "
                                     "number of CPU(s)")

        # Check CPU frequency, frequency is under clock for ppc
        cpu_frequency_nodeinfo = _check_nodeinfo(
            nodeinfo_output, 'CPU frequency', 3)
        cmd = ("cat /proc/cpuinfo | grep -E 'cpu MHz|clock' | head -n1 | "
               "awk -F: '{print $2}' | awk -F. '{print $1}'")
        cmd_result = utils.run(cmd, ignore_status=True)
        cpu_frequency_os = cmd_result.stdout.strip()
        logging.debug("cpu_frequency_nodeinfo=%s cpu_frequency_os=%s",
                      cpu_frequency_nodeinfo, cpu_frequency_os)
        #
        # Matching CPU Frequency is not an exact science in todays modern
        # processors and OS's. CPU's can have their execution speed varied
        # based on current workload in order to save energy and keep cool.
        # Thus since we're getting the values at disparate points in time,
        # we cannot necessarily do a pure comparison.
        # So, let's get the absolute value of the difference and ensure
        # that it's within 20 percent of each value to give us enough of
        # a "fudge" factor to declare "close enough". Don't return a failure
        # just print a debug message and move on.
        diffval = abs(int(cpu_frequency_nodeinfo) - int(cpu_frequency_os))
        if float(diffval) / float(cpu_frequency_nodeinfo) > 0.20 or \
           float(diffval) / float(cpu_frequency_os) > 0.20:
            logging.debug("Virsh nodeinfo output didn't match CPU "
                          "frequency within 20 percent")

        # Get CPU topology from virsh capabilities xml
        cpu_topology = capability_xml.CapabilityXML()['cpu_topology']
        logging.debug("Cpu topology in virsh capabilities output: %s",
                      cpu_topology)

        # Check CPU socket(s)
        cpu_sockets_nodeinfo = int(
            _check_nodeinfo(nodeinfo_output, 'CPU socket(s)', 3))
        # CPU socket(s) in virsh nodeinfo is the number of sockets per NUMA
        # node, not the total sockets in the system, so get the socket count
        # of one node and compare against it
        node_info = utils_misc.NumaInfo()
        node_online_list = node_info.get_online_nodes()
        cmd = "cat /sys/devices/system/node/node%s" % node_online_list[0]
        cmd += "/cpu*/topology/physical_package_id | uniq |wc -l"
        cmd_result = utils.run(cmd, ignore_status=True)
        total_sockets_in_node = int(cmd_result.stdout.strip())
        if total_sockets_in_node != cpu_sockets_nodeinfo:
            raise error.TestFail("Virsh nodeinfo output didn't match CPU "
                                 "socket(s) of host OS")
        if cpu_sockets_nodeinfo != int(cpu_topology['sockets']):
            raise error.TestFail("Virsh nodeinfo output didn't match CPU "
                                 "socket(s) of virsh capabilities output")

        # Check Core(s) per socket
        cores_per_socket_nodeinfo = _check_nodeinfo(
            nodeinfo_output, 'Core(s) per socket', 4)
        cmd = "lscpu | grep 'Core(s) per socket' | head -n1 | awk '{print $4}'"
        cmd_result = utils.run(cmd, ignore_status=True)
        cores_per_socket_os = cmd_result.stdout.strip()
        if not re.match(cores_per_socket_nodeinfo, cores_per_socket_os):
            raise error.TestFail("Virsh nodeinfo output didn't match Core(s) "
                                 "per socket of host OS")
        if cores_per_socket_nodeinfo != cpu_topology['cores']:
            raise error.TestFail("Virsh nodeinfo output didn't match Core(s) "
                                 "per socket of virsh capabilities output")

        # Check Thread(s) per core
        threads_per_core_nodeinfo = _check_nodeinfo(nodeinfo_output,
                                                    'Thread(s) per core', 4)
        if threads_per_core_nodeinfo != cpu_topology['threads']:
            raise error.TestFail("Virsh nodeinfo output didn't match Thread(s) "
                                 "per core of virsh capabilities output")

        # Check Memory size
        memory_size_nodeinfo = int(
            _check_nodeinfo(nodeinfo_output, 'Memory size', 3))
        memory_size_os = 0
        for i in node_online_list:
            node_memory = node_info.read_from_node_meminfo(i, 'MemTotal')
            memory_size_os += int(node_memory)
        logging.debug('The host total memory from nodes is %s', memory_size_os)

        if memory_size_nodeinfo != memory_size_os:
            raise error.TestFail("Virsh nodeinfo output didn't match "
                                 "Memory size")
Esempio n. 28
0
def run(test, params, env):
    """
    Qemu multiqueue test for virtio-scsi controller:

    1) Boot up a guest with a virtio-scsi device which supports multi-queue;
       the vcpu count and image count of the guest should match the queue number
    2) Check the multi queue option from monitor
    3) Check device init status in guest
    4) Load I/O in all targets
    5) Check the interrupt queues in guest

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def proc_interrupts_results(results):
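        """
        Parse /proc/interrupts style output into a dict keyed by IRQ id.

        Each data line has the form '<irq>: <per-CPU counts> <description>'
        and is preceded by a header row listing CPU0..CPUn; the dict of
        per-IRQ counts/descriptions and the CPU list are returned.
        """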
        results_dict = {}
        cpu_count = 0
        cpu_list = []
        for line in results.splitlines():
            line = line.strip()
            if re.match("CPU0", line):
                cpu_list = re.findall("CPU\d+", line)
                cpu_count = len(cpu_list)
                continue
            if cpu_count > 0:
                irq_key = re.split(":", line)[0]
                results_dict[irq_key] = {}
                content = line[len(irq_key) + 1:].strip()
                if len(re.split("\s+", content)) < cpu_count:
                    continue
                count = 0
                irq_des = ""
                for irq_item in re.split("\s+", content):
                    if count < cpu_count:
                        if count == 0:
                            results_dict[irq_key]["count"] = []
                        results_dict[irq_key]["count"].append(irq_item)
                    else:
                        irq_des += " %s" % irq_item
                    count += 1
                results_dict[irq_key]["irq_des"] = irq_des.strip()
        return results_dict, cpu_list

    timeout = float(params.get("login_timeout", 240))
    host_cpu_num = local_host.LocalHost().get_num_cpu()
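    # Round the host CPU count down to a power of two: each pass clears the
    # lowest set bit, so the last value copied into num_queues is the highest
    # single bit of the original count.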
    while host_cpu_num:
        num_queues = str(host_cpu_num)
        host_cpu_num &= host_cpu_num - 1
    params['smp'] = num_queues
    params['num_queues'] = num_queues
    images_num = int(num_queues)
    extra_image_size = params.get("image_size_extra_images", "512M")
    system_image = params.get("images")
    system_image_drive_format = params.get("system_image_drive_format", "ide")
    params["drive_format_%s" % system_image] = system_image_drive_format
    dev_type = params.get("dev_type", "i440FX-pcihost")

    error.context("Boot up guest with block devcie with num_queues"
                  " is %s and smp is %s" % (num_queues, params['smp']),
                  logging.info)
    for vm in env.get_all_vms():
        if vm.is_alive():
            vm.destroy()
    for extra_image in range(images_num):
        image_tag = "stg%s" % extra_image
        params["images"] += " %s" % image_tag
        params["image_name_%s" % image_tag] = "images/%s" % image_tag
        params["image_size_%s" % image_tag] = extra_image_size
        params["force_create_image_%s" % image_tag] = "yes"
        image_params = params.object_params(image_tag)
        env_process.preprocess_image(test, image_params, image_tag)

    params["start_vm"] = "yes"
    vm = env.get_vm(params["main_vm"])
    env_process.preprocess_vm(test, params, env, vm.name)
    session = vm.wait_for_login(timeout=timeout)

    error.context("Check irqbalance service status", logging.info)
    output = session.cmd_output("systemctl status irqbalance")
    if not re.findall("Active: active", output):
        session.cmd("systemctl start irqbalance")
        output = session.cmd_output("systemctl status irqbalance")
        output = utils_misc.strip_console_codes(output)
        if not re.findall("Active: active", output):
            raise error.TestNAError("Can not start irqbalance inside guest. "
                                    "Skip this test.")

    error.context("Pin vcpus to host cpus", logging.info)
    host_numa_nodes = utils_misc.NumaInfo()
    vcpu_num = 0
    for numa_node_id in host_numa_nodes.nodes:
        numa_node = host_numa_nodes.nodes[numa_node_id]
        for _ in range(len(numa_node.cpus)):
            if vcpu_num >= len(vm.vcpu_threads):
                break
            vcpu_tid = vm.vcpu_threads[vcpu_num]
            logging.debug("pin vcpu thread(%s) to cpu"
                          "(%s)" % (vcpu_tid,
                                    numa_node.pin_cpu(vcpu_tid)))
            vcpu_num += 1

    error.context("Verify num_queues from monitor", logging.info)
    qtree = qemu_qtree.QtreeContainer()
    try:
        qtree.parse_info_qtree(vm.monitor.info('qtree'))
    except AttributeError:
        raise error.TestNAError("Monitor deson't supoort qtree "
                                "skip this test")
    error_msg = "Number of queues mismatch: expect %s"
    error_msg += " report from monitor: %s(%s)"
    scsi_bus_addr = ""
    for qdev in qtree.get_qtree().get_children():
        if qdev.qtree["type"] == dev_type:
            for pci_bus in qdev.get_children():
                for pcic in pci_bus.get_children():
                    if pcic.qtree["class_name"] == "SCSI controller":
                        qtree_queues = pcic.qtree["num_queues"].split("(")[0]
                        if qtree_queues.strip() != num_queues.strip():
                            error_msg = error_msg % (num_queues,
                                                     qtree_queues,
                                                     pcic.qtree["num_queues"])
                            raise error.TestFail(error_msg)
                    if pcic.qtree["class_name"] == "SCSI controller":
                        scsi_bus_addr = pcic.qtree['addr']
                        break

    if not scsi_bus_addr:
        raise error.TestError("Didn't find addr from qtree. Please check "
                              "the log.")
    error.context("Check device init status in guest", logging.info)
    init_check_cmd = params.get("init_check_cmd", "dmesg | grep irq")
    output = session.cmd_output(init_check_cmd)
    irqs_pattern = params.get("irqs_pattern", "%s:\s+irq\s+(\d+)")
    irqs_pattern = irqs_pattern % scsi_bus_addr
    irqs_watch = re.findall(irqs_pattern, output)
    # A virtio-scsi device registers several interrupt vectors: config,
    # control and event, plus one request vector per queue. So the total
    # number of interrupts for the device should equal the queue count
    # plus three.
    if len(irqs_watch) != 3 + int(num_queues):
        raise error.TestFail("Failed to check the interrupt ids from dmesg")
    irq_check_cmd = params.get("irq_check_cmd", "cat /proc/interrupts")
    output = session.cmd_output(irq_check_cmd)
    irq_results, _ = proc_interrupts_results(output)
    for irq_watch in irqs_watch:
        if irq_watch not in irq_results:
            raise error.TestFail("Can't find irq %s from procfs" % irq_watch)

    error.context("Load I/O in all targets", logging.info)
    get_dev_cmd = params.get("get_dev_cmd", "ls /dev/[svh]d*")
    output = session.cmd_output(get_dev_cmd)
    system_dev = re.findall("[svh]d(\w+)\d+", output)[0]
    dd_timeout = int(re.findall("\d+", extra_image_size)[0])
    fill_cmd = ""
    count = 0
    for dev in re.split("\s+", output):
        if not dev:
            continue
        if not re.findall("[svh]d%s" % system_dev, dev):
            fill_cmd += " dd of=%s if=/dev/urandom bs=1M " % dev
            fill_cmd += "count=%s &&" % dd_timeout
            count += 1
    if count != images_num:
        raise error.TestError("Disks are not all show up in system. Output "
                              "from the check command: %s" % output)
    fill_cmd = fill_cmd.rstrip("&&")
    session.cmd(fill_cmd, timeout=dd_timeout)

    error.context("Check the interrupt queues in guest", logging.info)
    output = session.cmd_output(irq_check_cmd)
    irq_results, cpu_list = proc_interrupts_results(output)
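    # Build a bitmap over cpu_list: bit i is set when CPU i handled at least
    # one 'request' interrupt, so CPUs left unused can be reported afterwards.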
    irq_bit_map = 0
    for irq_watch in irqs_watch:
        if "request" in irq_results[irq_watch]["irq_des"]:
            for index, count in enumerate(irq_results[irq_watch]["count"]):
                if int(count) > 0:
                    irq_bit_map |= 2 ** index

    cpu_count = 0
    error_msg = ""
    cpu_not_used = []
    for index, cpu in enumerate(cpu_list):
        if 2 ** index & irq_bit_map != 2 ** index:
            cpu_not_used.append(cpu)

    if cpu_not_used:
        logging.debug("Interrupt info from procfs:\n%s" % output)
        error_msg = " ".join(cpu_not_used)
        if len(cpu_not_used) > 1:
            error_msg += " are"
        else:
            error_msg += " is"
        error_msg += " not used during test. Please check debug log for"
        error_msg += " more information."
        raise error.TestFail(error_msg)
Esempio n. 29
0
def run(test, params, env):
    """
    Test numa memory migrate with live numa tuning
    """
    numad_log = []
    memory_status = []

    def _logger(line):
        """
        Callback function to log libvirtd output.
        """
        numad_log.append(line)

    def mem_compare(used_node, left_node):
        """
        Memory used in the used nodes should be greater than in the remaining nodes

        :param used_node: used node list
        :param left_node: left node list
        """
        used_mem_total = 0
        left_node_mem_total = 0
        for i in used_node:
            used_mem_total += int(memory_status[i])
        for i in left_node:
            left_node_mem_total += int(memory_status[i])
        if left_node_mem_total > used_mem_total:
            raise error.TestFail("nodes memory usage not expected.")

    vm_name = params.get("main_vm")
    options = params.get("options", "live")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Get host numa node list
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes
    logging.debug("host node list is %s", node_list)
    if len(node_list) < 2:
        raise error.TestNAError("At least 2 numa nodes are needed on host")

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    # Prepare libvirtd session with log level as 1
    config_path = "/var/tmp/virt-test.conf"
    open(config_path, 'a').close()
    config = utils_config.LibvirtdConfig(config_path)
    config.log_level = 1
    arg_str = "--config %s" % config_path
    numad_reg = ".*numad"
    libvirtd = utils_libvirtd.LibvirtdSession(logging_handler=_logger,
                                              logging_pattern=numad_reg)

    try:
        libvirtd.start(arg_str=arg_str)

        if numa_memory.get('nodeset'):
            used_node = utils_test.libvirt.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            for i in used_node:
                if i not in node_list:
                    raise error.TestNAError("nodeset %s out of range" %
                                            numa_memory['nodeset'])

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        current_mem = vmxml.current_mem
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            vm.wait_for_login()
        except virt_vm.VMStartError, e:
            raise error.TestFail("Test failed in positive case.\n error: %s" %
                                 e)

        # get left used node beside current using
        if numa_memory.get('placement') == 'auto':
            if not numad_log:
                raise error.TestFail("numad usage not found in libvirtd log")
            logging.debug("numad log list is %s", numad_log)
            numad_ret = numad_log[1].split("numad: ")[-1]
            used_node = utils_test.libvirt.cpus_parser(numad_ret)
            logging.debug("numad nodes are %s", used_node)

        left_node = [i for i in node_list if i not in used_node]

        # run numatune live change numa memory config
        for node in left_node:
            virsh.numatune(vm_name,
                           'strict',
                           str(node),
                           options,
                           debug=True,
                           ignore_status=False)

            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            numa_memory_new = vmxml_new.numa_memory
            logging.debug("Current memory config dict is %s" % numa_memory_new)

            # Check xml config
            pos_numa_memory = numa_memory.copy()
            pos_numa_memory['nodeset'] = str(node)
            del pos_numa_memory['placement']
            logging.debug("Expect numa memory config is %s", pos_numa_memory)
            if pos_numa_memory != numa_memory_new:
                raise error.TestFail("numa memory config %s not expected after"
                                     " live update" % numa_memory_new)

            # Check qemu process numa memory usage
            host_numa_node = utils_misc.NumaInfo()
            memory_status, qemu_cpu = utils_test.qemu.get_numa_status(
                host_numa_node, vm.get_pid())
            logging.debug("The memory status is %s", memory_status)
            # If the host has non-contiguous node numbers, convert them
            # into positions in node_list so they can be used to index
            # memory_status in mem_compare
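            # e.g. online nodes [0, 2, 3] with node=2 give used_node=[1]
            # and left_node_new=[0, 2] as positions in node_list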
            left_node_new = [
                node_list.index(i) for i in node_list if i != node
            ]
            used_node = [node_list.index(node)]

            mem_compare(used_node, left_node_new)
Esempio n. 30
0
def run(test, params, env):
    """
    Qemu numa consistency test:
    1) Get host numa topological structure
    2) Start a guest with the same number of nodes as the host, each node has one cpu
    3) Get the host cpu id used by each vcpu thread and the node that cpu belongs to
    4) Allocate memory inside the guest and bind the allocating process to one of
       its vcpus.
    5) The memory used on the host should increase in the same node if the vcpu
       thread has not switched to another node.
    6) Repeat step 3~5 for each vcpu thread of the guest.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def get_vcpu_used_node(numa_node_info, vcpu_thread):
        cpu_used_host = utils_misc.get_thread_cpu(vcpu_thread)[0]
        node_used_host = ([
            _ for _ in node_list
            if cpu_used_host in numa_node_info.nodes[_].cpus
        ][0])
        return node_used_host

    error.context("Get host numa topological structure", logging.info)
    timeout = float(params.get("login_timeout", 240))
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes
    if len(node_list) < 2:
        raise error.TestNAError("This host only has one NUMA node, "
                                "skipping test...")
    node_list.sort()
    params['smp'] = len(node_list)
    params['vcpu_cores'] = 1
    params['vcpu_threads'] = 1
    params['vcpu_sockets'] = params['smp']
    params['guest_numa_nodes'] = ""
    for node_id in range(len(node_list)):
        params['guest_numa_nodes'] += " node%d" % node_id
    params['start_vm'] = 'yes'

    utils_memory.drop_caches()
    vm = params['main_vm']
    env_process.preprocess_vm(test, params, env, vm)
    vm = env.get_vm(vm)
    vm.verify_alive()
    vcpu_threads = vm.vcpu_threads
    session = vm.wait_for_login(timeout=timeout)

    dd_size = 256
    if dd_size * len(vcpu_threads) > int(params['mem']):
        dd_size = int(int(params['mem']) / 2 / len(vcpu_threads))

    mount_size = dd_size * len(vcpu_threads)

    mount_cmd = "mount -o size=%dM -t tmpfs none /tmp" % mount_size

    qemu_pid = vm.get_pid()
    drop = 0
    for cpuid in range(len(vcpu_threads)):
        error.context("Get vcpu %s used numa node." % cpuid, logging.info)
        memory_status, _ = utils_test.qemu.get_numa_status(
            host_numa_node, qemu_pid)
        node_used_host = get_vcpu_used_node(host_numa_node,
                                            vcpu_threads[cpuid])
        node_used_host_index = node_list.index(node_used_host)
        memory_used_before = memory_status[node_used_host_index]
        error.context("Allocate memory in guest", logging.info)
        session.cmd(mount_cmd)
        binded_dd_cmd = "taskset %s" % str(2**int(cpuid))
        binded_dd_cmd += " dd if=/dev/urandom of=/tmp/%s" % cpuid
        binded_dd_cmd += " bs=1M count=%s" % dd_size
        session.cmd(binded_dd_cmd)
        error.context("Check qemu process memory use status", logging.info)
        node_after = get_vcpu_used_node(host_numa_node, vcpu_threads[cpuid])
        if node_after != node_used_host:
            logging.warn("Node used by vcpu thread changed. So drop the"
                         " results in this round.")
            drop += 1
            continue
        memory_status, _ = utils_test.qemu.get_numa_status(
            host_numa_node, qemu_pid)
        memory_used_after = memory_status[node_used_host_index]
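        # memory_status holds per-node page counts; convert the delta to MiB
        # with the host page size before comparing it against the dd size.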
        page_size = resource.getpagesize() / 1024
        memory_allocated = (memory_used_after -
                            memory_used_before) * page_size / 1024
        if 1 - float(memory_allocated) / float(dd_size) > 0.05:
            numa_hardware_cmd = params.get("numa_hardware_cmd")
            numa_info = ""
            if numa_hardware_cmd:
                numa_info = utils.system_output(numa_hardware_cmd,
                                                ignore_status=True)
            msg = "Expected to malloc %sM memory in node %s, " % (
                dd_size, node_used_host)
            msg += "but only %sM was allocated.\n" % memory_allocated
            msg += "Please check more details of the numa node: %s" % numa_info
            raise error.TestFail(msg)
    session.close()

    if drop == len(vcpu_threads):
        raise error.TestError("All test rounds are dropped."
                              " Please test it again.")