def run(test, params, env):
    """
    Test the virsh freepages command
    """

    option = params.get("freepages_option", "")
    status_error = "yes" == params.get("status_error", "no")
    cellno = params.get("freepages_cellno")
    pagesize = params.get("freepages_page_size")
    huge_pages_num = params.get("huge_pages_num")

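    # Query the host's default hugepage size and all supported hugepage
    # sizes (values are in KiB).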
    hp_cl = test_setup.HugePageConfig(params)
    default_hp_size = hp_cl.get_hugepage_size()
    supported_hp_size = hp_cl.get_multi_supported_hugepage_size()

    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes
    logging.debug("host node list is %s", node_list)

    for i in list(node_list):
        try:
            hp_cl.get_node_num_huge_pages(i, default_hp_size)
        except ValueError as e:
            logging.warning("%s", e)
            logging.debug('move node %s out of testing node list', i)
            node_list.remove(i)


def run(test, params, env):
    """
    Re-assign nr-hugepages

    1. Set up hugepage with 1G page size and hugepages=8
    2. Boot up guest using /mnt/kvm_hugepage as backend in QEMU CML
    3. Change the nr_hugepages after the guest boots up (6 and 10)
    4. Run the stress test **inside guest**
    5. Change the nr_hugepages after the stress test (6 and 10)

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    def set_hugepage():
        """Set nr_hugepages"""
        try:
            for h_size in (origin_nr - 2, origin_nr + 2):
                hp_config.target_hugepages = h_size
                hp_config.set_hugepages()
                if params.get('on_numa_node'):
                    logging.info('Set hugepage size %s to target node %s',
                                 h_size, target_node)
                    hp_config.set_node_num_huge_pages(h_size, target_node,
                                                      hugepage_size)
        except ValueError as err:
            test.cancel(err)

    def run_stress():
        def heavyload_install():
            if session.cmd_status(test_install_cmd) != 0:
                logging.warning("Could not find installed heavyload in guest, "
                                "will install it via winutils.iso ")
                winutil_drive = utils_misc.get_winutils_vol(session)
                if not winutil_drive:
                    test.cancel("WIN_UTILS CDROM not found.")
                install_cmd = params["install_cmd"] % winutil_drive
                session.cmd(install_cmd)

        logging.info('Loading stress on guest.')
        stress_duration = int(params.get("stress_duration", 60))
        if params["os_type"] == "linux":
            params['stress_args'] = '--vm %s --vm-bytes 256M --timeout %s' % (
                mem // 512, stress_duration)
            stress = utils_test.VMStress(vm, 'stress', params)
            stress.load_stress_tool()
            time.sleep(stress_duration)
            stress.unload_stress()
        else:
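            # Windows guest: drive the memory load with heavyload.exe,
            # installing it from the winutils ISO if it is missing.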
            session = vm.wait_for_login()
            install_path = params["install_path"]
            test_install_cmd = 'dir "%s" | findstr /I heavyload' % install_path
            heavyload_install()
            heavyload_bin = r'"%s\heavyload.exe" ' % install_path
            heavyload_options = [
                "/MEMORY 100",
                "/DURATION %d" % (stress_duration // 60), "/AUTOEXIT", "/START"
            ]
            start_cmd = heavyload_bin + " ".join(heavyload_options)
            stress_tool = BackgroundTest(
                session.cmd, (start_cmd, stress_duration, stress_duration))
            stress_tool.start()
            if not utils_misc.wait_for(stress_tool.is_alive, 30):
                test.error("Failed to start heavyload process")
            stress_tool.join(stress_duration)

    origin_nr = int(params['origin_nr'])
    host_numa_node = utils_misc.NumaInfo()
    mem = int(float(utils_misc.normalize_data_size("%sM" % params["mem"])))
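    # When binding to a NUMA node, pick the first online node with enough
    # free memory for the guest and prefix the QEMU command with
    # "numactl --membind" so guest memory is allocated from that node.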
    if params.get('on_numa_node'):
        for target_node in host_numa_node.get_online_nodes_withmem():
            node_mem_free = host_numa_node.read_from_node_meminfo(
                target_node, 'MemFree')
            if int(node_mem_free) > mem:
                params['target_nodes'] = target_node
                params["qemu_command_prefix"] = ("numactl --membind=%s" %
                                                 target_node)
                params['target_num_node%s' % target_node] = origin_nr
                break
            logging.info(
                'The free memory of node %s is %s, which is not enough for'
                ' guest memory: %s', target_node, node_mem_free, mem)
        else:
            test.cancel("No node on your host has sufficient free memory for "
                        "this test.")
    hp_config = test_setup.HugePageConfig(params)
    hp_config.target_hugepages = origin_nr
    logging.info('Setup hugepage number to %s', origin_nr)
    hp_config.setup()
    hugepage_size = utils_memory.get_huge_page_size()
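    # Pass the hugetlbfs mount point to the VM setup code so QEMU backs
    # guest memory with the allocated hugepages.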
    params["hugepage_path"] = hp_config.hugepage_path
    params['start_vm'] = "yes"
    vm_name = params['main_vm']
    env_process.preprocess_vm(test, params, env, vm_name)
    vm = env.get_vm(vm_name)
    run_stress()
    set_hugepage()
    hp_config.cleanup()
    vm.verify_kernel_crash()


def run(test, params, env):
    """
    Test virtiofs filesystem device:

    1.Start guest with 1/2 virtiofs filesystem devices.
    2.Start 2 guests with the same virtiofs filesystem device.
    3.Coldplug/Coldunplug virtiofs filesystem device
    4.Share data between guests and host.
    5.Lifecycle for guest with virtiofs filesystem device.
    """
    def generate_expected_process_option(expected_results):
        """
        Generate expected virtiofsd process option
        """
        if cache_mode != "auto":
            expected_results = "cache=%s" % cache_mode
        if xattr == "on":
            expected_results += ",xattr"
        elif xattr == "off":
            expected_results += ",no_xattr"
        if flock == "on":
            expected_results += ",flock"
        else:
            expected_results += ",no_flock"
        if lock_posix == "on":
            expected_results += ",posix_lock"
        else:
            expected_results += ",no_posix_lock"
        logging.debug(expected_results)
        return expected_results

    def shared_data(vm_names, fs_devs):
        """
        Share data between guests and host:
        1. Mount the virtiofs dir in the guest;
        2. Write a file in the guest;
        3. Check that the md5sum values are the same in guests and host.
        """
        md5s = []
        for vm in vms:
            session = vm.wait_for_login()
            for fs_dev in fs_devs:
                logging.debug(fs_dev)
                session.cmd('rm -rf %s' % fs_dev.source['dir'],
                            ignore_all_errors=False)
                session.cmd('mkdir -p %s' % fs_dev.source['dir'])
                logging.debug("mount virtiofs dir in guest")
                cmd = "mount -t virtiofs %s %s" % (fs_dev.target['dir'],
                                                   fs_dev.source['dir'])
                status, output = session.cmd_status_output(cmd, timeout=300)
                if status != 0:
                    session.close()
                    test.fail("mount virtiofs dir failed: %s" % output)
                filename = fs_dev.source['dir'] + '/' + vms[0].name
                if vm == vms[0]:
                    cmd = ("dd if=/dev/urandom of=%s bs=1M count=512"
                           " oflag=direct" % filename)
                    status, output = session.cmd_status_output(cmd,
                                                               timeout=300)
                    if status != 0:
                        session.close()
                        test.fail("Write data failed: %s" % output)
                md5_value = session.cmd_status_output("md5sum %s" %
                                                      filename)[1].strip()
                md5s.append(md5_value)
                logging.debug(md5_value)
                md5_value = process.run("md5sum %s" %
                                        filename).stdout_text.strip()
                logging.debug(md5_value)
                md5s.append(md5_value)
            session.close()
        if len(set(md5s)) != len(fs_devs):
            test.fail("The md5sum value are not the same in guests and host")

    start_vm = params.get("start_vm", "no")
    vm_names = params.get("vms", "avocado-vt-vm1").split()
    cache_mode = params.get("cache_mode", "none")
    xattr = params.get("xattr", "on")
    lock_posix = params.get("lock_posix", "on")
    flock = params.get("flock", "on")
    path = params.get("virtiofsd_path", "/usr/libexec/virtiofsd")
    queue_size = int(params.get("queue_size", "512"))
    driver_type = params.get("driver_type", "virtiofs")
    guest_num = int(params.get("guest_num", "1"))
    fs_num = int(params.get("fs_num", "1"))
    vcpus_per_cell = int(params.get("vcpus_per_cell", 2))
    source_dir_prefix = params.get("source_dir_prefix", "/dir")
    target_prefix = params.get("target_prefix", "mount_tag")
    error_msg_start = params.get("error_msg_start", "")
    error_msg_save = params.get("error_msg_save", "")
    status_error = params.get("status_error", "no") == "yes"
    socket_file_checking = params.get("socket_file_checking", "no") == "yes"
    suspend_resume = params.get("suspend_resume", "no") == "yes"
    managedsave = params.get("managedsave", "no") == "yes"
    coldplug = params.get("coldplug", "no") == "yes"

    fs_devs = []
    vms = []
    vmxml_backups = []
    expected_results = ""
    huge_pages_num = 0

    if len(vm_names) != guest_num:
        test.cancel("This test needs exactly %d vms." % guest_num)
    try:
        # Define filesystem device xml
        for index in range(fs_num):
            fsdev_keys = ['accessmode', 'driver', 'source', 'target', 'binary']
            accessmode = "passthrough"
            driver = {'type': driver_type, 'queue': queue_size}
            source_dir = str(source_dir_prefix) + str(index)
            logging.debug(source_dir)
            if not os.path.isdir(source_dir):
                os.mkdir(source_dir)
            target_dir = target_prefix + str(index)
            source = {'dir': source_dir}
            target = {'dir': target_dir}
            binary_keys = [
                'path', 'cache_mode', 'xattr', 'lock_posix', 'flock'
            ]
            binary_values = [path, cache_mode, xattr, lock_posix, flock]
            binary_dict = dict(zip(binary_keys, binary_values))
            fsdev_values = [accessmode, driver, source, target, binary_dict]
            fsdev_dict = dict(zip(fsdev_keys, fsdev_values))
            logging.debug(fsdev_dict)
            fs_dev = libvirt_device_utils.create_fs_xml(fsdev_dict)
            logging.debug(fs_dev)
            fs_devs.append(fs_dev)

        # Start guest with virtiofs filesystem device
        for index in range(guest_num):
            logging.debug("prepare vm %s", vm_names[index])
            vm = env.get_vm(vm_names[index])
            vms.append(vm)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index])
            vmxml_backup = vmxml.copy()
            vmxml_backups.append(vmxml_backup)
            if vmxml.max_mem < 1024000:
                vmxml.max_mem = 1024000
            hp_obj = test_setup.HugePageConfig(params)
            host_hp_size = hp_obj.get_hugepage_size()
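            # vmxml.max_mem and the hugepage size are both in KiB, so this
            # is the page count needed for the guest plus 128 pages of
            # headroom.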
            huge_pages_num += vmxml.max_mem // host_hp_size + 128
            utils_memory.set_num_huge_pages(huge_pages_num)
            vmxml.remove_all_device_by_type('filesystem')
            vmxml.sync()
            numa_no = vmxml.vcpu // vcpus_per_cell if vmxml.vcpu != 1 else 1
            vm_xml.VMXML.set_vm_vcpus(vmxml.vm_name,
                                      vmxml.vcpu,
                                      numa_number=numa_no)
            vm_xml.VMXML.set_memoryBacking_tag(vmxml.vm_name,
                                               access_mode="shared")
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index])
            logging.debug(vmxml)
            if coldplug:
                ret = virsh.attach_device(vm_names[index],
                                          fs_devs[0].xml,
                                          flagstr='--config',
                                          debug=True)
                utils_test.libvirt.check_exit_status(ret, expect_error=False)
            else:
                for fs in fs_devs:
                    vmxml.add_device(fs)
                    vmxml.sync()
            logging.debug(vmxml)
            result = virsh.start(vm_names[index], debug=True)
            if status_error and not managedsave:
                expected_error = error_msg_start
                utils_test.libvirt.check_exit_status(result, expected_error)
                return
            else:
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            expected_results = generate_expected_process_option(
                expected_results)
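            # Check that the virtiofsd process was started with the expected
            # cache/xattr/lock options on its command line.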
            cmd = 'ps aux | grep virtiofsd | head -n 1'
            utils_test.libvirt.check_cmd_output(cmd, content=expected_results)

        if managedsave:
            expected_error = error_msg_save
            result = virsh.managedsave(vm_names[0],
                                       ignore_status=True,
                                       debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
        else:
            shared_data(vm_names, fs_devs)
            if suspend_resume:
                virsh.suspend(vm_names[0], debug=True, ignore_status=False)
                time.sleep(30)
                virsh.resume(vm_names[0], debug=True, ignore_status=False)
            elif socket_file_checking:
                result = virsh.domid(vm_names[0])
                domid = result.stdout.strip()
                if result.exit_status:
                    test.fail("Get domid failed.")
                # libvirt keeps the virtiofsd pid and socket files in the
                # per-domain directory under /var/lib/libvirt/qemu
                domain_dir = ("/var/lib/libvirt/qemu/domain-" + domid +
                              '-' + vm_names[0])
                for fs_dev in fs_devs:
                    alias = fs_dev.alias['name']
                    expected_pid = domain_dir + '/' + alias + '-fs.pid'
                    expected_sock = domain_dir + '/' + alias + '-fs.sock'
                    status1 = process.run('ls -l %s' % expected_pid,
                                          shell=True).exit_status
                    status2 = process.run('ls -l %s' % expected_sock,
                                          shell=True).exit_status
                    if status1 or status2:
                        test.fail(
                            "The socket and pid files are not as expected")

    finally:
        for vm in vms:
            if vm.is_alive():
                session = vm.wait_for_login()
                for fs_dev in fs_devs:
                    mount_dir = fs_dev.source['dir']
                    logging.debug(mount_dir)
                    session.cmd('umount -f %s' % mount_dir,
                                ignore_all_errors=True)
                    session.cmd('rm -rf %s' % mount_dir,
                                ignore_all_errors=True)
                session.close()
                vm.destroy(gracefully=False)
        for index in range(fs_num):
            process.run('rm -rf %s%s' % (source_dir_prefix, index),
                        ignore_status=False)
        for vmxml_backup in vmxml_backups:
            vmxml_backup.sync()


def run(test, params, env):
    """
    Test the virsh freepages command
    """

    option = params.get("freepages_option", "")
    status_error = "yes" == params.get("status_error", "no")
    cellno = params.get("freepages_cellno")
    pagesize = params.get("freepages_page_size")
    huge_pages_num = params.get("huge_pages_num")

    hp_cl = test_setup.HugePageConfig(params)
    default_hp_size = hp_cl.get_hugepage_size()
    supported_hp_size = hp_cl.get_multi_supported_hugepage_size()

    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes
    logging.debug("host node list is %s", node_list)
    oth_node = []

    # Drop nodes whose per-node hugepage count cannot be read; iterate over
    # a copy since node_list is modified inside the loop.
    for node in list(node_list):
        try:
            hp_cl.get_node_num_huge_pages(node, default_hp_size)
        except ValueError as exc:
            logging.warning("%s", str(exc))
            logging.debug('move node %s out of testing node list', node)
            node_list.remove(node)
            oth_node.append(node)

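    # Build the list of cells to query: all testable nodes for AUTO, a
    # deliberately invalid cell (max node + 1) for OUT_OF_RANGE, otherwise
    # the literal cellno from params.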
    cellno_list = []
    if cellno == "AUTO":
        cellno_list = node_list
    elif cellno == "OUT_OF_RANGE":
        cellno_list.append(max(node_list) + 1)
    else:
        cellno_list.append(cellno)

    pagesize_list = []
    if pagesize == "AUTO":
        pagesize_list = supported_hp_size
    else:
        pagesize_list.append(pagesize)

    if huge_pages_num and not status_error:
        try:
            huge_pages_num = int(huge_pages_num)
        except (TypeError, ValueError):
            test.error("Huge page value %s should be an integer" %
                       huge_pages_num)
        # Set huge page for positive test
        for i in node_list:
            hp_cl.set_node_num_huge_pages(huge_pages_num, i, default_hp_size)

    # Run test
    for cell in cellno_list:
        for page in pagesize_list:
            result = virsh.freepages(cellno=cell,
                                     pagesize=page,
                                     options=option,
                                     debug=True)

            # Nodes without memory will make the command fail; this is expected
            if "--all" in option and oth_node and not status_error:
                status_error = True

            utlv.check_exit_status(result, status_error)
            expect_result_list = []
            # Check huge pages
            if (not status_error and huge_pages_num
                    and page == str(default_hp_size)):
                page_k = "%sKiB" % page
                node_dict = {page_k: str(huge_pages_num)}
                if cell is None:
                    for i in node_list:
                        tmp_dict = {}
                        tmp_dict['Node'] = "Node %s" % i
                        tmp_dict.update(node_dict)
                        expect_result_list.append(tmp_dict)
                else:
                    expect_result_list = [node_dict]

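                # check_freepages() is a helper defined in the original test
                # module (not shown in this snippet); it compares the virsh
                # output against the expected per-node page counts.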
                if check_freepages(result.stdout.strip(), expect_result_list):
                    logging.info("Huge page freepages check passed")
                else:
                    test.fail("Huge page freepages check failed,"
                              " expected result is %s" % expect_result_list)