Example #1
import logging
import os

from autotest.client import utils
from autotest.client.shared import error
from virttest import data_dir
from virttest import funcatexit
from virttest import utils_misc
from virttest.staging import utils_memory

# NOTE: import paths assume the legacy virt-test layout; max_mem_map_node
# and autotest_control are expected from the surrounding provider (a sketch
# of max_mem_map_node follows this example).


def run(test, params, env):
    """
    Qemu numa stress test:
    1) Boot up a guest and find the node it used
    2) Try to allocate memory in that node
    3) Run memory heavy stress inside guest
    4) Check the memory use status of the qemu process
    5) Repeat steps 2-4 several times

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    host_numa_node = utils_misc.NumaInfo()
    if len(host_numa_node.online_nodes) < 2:
        raise error.TestNAError("Host only has one NUMA node, "
                                "skipping test...")

    timeout = float(params.get("login_timeout", 240))
    test_count = int(params.get("test_count", 4))
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=timeout)

    qemu_pid = vm.get_pid()

    if test_count < len(host_numa_node.online_nodes):
        test_count = len(host_numa_node.online_nodes)

    tmpfs_size = 0
    for node in host_numa_node.nodes:
        node_mem = int(host_numa_node.read_from_node_meminfo(node, "MemTotal"))
        if tmpfs_size < node_mem:
            tmpfs_size = node_mem
    tmpfs_path = params.get("tmpfs_path", "tmpfs_numa_test")
    tmpfs_path = utils_misc.get_path(data_dir.get_tmp_dir(), tmpfs_path)
    tmpfs_write_speed = int(params.get("tmpfs_write_speed", 10240))
    dd_timeout = tmpfs_size / tmpfs_write_speed * 1.5
    mount_fs_size = "size=%dK" % tmpfs_size
    memory_file = utils_misc.get_path(tmpfs_path, "test")
    dd_cmd = "dd if=/dev/urandom of=%s bs=1k count=%s" % (memory_file,
                                                          tmpfs_size)

    if not os.path.isdir(tmpfs_path):
        os.mkdir(tmpfs_path)
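
    # Strategy: numactl-bind a node-sized dd into the tmpfs so the busiest
    # node fills up, pressuring the kernel to migrate the qemu process's
    # pages to another node; each round then verifies that they moved.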

    numa_node_malloc = -1
    most_used_node, memory_used = max_mem_map_node(host_numa_node, qemu_pid)

    for test_round in range(test_count):
        if utils_memory.freememtotal() < tmpfs_size:
            raise error.TestError("Not enough free memory to continue "
                                  "after round %s" % test_round)
        error.context("Executing stress test round: %s" % test_round,
                      logging.info)
        numa_node_malloc = most_used_node
        numa_dd_cmd = "numactl -m %s %s" % (numa_node_malloc, dd_cmd)
        error.context("Try to allocate memory in node %s" % numa_node_malloc,
                      logging.info)
        try:
            utils_misc.mount("none", tmpfs_path, "tmpfs", perm=mount_fs_size)
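            # Register the umount so the tmpfs is cleaned up even if the
            # test aborts before reaching the explicit umount below.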
            funcatexit.register(env, params.get("type"), utils_misc.umount,
                                "none", tmpfs_path, "tmpfs")
            utils.system(numa_dd_cmd, timeout=dd_timeout)
        except Exception as error_msg:
            if "No space" in str(error_msg):
                # Filling the tmpfs mount to ENOSPC is the expected outcome.
                pass
            else:
                raise error.TestFail("Cannot allocate memory in node %s. "
                                     "Error message: %s" % (numa_node_malloc,
                                                            str(error_msg)))
        error.context("Run memory heavy stress in guest", logging.info)
        autotest_control.run(test, params, env)
        error.context("Get the qemu process memory use status", logging.info)
        node_after, memory_after = max_mem_map_node(host_numa_node, qemu_pid)
        if node_after == most_used_node and memory_after >= memory_used:
            raise error.TestFail("Memory is still stuck in "
                                 "node %s" % numa_node_malloc)
        else:
            most_used_node = node_after
            memory_used = memory_after
        utils_misc.umount("none", tmpfs_path, "tmpfs")
        funcatexit.unregister(env, params.get("type"), utils_misc.umount,
                              "none", tmpfs_path, "tmpfs")
        # Flush and drop caches in the guest and on the host so the next
        # round's freememtotal() check sees reclaimable memory.
        session.cmd("sync; echo 3 > /proc/sys/vm/drop_caches")
        utils_memory.drop_caches()
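
Note: Examples #1 and #3 call max_mem_map_node() without qualifying it, while Example #5 imports it from virttest.utils_test, where a helper of that name inspects /proc/<pid>/numa_maps. If your tree does not ship it, the following is a minimal sketch under that assumption; it counts pages per node from the "N<node>=<pages>" fields, whereas the real helper may report kilobytes instead:

import re


def max_mem_map_node(host_numa_node, qemu_pid):
    """
    Sketch: return (node, pages) for the NUMA node that maps the most
    pages of the given process, parsed from /proc/<pid>/numa_maps.
    """
    node_pages = dict((node, 0) for node in host_numa_node.online_nodes)
    with open("/proc/%s/numa_maps" % qemu_pid) as numa_maps:
        for line in numa_maps:
            # Mapping lines carry optional per-node counters such as "N0=512".
            for node, pages in re.findall(r"N(\d+)=(\d+)", line):
                node_pages[int(node)] = node_pages.get(int(node), 0) + int(pages)
    most_used_node = max(node_pages, key=node_pages.get)
    return most_used_node, node_pages[most_used_node]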
Example #3
import logging
import os

from avocado.utils import process
from virttest import data_dir
from virttest import error_context
from virttest import funcatexit
from virttest import utils_misc
from virttest import utils_test
from virttest.staging import utils_memory

# NOTE: import paths assume the avocado-vt layout; max_mem_map_node and
# get_tmpfs_write_speed are helpers expected from the surrounding test
# module (a sketch of get_tmpfs_write_speed follows this example).


def run(test, params, env):
    """
    Qemu numa stress test:
    1) Boot up a guest and find the node it used
    2) Try to allocate memory in that node
    3) Run memory heavy stress inside guest
    4) Check the memory use status of the qemu process
    5) Repeat steps 2-4 several times

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    host_numa_node = utils_misc.NumaInfo()
    if len(host_numa_node.online_nodes) < 2:
        test.cancel("Host only has one NUMA node, skipping test...")

    timeout = float(params.get("login_timeout", 240))
    test_count = int(params.get("test_count", 4))
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=timeout)

    qemu_pid = vm.get_pid()

    if test_count < len(host_numa_node.online_nodes):
        test_count = len(host_numa_node.online_nodes)

    tmpfs_size = params.get_numeric("tmpfs_size")
    for node in host_numa_node.nodes:
        node_mem = int(host_numa_node.read_from_node_meminfo(node, "MemTotal"))
        if tmpfs_size == 0:
            tmpfs_size = node_mem
    tmpfs_path = params.get("tmpfs_path", "tmpfs_numa_test")
    tmpfs_path = utils_misc.get_path(data_dir.get_tmp_dir(), tmpfs_path)
    tmpfs_write_speed = get_tmpfs_write_speed()
    dd_timeout = tmpfs_size / tmpfs_write_speed * 1.5
    mount_fs_size = "size=%dK" % tmpfs_size
    memory_file = utils_misc.get_path(tmpfs_path, "test")
    dd_cmd = "dd if=/dev/urandom of=%s bs=1k count=%s" % (memory_file,
                                                          tmpfs_size)
    utils_memory.drop_caches()

    if utils_memory.freememtotal() < tmpfs_size:
        test.cancel("Host does not have enough free memory to run the test, "
                    "skipping test...")

    if not os.path.isdir(tmpfs_path):
        os.mkdir(tmpfs_path)
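
    # Size the guest stress load to ~80% of guest RAM by default, split
    # across the two "--vm" workers defined in stress_args.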

    test_mem = float(params.get("mem")) * float(params.get("mem_ratio", 0.8))
    stress_args = "--cpu 4 --io 4 --vm 2 --vm-bytes %sM" % int(test_mem / 2)
    most_used_node, memory_used = max_mem_map_node(host_numa_node, qemu_pid)

    for test_round in range(test_count):
        if os.path.exists(memory_file):
            os.remove(memory_file)
        utils_memory.drop_caches()
        if utils_memory.freememtotal() < tmpfs_size:
            test.error("Not enough free memory to continue "
                       "after round %s" % test_round)
        error_context.context("Executing stress test round: %s" % test_round,
                              logging.info)
        numa_node_malloc = most_used_node
        numa_dd_cmd = "numactl -m %s %s" % (numa_node_malloc, dd_cmd)
        error_context.context(
            "Try to allocate memory in node %s" % numa_node_malloc,
            logging.info)
        try:
            utils_misc.mount("none", tmpfs_path, "tmpfs", perm=mount_fs_size)
            funcatexit.register(env, params.get("type"), utils_misc.umount,
                                "none", tmpfs_path, "tmpfs")
            process.system(numa_dd_cmd, timeout=dd_timeout, shell=True)
        except Exception as error_msg:
            if "No space" in str(error_msg):
                # Filling the tmpfs mount to ENOSPC is the expected outcome.
                pass
            else:
                test.fail("Cannot allocate memory in node %s. "
                          "Error message: %s" %
                          (numa_node_malloc, str(error_msg)))
        error_context.context("Run memory heavy stress in guest", logging.info)
        stress_test = utils_test.VMStress(vm,
                                          "stress",
                                          params,
                                          stress_args=stress_args)
        stress_test.load_stress_tool()
        error_context.context("Get the qemu process memory use status",
                              logging.info)
        node_after, memory_after = max_mem_map_node(host_numa_node, qemu_pid)
        if node_after == most_used_node and memory_after >= memory_used:
            test.fail("Memory is still stuck in node %s" % numa_node_malloc)
        else:
            most_used_node = node_after
            memory_used = memory_after
        stress_test.unload_stress()
        stress_test.clean()
        utils_misc.umount("none", tmpfs_path, "tmpfs")
        funcatexit.unregister(env, params.get("type"), utils_misc.umount,
                              "none", tmpfs_path, "tmpfs")
        session.cmd("sync; echo 3 > /proc/sys/vm/drop_caches")
        utils_memory.drop_caches()

    session.close()
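
Note: Example #3 also relies on a get_tmpfs_write_speed() helper that is not shown; Examples #1 and #5 instead read a tmpfs_write_speed parameter with a 10240 KB/s default. If the helper is missing from your tree, one hypothetical stand-in is to probe the host with the same kind of dd the test itself runs (the probe file and size below are illustrative):

import os
import time

from avocado.utils import process


def get_tmpfs_write_speed():
    """
    Sketch: estimate tmpfs write speed in KB/s so dd_timeout can be
    scaled to the host. /dev/shm is tmpfs on most Linux distributions.
    """
    probe_file = "/dev/shm/tmpfs_speed_probe"
    probe_kb = 64 * 1024  # 64 MB probe write
    start = time.time()
    process.system("dd if=/dev/urandom of=%s bs=1k count=%d"
                   % (probe_file, probe_kb), shell=True)
    elapsed = time.time() - start
    os.remove(probe_file)
    # Guard against a zero-length interval on very fast hosts.
    return int(probe_kb / max(elapsed, 0.001))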
Example #5
import logging
import os

from autotest.client import utils
from autotest.client.shared import error
from virttest import data_dir
from virttest import funcatexit
from virttest import utils_misc
from virttest import utils_test
from virttest.staging import utils_memory

# NOTE: import paths assume the legacy virt-test layout; autotest_control
# is another test module from the same provider, with a layout-dependent
# import path.


def run_numa_stress(test, params, env):
    """
    Qemu numa stress test:
    1) Boot up a guest and find the node it used
    2) Try to allocate memory in that node
    3) Run memory heavy stress inside guest
    4) Check the memory use status of the qemu process
    5) Repeat steps 2-4 several times

    @param test: QEMU test object
    @param params: Dictionary with the test parameters
    @param env: Dictionary with test environment.
    """
    host_numa_node = utils_misc.NumaInfo()
    if len(host_numa_node.online_nodes) < 2:
        raise error.TestNAError("Host only has one NUMA node, "
                                "skipping test...")

    timeout = float(params.get("login_timeout", 240))
    test_count = int(params.get("test_count", 4))
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=timeout)

    qemu_pid = vm.get_pid()

    if test_count < len(host_numa_node.online_nodes):
        test_count = len(host_numa_node.online_nodes)

    tmpfs_size = 0
    for node in host_numa_node.nodes:
        node_mem = int(host_numa_node.read_from_node_meminfo(node, "MemTotal"))
        if tmpfs_size < node_mem:
            tmpfs_size = node_mem
    tmpfs_path = params.get("tmpfs_path", "tmpfs_numa_test")
    tmpfs_path = utils_misc.get_path(data_dir.get_tmp_dir(), tmpfs_path)
    tmpfs_write_speed = int(params.get("tmpfs_write_speed", 10240))
    dd_timeout = tmpfs_size / tmpfs_write_speed * 1.5
    mount_fs_size = "size=%dK" % tmpfs_size
    memory_file = utils_misc.get_path(tmpfs_path, "test")
    dd_cmd = "dd if=/dev/urandom of=%s bs=1k count=%s" % (memory_file,
                                                          tmpfs_size)

    if not os.path.isdir(tmpfs_path):
        os.mkdir(tmpfs_path)

    numa_node_malloc = -1
    most_used_node, memory_used = utils_test.max_mem_map_node(
        host_numa_node, qemu_pid)

    for test_round in range(test_count):
        if utils_memory.freememtotal() < tmpfs_size:
            raise error.TestError("Not enough free memory to continue "
                                  "after round %s" % test_round)
        error.context("Executing stress test round: %s" % test_round,
                      logging.info)
        numa_node_malloc = most_used_node
        numa_dd_cmd = "numactl -m %s %s" % (numa_node_malloc, dd_cmd)
        error.context("Try to allocate memory in node %s" % numa_node_malloc,
                      logging.info)
        try:
            utils_misc.mount("none", tmpfs_path, "tmpfs", perm=mount_fs_size)
            funcatexit.register(env, params.get("type"), utils_misc.umount,
                                "none", tmpfs_path, "tmpfs")
            utils.system(numa_dd_cmd, timeout=dd_timeout)
        except Exception as error_msg:
            if "No space" in str(error_msg):
                # Filling the tmpfs mount to ENOSPC is the expected outcome.
                pass
            else:
                raise error.TestFail("Cannot allocate memory in node %s. "
                                     "Error message: %s" %
                                     (numa_node_malloc, str(error_msg)))
        error.context("Run memory heavy stress in guest", logging.info)
        autotest_control.run_autotest_control(test, params, env)
        error.context("Get the qemu process memory use status", logging.info)
        node_after, memory_after = utils_test.max_mem_map_node(
            host_numa_node, qemu_pid)
        if node_after == most_used_node and memory_after >= memory_used:
            raise error.TestFail("Memory is still stuck in "
                                 "node %s" % numa_node_malloc)
        else:
            most_used_node = node_after
            memory_used = memory_after
        utils_misc.umount("none", tmpfs_path, "tmpfs")
        funcatexit.unregister(env, params.get("type"), utils_misc.umount,
                              "none", tmpfs_path, "tmpfs")
        session.cmd("sync; echo 3 > /proc/sys/vm/drop_caches")
        utils_memory.drop_caches()