Example #1
 def wrapped(self, *args, **dargs):
     try:
         return f(self, *args, **dargs)
     finally:
         if self._logger.global_filename == 'status':
             self.harness.run_test_complete()
             if self.drop_caches:
                 utils_memory.drop_caches()
Example #2
 def wrapped(self, *args, **dargs):
     try:
         return f(self, *args, **dargs)
     finally:
         if self._logger.global_filename == "status":
             self.harness.run_test_complete()
             if self.drop_caches:
                 utils_memory.drop_caches()
Example #3
def run_boot_time(test, params, env):
    """
    KVM boot time test:
    1) Set init run level to 1
    2) Send a shutdown command to the guest, or issue a system_powerdown
       monitor command (depending on the value of shutdown_method)
    3) Boot up the guest and measure the boot time
    4) Set init run level back to the old one

    @param test: QEMU test object
    @param params: Dictionary with the test parameters
    @param env: Dictionary with test environment
    """

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    error.context("Set guest run level to 1", logging.info)
    single_user_cmd = params['single_user_cmd']
    session.cmd(single_user_cmd)

    try:
        error.context("Shut down guest", logging.info)
        session.cmd('sync')
        vm.destroy()

        error.context("Boot up guest and measure the boot time", logging.info)
        utils_memory.drop_caches()
        vm.create()
        vm.verify_alive()
        session = vm.wait_for_serial_login(timeout=timeout)
        boot_time = time.time() - vm.start_time
        expect_time = int(params.get("expect_bootup_time", "17"))
        logging.info("Boot up time: %ss" % boot_time)

    finally:
        try:
            error.context("Restore guest run level", logging.info)
            restore_level_cmd = params['restore_level_cmd']
            session.cmd(restore_level_cmd)
            session.cmd('sync')
            vm.destroy()
            vm.create()
            vm.verify_alive()
            vm.wait_for_login(timeout=timeout)
        except Exception:
            logging.warning("Can not restore guest run level, "
                            "need restore the image")
            params["restore_image_after_testing"] = "yes"

    if boot_time > expect_time:
        raise error.TestFail("Guest boot up is taking too long: %ss" %
                             boot_time)

    session.close()
Example #4
def run_boot_time(test, params, env):
    """
    KVM boot time test:
    1) Set init run level to 1
    2) Send a shutdown command to the guest, or issue a system_powerdown
       monitor command (depending on the value of shutdown_method)
    3) Boot up the guest and measure the boot time
    4) Set init run level back to the old one

    @param test: QEMU test object
    @param params: Dictionary with the test parameters
    @param env: Dictionary with test environment
    """

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    error.context("Set guest run level to 1", logging.info)
    single_user_cmd = params['single_user_cmd']
    session.cmd(single_user_cmd)

    try:
        error.context("Shut down guest", logging.info)
        session.cmd('sync')
        vm.destroy()

        error.context("Boot up guest and measure the boot time", logging.info)
        utils_memory.drop_caches()
        vm.create()
        vm.verify_alive()
        session = vm.wait_for_serial_login(timeout=timeout)
        boot_time = time.time() - vm.start_time
        expect_time = int(params.get("expect_bootup_time", "17"))
        logging.info("Boot up time: %ss" % boot_time)

    finally:
        try:
            error.context("Restore guest run level", logging.info)
            restore_level_cmd = params['restore_level_cmd']
            session.cmd(restore_level_cmd)
            session.cmd('sync')
            vm.destroy()
            vm.create()
            vm.verify_alive()
            vm.wait_for_login(timeout=timeout)
        except Exception:
            logging.warning("Can not restore guest run level, "
                            "need restore the image")
            params["restore_image_after_testing"] = "yes"

    if boot_time > expect_time:
        raise error.TestFail("Guest boot up is taking too long: %ss" % boot_time)

    session.close()
Example #5
 def _init_drop_caches(self, drop_caches):
     """
     Perform the drop caches initialization.
     """
     self.drop_caches_between_iterations = settings.get_value(
         "CLIENT", "drop_caches_between_iterations", type=bool, default=True
     )
     self.drop_caches = drop_caches
     if self.drop_caches:
         utils_memory.drop_caches()
Example #6
 def _init_drop_caches(self, drop_caches):
     """
     Perform the drop caches initialization.
     """
     self.drop_caches_between_iterations = (settings.get_value('CLIENT',
                                                               'drop_caches_between_iterations',
                                                               type=bool, default=True))
     self.drop_caches = drop_caches
     if self.drop_caches:
         utils_memory.drop_caches()
Example #7
def run_numa_basic(test, params, env):
    """
    Qemu numa basic test:
    1) Get host numa topological structure
    2) Start a guest and bind it on the cpus of one node
    3) Check the memory status of qemu process. It should mainly use the
       memory in the same node.
    4) Destroy the guest
    5) Repeat step 2 ~ 4 on every node in host

    @param test: QEMU test object
    @param params: Dictionary with the test parameters
    @param env: Dictionary with test environment.
    """
    error.context("Get host numa topological structure", logging.info)
    timeout = float(params.get("login_timeout", 240))
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes
    for node_id in node_list:
        error.base_context("Bind qemu process to numa node %s" % node_id,
                           logging.info)
        vm = "vm_bind_to_%s" % node_id
        params['qemu_command_prefix'] = "numactl --cpunodebind=%s" % node_id
        utils_memory.drop_caches()
        env_process.preprocess_vm(test, params, env, vm)
        vm = env.get_vm(vm)
        vm.verify_alive()
        session = vm.wait_for_login(timeout=timeout)
        session.close()

        error.context("Check the memory use status of qemu process",
                      logging.info)
        memory_status, _ = utils_test.get_qemu_numa_status(host_numa_node,
                                                           vm.get_pid())
        node_used_most = 0
        memory_sz_used_most = 0
        for index in range(len(node_list)):
            if memory_sz_used_most < memory_status[index]:
                memory_sz_used_most = memory_status[index]
                node_used_most = node_list[index]
            logging.debug("Qemu used %s pages in node"
                          " %s" % (memory_status[index], node_list[index]))
        if node_used_most != node_id:
            raise error.TestFail("Qemu still use memory from other node."
                                  " Expect: %s, used: %s" % (node_id,
                                                             node_used_most))

        error.context("Destroy guest.", logging.info)
        vm.destroy()
Example #8
 def drop_caches_between_iterations(self):
     if self.job.drop_caches_between_iterations:
         utils_memory.drop_caches()
Example #9
    def run_once(self, mount_point, file_count, write_size,
                 max_flush_time=1, file_system=None, remove_previous=False,
                 sparse_file=os.path.join(os.getcwd(), 'sparse_file'),
                 old_cleanup=False):
        """
        Control execution of the test.

        :param mount_point: the absolute path to the mount point.
        :param file_count: the number of files to write.
        :param write_size: the size of each file in MB.
        :param max_flush_time: the maximum time to wait for the writeback to
                flush dirty data to disk. Default = 1 minute.
        :param file_system: the new file system to be mounted, if any.
                Default = None.
        :param remove_previous: boolean that allows the removal of previous
                files before creating a new one. Default = False.
        :param sparse_file: the absolute path to the sparse file.
        :param old_cleanup: removes previous mount_point if it exists and is
                not mounted. Default is False.
        """
        # Check validity of parameters.
        self._check_parameters(mount_point, write_size, file_count,
                               old_cleanup)

        # Initialize class variables.
        self.mount_point = mount_point
        self.sparse_file = sparse_file
        self.file_system = file_system

        # Initialize partition values.
        self._create_partition()

        # Flush read and write cache.
        utils_memory.drop_caches()

        # Start iterations.
        logging.info('Starting test operations.')
        test_start_time = datetime.datetime.now()
        counter = 1

        # Run test until file_count files are successfully written to disk.
        while counter < file_count:
            logging.info('Iteration %s.', counter)

            # Write data to disk.
            write_completion_time = self._write_data(self.mount_point, counter,
                                                     write_size)
            logging.debug('Write time:%s',
                          write_completion_time.strftime("%H:%M:%S"))

            # Wait until data get synced to disk.
            time_taken = self._wait_until_data_flushed(write_completion_time,
                                                       max_flush_time)

            # Log time statistics.
            logging.info('Time taken to flush data: %s seconds.',
                         time_taken.seconds)

            # Check if there is a need to remove the previously written file.
            if remove_previous:
                logging.debug('Removing previous file instance.')
                os.remove(sparse_file)
            else:
                logging.debug('Not removing previous file instance.')

            # Flush cache.
            logging.debug('Flush cache between iterations.')
            utils_memory.drop_caches()

            # Update the result map.
            self.result_map[counter] = time_taken.seconds

            # Increment the counter.
            counter += 1
Example #10
    def run_once(self,
                 mount_point,
                 file_count,
                 write_size,
                 max_flush_time=1,
                 file_system=None,
                 remove_previous=False,
                 sparse_file=os.path.join(os.getcwd(), 'sparse_file'),
                 old_cleanup=False):
        """
        Control execution of the test.

        :param mount_point: the absolute path to the mount point.
        :param file_count: the number of files to write.
        :param write_size: the size of each file in MB.
        :param max_flush_time: the maximum time to wait for the writeback to
                flush dirty data to disk. Default = 1 minute.
        :param file_system: the new file system to be mounted, if any.
                Default = None.
        :param remove_previous: boolean that allows the removal of previous
                files before creating a new one. Default = False.
        :param sparse_file: the absolute path to the sparse file.
        :param old_cleanup: removes previous mount_point if it exists and is
                not mounted. Default is False.
        """
        # Check validity of parameters.
        self._check_parameters(mount_point, write_size, file_count,
                               old_cleanup)

        # Initialize class variables.
        self.mount_point = mount_point
        self.sparse_file = sparse_file
        self.file_system = file_system

        # Initialize partition values.
        self._create_partition()

        # Flush read and write cache.
        utils_memory.drop_caches()

        # Start iterations.
        logging.info('Starting test operations.')
        test_start_time = datetime.datetime.now()
        counter = 1

        # Run test until file_count files are successfully written to disk.
        while counter < file_count:
            logging.info('Iteration %s.', counter)

            # Write data to disk.
            write_completion_time = self._write_data(self.mount_point, counter,
                                                     write_size)
            logging.debug('Write time:%s',
                          write_completion_time.strftime("%H:%M:%S"))

            # Wait until data get synced to disk.
            time_taken = self._wait_until_data_flushed(write_completion_time,
                                                       max_flush_time)

            # Log time statistics.
            logging.info('Time taken to flush data: %s seconds.',
                         time_taken.seconds)

            # Check if there is a need to remove the previously written file.
            if remove_previous:
                logging.debug('Removing previous file instance.')
                os.remove(sparse_file)
            else:
                logging.debug('Not removing previous file instance.')

            # Flush cache.
            logging.debug('Flush cache between iterations.')
            utils_memory.drop_caches()

            # Update the result map.
            self.result_map[counter] = time_taken.seconds

            # Increment the counter.
            counter += 1
Example #11
 def drop_caches_between_iterations(self):
     if self.job.drop_caches_between_iterations:
         utils_memory.drop_caches()
Example #12
def run_numa_stress(test, params, env):
    """
    Qemu numa stress test:
    1) Boot up a guest and find the node it used
    2) Try to allocate memory in that node
    3) Run memory heavy stress inside guest
    4) Check the memory use status of qemu process
    5) Repeat step 2 ~ 4 several times


    @param test: QEMU test object
    @param params: Dictionary with the test parameters
    @param env: Dictionary with test environment.
    """
    host_numa_node = utils_misc.NumaInfo()
    if len(host_numa_node.online_nodes) < 2:
        raise error.TestNAError("Host only has one NUMA node, "
                                "skipping test...")

    timeout = float(params.get("login_timeout", 240))
    test_count = int(params.get("test_count", 4))
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=timeout)

    qemu_pid = vm.get_pid()

    if test_count < len(host_numa_node.online_nodes):
        test_count = len(host_numa_node.online_nodes)

    tmpfs_size = 0
    for node in host_numa_node.nodes:
        node_mem = int(host_numa_node.read_from_node_meminfo(node, "MemTotal"))
        if tmpfs_size < node_mem:
            tmpfs_size = node_mem
    tmpfs_path = params.get("tmpfs_path", "tmpfs_numa_test")
    tmpfs_path = utils_misc.get_path(data_dir.get_tmp_dir(), tmpfs_path)
    tmpfs_write_speed = int(params.get("tmpfs_write_speed", 10240))
    dd_timeout = tmpfs_size / tmpfs_write_speed * 1.5
    mount_fs_size = "size=%dK" % tmpfs_size
    memory_file = utils_misc.get_path(tmpfs_path, "test")
    dd_cmd = "dd if=/dev/urandom of=%s bs=1k count=%s" % (memory_file,
                                                          tmpfs_size)

    if not os.path.isdir(tmpfs_path):
        os.mkdir(tmpfs_path)

    numa_node_malloc = -1
    most_used_node, memory_used = utils_test.max_mem_map_node(host_numa_node,
                                                              qemu_pid)

    for test_round in range(test_count):
        if utils_memory.freememtotal() < tmpfs_size:
            raise error.TestError("Don't have enough memory to execute this "
                                  "test after %s round" % test_round)
        error.context("Executing stress test round: %s" % test_round,
                      logging.info)
        numa_node_malloc = most_used_node
        numa_dd_cmd = "numactl -m %s %s" % (numa_node_malloc, dd_cmd)
        error.context("Try to allocate memory in node %s" % numa_node_malloc,
                      logging.info)
        try:
            utils_misc.mount("none", tmpfs_path, "tmpfs", perm=mount_fs_size)
            funcatexit.register(env, params.get("type"), utils_misc.umount,
                                "none", tmpfs_path, "tmpfs")
            utils.system(numa_dd_cmd, timeout=dd_timeout)
        except Exception as error_msg:
            if "No space" in str(error_msg):
                pass
            else:
                raise error.TestFail("Can not allocate memory in node %s."
                                     " Error message:%s" % (numa_node_malloc,
                                                            str(error_msg)))
        error.context("Run memory heavy stress in guest", logging.info)
        autotest_control.run_autotest_control(test, params, env)
        error.context("Get the qemu process memory use status", logging.info)
        node_after, memory_after = utils_test.max_mem_map_node(host_numa_node,
                                                               qemu_pid)
        if node_after == most_used_node and memory_after >= memory_used:
            raise error.TestFail("Memory still stick in "
                                 "node %s" % numa_node_malloc)
        else:
            most_used_node = node_after
            memory_used = memory_after
        utils_misc.umount("none", tmpfs_path, "tmpfs")
        funcatexit.unregister(env, params.get("type"), utils_misc.umount,
                              "none", tmpfs_path, "tmpfs")
        session.cmd("sync; echo 3 > /proc/sys/vm/drop_caches")
        utils_memory.drop_caches()
Example #13
def run_numa_consistency(test, params, env):
    """
    Qemu numa consistency test:
    1) Get host numa topological structure
    2) Start a guest with the same node as the host, each node has one cpu
    3) Get the cpu id used by each vcpu thread on the host and the node
       that cpu belongs to
    4) Allocate memory inside the guest and bind the allocating process to
       one of its vcpus.
    5) The memory used on the host should increase in the same node if the
       vcpu thread does not switch to another node.
    6) Repeat step 3~5 for each vcpu thread of the guest.

    @param test: QEMU test object
    @param params: Dictionary with the test parameters
    @param env: Dictionary with test environment.
    """
    def get_vcpu_used_node(numa_node_info, vcpu_thread):
        cpu_used_host = utils_misc.get_thread_cpu(vcpu_thread)[0]
        node_used_host = ([
            _ for _ in node_list
            if cpu_used_host in numa_node_info.nodes[_].cpus
        ][0])
        return node_used_host

    error.context("Get host numa topological structure", logging.info)
    timeout = float(params.get("login_timeout", 240))
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes
    if len(node_list) < 2:
        raise error.TestNAError("This host only has one NUMA node, "
                                "skipping test...")
    node_list.sort()
    params['smp'] = len(node_list)
    params['vcpu_cores'] = 1
    params['vcpu_threads'] = 1
    params['vcpu_sockets'] = params['smp']
    params['guest_numa_nodes'] = ""
    for node_id in range(len(node_list)):
        params['guest_numa_nodes'] += " node%d" % node_id
    params['start_vm'] = 'yes'

    utils_memory.drop_caches()
    vm = params['main_vm']
    env_process.preprocess_vm(test, params, env, vm)
    vm = env.get_vm(vm)
    vm.verify_alive()
    vcpu_threads = vm.vcpu_threads
    session = vm.wait_for_login(timeout=timeout)

    dd_size = 256
    if dd_size * len(vcpu_threads) > int(params['mem']):
        dd_size = int(int(params['mem']) / 2 / len(vcpu_threads))

    mount_size = dd_size * len(vcpu_threads)

    mount_cmd = "mount -o size=%dM -t tmpfs none /tmp" % mount_size

    qemu_pid = vm.get_pid()
    drop = 0
    for cpuid in range(len(vcpu_threads)):
        error.context("Get vcpu %s used numa node." % cpuid, logging.info)
        memory_status, _ = utils_test.get_qemu_numa_status(
            host_numa_node, qemu_pid)
        node_used_host = get_vcpu_used_node(host_numa_node,
                                            vcpu_threads[cpuid])
        memory_used_before = memory_status[node_used_host]
        error.context("Allocate memory in guest", logging.info)
        session.cmd(mount_cmd)
        binded_dd_cmd = "taskset %s" % str(2**int(cpuid))
        binded_dd_cmd += " dd if=/dev/urandom of=/tmp/%s" % cpuid
        binded_dd_cmd += " bs=1M count=%s" % dd_size
        session.cmd(binded_dd_cmd)
        error.context("Check qemu process memory use status", logging.info)
        node_after = get_vcpu_used_node(host_numa_node, vcpu_threads[cpuid])
        if node_after != node_used_host:
            logging.warn("Node used by vcpu thread changed. So drop the"
                         " results in this round.")
            drop += 1
            continue
        memory_status, _ = utils_test.get_qemu_numa_status(
            host_numa_node, qemu_pid)
        memory_used_after = memory_status[node_used_host]

        memory_allocated = (memory_used_after - memory_used_before) * 4 / 1024

        if 1 - float(memory_allocated) / float(dd_size) > 0.05:
            raise error.TestFail("Expect malloc %sM memory in node %s, but "
                                 "only malloc %sM" %
                                 (dd_size, node_used_host, memory_allocated))
    session.close()

    if drop == len(vcpu_threads):
        raise error.TestError("All test rounds are dropped."
                              " Please test it again.")
Example #14
def run_numa_stress(test, params, env):
    """
    Qemu numa stress test:
    1) Boot up a guest and find the node it used
    2) Try to allocate memory in that node
    3) Run memory heavy stress inside guest
    4) Check the memory use status of qemu process
    5) Repeat step 2 ~ 4 several times


    @param test: QEMU test object
    @param params: Dictionary with the test parameters
    @param env: Dictionary with test environment.
    """
    host_numa_node = utils_misc.NumaInfo()
    if len(host_numa_node.online_nodes) < 2:
        raise error.TestNAError("Host only has one NUMA node, "
                                "skipping test...")

    timeout = float(params.get("login_timeout", 240))
    test_count = int(params.get("test_count", 4))
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=timeout)

    qemu_pid = vm.get_pid()

    if test_count < len(host_numa_node.online_nodes):
        test_count = len(host_numa_node.online_nodes)

    tmpfs_size = 0
    for node in host_numa_node.nodes:
        node_mem = int(host_numa_node.read_from_node_meminfo(node, "MemTotal"))
        if tmpfs_size < node_mem:
            tmpfs_size = node_mem
    tmpfs_path = params.get("tmpfs_path", "tmpfs_numa_test")
    tmpfs_path = utils_misc.get_path(data_dir.get_tmp_dir(), tmpfs_path)
    tmpfs_write_speed = int(params.get("tmpfs_write_speed", 10240))
    dd_timeout = tmpfs_size / tmpfs_write_speed * 1.5
    mount_fs_size = "size=%dK" % tmpfs_size
    memory_file = utils_misc.get_path(tmpfs_path, "test")
    dd_cmd = "dd if=/dev/urandom of=%s bs=1k count=%s" % (memory_file,
                                                          tmpfs_size)

    if not os.path.isdir(tmpfs_path):
        os.mkdir(tmpfs_path)

    numa_node_malloc = -1
    most_used_node, memory_used = utils_test.max_mem_map_node(
        host_numa_node, qemu_pid)

    for test_round in range(test_count):
        if utils_memory.freememtotal() < tmpfs_size:
            raise error.TestError("Don't have enough memory to execute this "
                                  "test after %s round" % test_round)
        error.context("Executing stress test round: %s" % test_round,
                      logging.info)
        numa_node_malloc = most_used_node
        numa_dd_cmd = "numactl -m %s %s" % (numa_node_malloc, dd_cmd)
        error.context("Try to allocate memory in node %s" % numa_node_malloc,
                      logging.info)
        try:
            utils_misc.mount("none", tmpfs_path, "tmpfs", perm=mount_fs_size)
            funcatexit.register(env, params.get("type"), utils_misc.umount,
                                "none", tmpfs_path, "tmpfs")
            utils.system(numa_dd_cmd, timeout=dd_timeout)
        except Exception as error_msg:
            if "No space" in str(error_msg):
                pass
            else:
                raise error.TestFail("Can not allocate memory in node %s."
                                     " Error message:%s" %
                                     (numa_node_malloc, str(error_msg)))
        error.context("Run memory heavy stress in guest", logging.info)
        autotest_control.run_autotest_control(test, params, env)
        error.context("Get the qemu process memory use status", logging.info)
        node_after, memory_after = utils_test.max_mem_map_node(
            host_numa_node, qemu_pid)
        if node_after == most_used_node and memory_after >= memory_used:
            raise error.TestFail("Memory still stick in "
                                 "node %s" % numa_node_malloc)
        else:
            most_used_node = node_after
            memory_used = memory_after
        utils_misc.umount("none", tmpfs_path, "tmpfs")
        funcatexit.unregister(env, params.get("type"), utils_misc.umount,
                              "none", tmpfs_path, "tmpfs")
        session.cmd("sync; echo 3 > /proc/sys/vm/drop_caches")
        utils_memory.drop_caches()
Example #15
def run_numa_consistency(test, params, env):
    """
    Qemu numa consistency test:
    1) Get host numa topological structure
    2) Start a guest with the same node as the host, each node has one cpu
    3) Get the cpu id used by each vcpu thread on the host and the node
       that cpu belongs to
    4) Allocate memory inside the guest and bind the allocating process to
       one of its vcpus.
    5) The memory used on the host should increase in the same node if the
       vcpu thread does not switch to another node.
    6) Repeat step 3~5 for each vcpu thread of the guest.

    @param test: QEMU test object
    @param params: Dictionary with the test parameters
    @param env: Dictionary with test environment.
    """
    def get_vcpu_used_node(numa_node_info, vcpu_thread):
        cpu_used_host = utils_misc.get_thread_cpu(vcpu_thread)[0]
        node_used_host = ([_ for _ in node_list if cpu_used_host
                           in numa_node_info.nodes[_].cpus][0])
        return node_used_host

    error.context("Get host numa topological structure", logging.info)
    timeout = float(params.get("login_timeout", 240))
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes
    if len(node_list) < 2:
        raise error.TestNAError("This host only has one NUMA node, "
                                "skipping test...")
    node_list.sort()
    params['smp'] = len(node_list)
    params['vcpu_cores'] = 1
    params['vcpu_threads'] = 1
    params['vcpu_sockets'] = params['smp']
    params['guest_numa_nodes'] = ""
    for node_id in range(len(node_list)):
        params['guest_numa_nodes'] += " node%d" % node_id
    params['start_vm'] = 'yes'

    utils_memory.drop_caches()
    vm = params['main_vm']
    env_process.preprocess_vm(test, params, env, vm)
    vm = env.get_vm(vm)
    vm.verify_alive()
    vcpu_threads = vm.vcpu_threads
    session = vm.wait_for_login(timeout=timeout)

    dd_size = 256
    if dd_size * len(vcpu_threads) > int(params['mem']):
        dd_size = int(int(params['mem']) / 2 / len(vcpu_threads))

    mount_size = dd_size * len(vcpu_threads)

    mount_cmd = "mount -o size=%dM -t tmpfs none /tmp" % mount_size

    qemu_pid = vm.get_pid()
    drop = 0
    for cpuid in range(len(vcpu_threads)):
        error.context("Get vcpu %s used numa node." % cpuid, logging.info)
        memory_status, _ = utils_test.get_qemu_numa_status(host_numa_node,
                                                           qemu_pid)
        node_used_host = get_vcpu_used_node(host_numa_node,
                                            vcpu_threads[cpuid])
        memory_used_before = memory_status[node_used_host]
        error.context("Allocate memory in guest", logging.info)
        session.cmd(mount_cmd)
        binded_dd_cmd = "taskset %s" % str(2 ** int(cpuid))
        binded_dd_cmd += " dd if=/dev/urandom of=/tmp/%s" % cpuid
        binded_dd_cmd += " bs=1M count=%s" % dd_size
        session.cmd(binded_dd_cmd)
        error.context("Check qemu process memory use status", logging.info)
        node_after = get_vcpu_used_node(host_numa_node, vcpu_threads[cpuid])
        if node_after != node_used_host:
            logging.warn("Node used by vcpu thread changed. So drop the"
                         " results in this round.")
            drop += 1
            continue
        memory_status, _ = utils_test.get_qemu_numa_status(host_numa_node,
                                                           qemu_pid)
        memory_used_after = memory_status[node_used_host]

        memory_allocated = (memory_used_after - memory_used_before) * 4 / 1024

        if 1 - float(memory_allocated) / float(dd_size) > 0.05:
            raise error.TestFail("Expect malloc %sM memory in node %s, but "
                                 "only malloc %sM" % (dd_size, node_used_host,
                                                      memory_allocated))
    session.close()

    if drop == len(vcpu_threads):
        raise error.TestError("All test rounds are dropped."
                              " Please test it again.")