def nr_hugepage_check(sleep_time, wait_time):
    time_last = 0
    while True:
        value = int(utils_memory.read_from_meminfo("AnonHugePages"))
        nr_hugepages.append(value)
        time_stamp = time.time()
        if time_last != 0:
            if nr_hugepages[-2] != nr_hugepages[-1]:
                time_last = time_stamp
            elif time_stamp - time_last > wait_time:
                logging.info("Huge page size stopped changing")
                break
        else:
            time_last = time_stamp
        time.sleep(sleep_time)
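
Every example on this page hinges on utils_memory.read_from_meminfo(). A minimal standalone sketch of such a helper (an assumption about its behaviour, not the actual autotest implementation) that the polling loop above could run against:

import re

def read_from_meminfo_sketch(key):
    # Hypothetical stand-in for utils_memory.read_from_meminfo(): return the
    # numeric value of a /proc/meminfo entry such as
    # "AnonHugePages:      2048 kB" (most fields are reported in kB).
    with open("/proc/meminfo") as meminfo:
        for line in meminfo:
            if line.startswith(key + ":"):
                return int(re.search(r"\d+", line).group(0))
    raise KeyError("%s not found in /proc/meminfo" % key)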
Example #3
    def run_once(self, args='', stress_length=60):
        if not args:
            # We will use 2 workers of each type for each CPU detected
            threads = 2 * utils.count_cpus()

            # Sometimes the default memory used by each memory worker (256 M)
            # might make our machine go OOM and then funny things might start to
            # happen. Let's avoid that.
            mb = (utils_memory.freememtotal() +
                  utils_memory.read_from_meminfo('SwapFree') / 2)
            memory_per_thread = (mb * 1024) / threads

            # Even though unlikely, it's good to prevent from allocating more
            # disk than this machine actually has on its autotest directory
            # (limit the amount of disk used to max of 90 % of free space)
            free_disk = utils.freespace(self.srcdir)
            file_size_per_thread = 1024 ** 2
            if (0.9 * free_disk) < file_size_per_thread * threads:
                file_size_per_thread = (0.9 * free_disk) / threads

            # Number of CPU workers spinning on sqrt()
            args = '--cpu %d ' % threads
            # Number of IO workers spinning on sync()
            args += '--io %d ' % threads
            # Number of Memory workers spinning on malloc()/free()
            args += '--vm %d ' % threads
            # Amount of memory used per each worker
            args += '--vm-bytes %d ' % memory_per_thread
            # Number of HD workers spinning on write()/unlink()
            args += '--hdd %d ' % threads
            # Size of the files created by each worker in bytes
            args += '--hdd-bytes %d ' % file_size_per_thread
            # Time for which the stress test will run
            args += '--timeout %d ' % stress_length
            # Verbose flag
            args += '--verbose'

        utils.system(self.srcdir + '/src/stress ' + args)
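
For a hypothetical 4-CPU host reporting 1.5 GB free and 1 GB of free swap, the branch above would assemble roughly the following command (illustrative numbers only, assuming freememtotal() and SwapFree are reported in kB and the 90% disk cap is not hit):

threads = 2 * 4                            # two workers of each type per CPU
mb = 1572864 + 1048576 / 2                 # free memory plus half of SwapFree, in kB
memory_per_thread = (mb * 1024) / threads  # 268435456 bytes (256 MB) per vm worker
# Resulting invocation:
#   ./src/stress --cpu 8 --io 8 --vm 8 --vm-bytes 268435456 \
#                --hdd 8 --hdd-bytes 1048576 --timeout 60 --verbose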
def run_trans_hugepage_relocated(test, params, env):
    """
    Transparent hugepage relocated test with quantification.
    The number of pages the THP daemon scans per round is set to 4096, the
    scan sleep time to 10 seconds, and the alloc sleep time to 1 minute.
    Hugepage usage should therefore grow by 16M every 10 seconds; when the
    system is busy and hugepages cannot be allocated for the guest, the value
    stays unchanged for up to 1 minute. The value is sampled every 10 seconds
    and checked against these rules.

    @param test: QEMU test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """
    def nr_hugepage_check(sleep_time, wait_time):
        time_last = 0
        while True:
            value = int(utils_memory.read_from_meminfo("AnonHugePages"))
            nr_hugepages.append(value)
            time_stamp = time.time()
            if time_last != 0:
                if nr_hugepages[-2] != nr_hugepages[-1]:
                    time_last = time_stamp
                elif time_stamp - time_last > wait_time:
                    logging.info("Huge page size stop changed")
                    break
            else:
                time_last = time_stamp
            time.sleep(sleep_time)

    logging.info("Relocated test start")
    login_timeout = float(params.get("login_timeout", 360))
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=login_timeout)

    free_memory = utils_memory.read_from_meminfo("MemFree")
    hugepage_size = utils_memory.read_from_meminfo("Hugepagesize")
    mem = params.get("mem")
    vmsm = int(mem) + 128
    hugetlbfs_path = params.get("hugetlbfs_path", "/proc/sys/vm/nr_hugepages")
    if vmsm < int(free_memory) / 1024:
        nr_hugetlbfs = vmsm * 1024 / int(hugepage_size)
    else:
        nr_hugetlbfs = None
    # Get dd speed in host
    start_time = time.time()
    cmd = "dd if=/dev/urandom of=/tmp/speed_test bs=4K count=256"
    s, o = commands.getstatusoutput(cmd)
    end_time = time.time()
    dd_timeout = vmsm * (end_time - start_time) * 2
    nr_hugepages = []
    thp_cfg = params.get("thp_test_config")
    s_time = int(re.findall("scan_sleep_millisecs:(\d+)", thp_cfg)[0]) / 1000
    w_time = int(re.findall("alloc_sleep_millisecs:(\d+)", thp_cfg)[0]) / 1000

    try:
        logging.info("Turn off swap in guest")
        s, o = session.cmd_status_output("swapoff -a")
        if s != 0:
            logging.warning("Didn't turn off swap in guest")
        s, o = session.cmd_status_output("cat /proc/meminfo")
        mem_free_filter = "MemFree:\s+(.\d+)\s+(\w+)"
        guest_mem_free, guest_unit = re.findall(mem_free_filter, o)[0]
        if re.findall("[kK]", guest_unit):
            guest_mem_free = str(int(guest_mem_free) / 1024)
        elif re.findall("[gG]", guest_unit):
            guest_mem_free = str(int(guest_mem_free) * 1024)
        elif re.findall("[mM]", guest_unit):
            pass
        else:
            guest_mem_free = str(int(guest_mem_free) / 1024 / 1024)

        file_size = min(1024, int(guest_mem_free)/2)
        cmd = "mount -t tmpfs -o size=%sM none /mnt" % file_size
        s, o = session.cmd_status_output(cmd)
        if nr_hugetlbfs:
            hugepage_cfg = open(hugetlbfs_path, "w")
            hugepage_cfg.write(str(nr_hugetlbfs))
            hugepage_cfg.close()

        if not os.path.isdir('/space'):
            os.makedirs('/space')
        if os.system("mount -t tmpfs -o size=%sM none /space" % vmsm):
            raise error.TestError("Can not mount tmpfs")

        # Try to create some memory fragmentation
        # The total size of the fragments is vmsm
        count = vmsm * 1024 / 4
        cmd = "for i in `seq %s`; do dd if=/dev/urandom of=/space/$i" % count
        cmd += " bs=4K count=1 & done"
        logging.info("Start to make fragment in host")
        s, o = commands.getstatusoutput(cmd)
        if s != 0:
            raise error.TestError("Can not dd in host")
    finally:
        s, o = commands.getstatusoutput("umount /space")


    bg = utils_test.BackgroundTest(nr_hugepage_check, (s_time, w_time))
    bg.start()

    while bg.is_alive():
        count = file_size / 2
        cmd = "dd if=/dev/urandom of=/mnt/test bs=2M count=%s" % count
        s, o = session.cmd_status_output(cmd, dd_timeout)

    if bg:
        bg.join()
    mem_increase_step = int(re.findall("pages_to_scan:(\d+)",
                            thp_cfg)[0]) / 512
    mem_increase = 0
    w_step = w_time / s_time + 1
    count = 0
    last_value = nr_hugepages.pop()
    while len(nr_hugepages) > 0:
        current = nr_hugepages.pop()
        if current == last_value:
            count += 1
        elif current < last_value:
            if last_value - current < mem_increase_step * 0.95:
                raise error.TestError("Hugepage memory increased too slow")
            mem_increase += last_value - current
            count = 0
        if count > w_step:
            logging.warning("Memory didn't increase in %s s" % (count
                             * s_time))
    if mem_increase < file_size * 0.5:
        raise error.TestError("Hugepages allocated can not reach a half: %s/%s"\
                              % (mem_increase, file_size))
    session.close()
    logging.info("Relocated test succeed")
Example #6
def run_virsh_memtune(test, params, env):
    """
    Test the command virsh memtune

    (1) Get the current memtune parameters
    (2) Change the parameter values
    (3) Check that the memtune query reflects the updated values
    (4) Check whether the mounted cgroup path gets the updated value
    (5) Log in to the guest, use more memory than the assigned value and
        check whether it kills the vm.
    (6) TODO:Check more values and robust scenarios.
    """

    def check_hardlimit(path, expected_value):
        """
        Matches the expected and actual output
        (1) Match the output of the virsh memtune
        (2) Match the output of the respective cgroup fs value

        @params: path: memory controller path for a domain
        @params: expected_value: the expected hard limit value
        @return: True or False based on the checks
        """
        status_value = True
        # Check 1
        actual_value = virsh.memtune_get(domname, "hard_limit")
        if actual_value == -1:
            raise error.TestFail("the key hard_limit not found in the "
                                 "virsh memtune output")
        if actual_value != int(expected_value):
            status_value = False
            logging.error("Hard limit virsh output:\n\tExpected value:%d"
                        "\n\tActual value: "
                        "%d", int(expected_value), int(actual_value))

        # Check 2
        cmd = "cat %s/memory.limit_in_bytes" % path
        (status, output) = commands.getstatusoutput(cmd)
        # To normalize to kB
        value = int(output)/1024
        if status == 0:
            if int(expected_value) != int(value):
                status_value = False
                logging.error("Hard limit cgroup fs:\n\tExpected Value: %d"
                            "\n\tActual Value: "
                            "%d", int(expected_value), int(value))

        else:
            status_value = False
            logging.error("Error while reading:\n%s", output)
        return status_value


    def check_softlimit(path, expected_value):
        """
        Matches the expected and actual output
        (1) Match the output of the virsh memtune
        (2) Match the output of the respective cgroup fs value

        @params: path: memory controller path for a domain
        @params: expected_value: the expected soft limit value
        @return: True or False based on the checks
        """

        status_value = True
        # Check 1
        actual_value = virsh.memtune_get(domname, "soft_limit")
        if actual_value == -1:
            raise error.TestFail("the key soft_limit not found in the "
                                 "virsh memtune output")

        if actual_value != int(expected_value):
            status_value = False
            logging.error("Soft limit virsh output:\n\tExpected value: %d"
                        "\n\tActual value: "
                        "%d", int(expected_value), int(actual_value))

        # Check 2
        cmd = "cat %s/memory.soft_limit_in_bytes" % path
        (status, output) = commands.getstatusoutput(cmd)
        # To normalize to kB
        value = int(output) / 1024
        if status == 0:
            if int(expected_value) != int(value):
                status_value = False
                logging.error("Soft limit cgroup fs:\n\tExpected Value: %d"
                            "\n\tActual Value: "
                            "%d", int(expected_value), int(value))
        else:
            status_value = False
            logging.error("Error while reading:\n%s", output)
        return status_value


    def check_hardswaplimit(path, expected_value):
        """
        Matches the expected and actual output
        (1) Match the output of the virsh memtune
        (2) Match the output of the respective cgroup fs value

        @params: path: memory controller path for a domain
        @params: expected_value: the expected hardswap limit value
        @return: True or False based on the checks
        """

        status_value = True
        # Check 1
        actual_value = virsh.memtune_get(domname, "swap_hard_limit")
        if actual_value == -1:
            raise error.TestFail("the key swap_hard_limit not found in the "
                                 "virsh memtune output")
        if actual_value != int(expected_value):
            status_value = False
            logging.error("Swap hard limit virsh output:\n\tExpected value: "
                        "%d\n\tActual value: "
                        "%d", int(expected_value), int(actual_value))

        # Check 2
        cmd = "cat %s/memory.memsw.limit_in_bytes" % path
        (status, output) = commands.getstatusoutput(cmd)
        # To normalize to kB
        value = int(output)/1024
        if status == 0:
            if int(expected_value) != int(value):
                status_value = False
                logging.error("Swap hard limit cgroup fs:\n\tExpected Value: "
                            "%d\n\tActual Value: "
                            "%d", int(expected_value), int(value))

        else:
            status_value = False
            logging.error("Error while reading:\n%s", output)
        return status_value

    # Get the vm name, pid of vm and check for alive
    domname = params.get("main_vm")
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    pid = vm.get_pid()
    logging.info("Verify valid cgroup path for VM pid: %s", pid)

    # Resolve the memory cgroup path for a domain
    path = utils_cgroup.resolve_task_cgroup_path(int(pid), "memory")

    # Set the initial memory starting value for test case
    # By default set 1GB less than the total memory
    # If the total memory is less than 1GB, set it to 256MB
    # visit subtests.cfg to change these default values
    Memtotal = utils_memory.read_from_meminfo('MemTotal')
    base_mem = params.get("memtune_base_mem")

    if int(Memtotal) < int(base_mem):
        Mem = int(params.get("memtune_min_mem"))
    else:
        Mem = int(Memtotal) - int(base_mem)

    # Initialize error counter
    error_counter = 0

    # Check that the memtune command is available in the libvirt version under test
    if not virsh.has_help_command("memtune"):
        raise error.TestNAError("Memtune not available in this libvirt version")


    # Run the test case, increasing the memory value by memtune_hard_base_mem each iteration
    while (Mem < Memtotal):
        if virsh.has_command_help_match("memtune", "hard-limit"):
            hard_mem = Mem - int(params.get("memtune_hard_base_mem"))
            options = " --hard-limit %d --live" % hard_mem
            virsh.memtune_set(domname, options)
            if not check_hardlimit(path, hard_mem):
                error_counter += 1
        else:
            raise error.TestNAError("harlimit option not available in memtune "
                                    "cmd in this libvirt version")

        if virsh.has_command_help_match("memtune", "soft-limit"):
            soft_mem = Mem - int(params.get("memtune_soft_base_mem"))
            options = " --soft-limit %d --live" % soft_mem
            virsh.memtune_set(domname, options)
            if not check_softlimit(path, soft_mem):
                error_counter += 1
        else:
            raise error.TestNAError("softlimit option not available in memtune "
                                    "cmd in this libvirt version")

        if virsh.has_command_help_match("memtune","swap-hard-limit"):
            swaphard = Mem
            options = " --swap-hard-limit %d --live" % swaphard
            virsh.memtune_set(domname, options)
            if not check_hardswaplimit(path, swaphard):
                error_counter += 1
        else:
            raise error.TestNAError("swaplimit option not available in memtune "
                                    "cmd in this libvirt version")
        Mem += int(params.get("memtune_hard_base_mem"))

    # Raise error based on error_counter
    if error_counter > 0:
        raise error.TestFail("Test failed, consult the previous error messages")
Example #7
    def split_guest():
        """
        Sequential split of pages on guests up to memory limit
        """
        logging.info("Phase 3a: Sequential split of pages on guests up to "
                     "memory limit")
        last_vm = 0
        session = None
        vm = None
        for i in range(1, vmsc):
            # Check VMs
            for j in range(0, vmsc):
                if not lvms[j].is_alive():
                    e_msg = ("VM %d died while executing static_random_fill on"
                             " VM %d in allocator loop" % (j, i))
                    raise error.TestFail(e_msg)
            vm = lvms[i]
            session = lsessions[i]
            cmd = "mem.static_random_fill()"
            logging.debug("Executing %s on ksm_overcommit_guest.py loop, "
                          "vm: %s", cmd, vm.name)
            session.sendline(cmd)

            out = ""
            try:
                logging.debug("Watching host mem while filling vm %s memory",
                              vm.name)
                while (not out.startswith("PASS") and
                       not out.startswith("FAIL")):
                    if not vm.is_alive():
                        e_msg = ("VM %d died while executing "
                                 "static_random_fill on allocator loop" % i)
                        raise error.TestFail(e_msg)
                    free_mem = int(utils_memory.read_from_meminfo("MemFree"))
                    if (ksm_swap):
                        free_mem = (free_mem +
                                    int(utils_memory.read_from_meminfo("SwapFree")))
                    logging.debug("Free memory on host: %d", free_mem)

                    # We need to keep some memory for python to run.
                    if (free_mem < 64000) or (ksm_swap and
                                            free_mem < (450000 * perf_ratio)):
                        vm.pause()
                        for j in range(0, i):
                            lvms[j].destroy(gracefully=False)
                        time.sleep(20)
                        vm.resume()
                        logging.debug("Only %s free memory, killing %d guests",
                                      free_mem, (i - 1))
                        last_vm = i
                    out = session.read_nonblocking(0.1, 1)
                    time.sleep(2)
            except OSError:
                logging.debug("Only %s host free memory, killing %d guests",
                              free_mem, (i - 1))
                logging.debug("Stopping %s", vm.name)
                vm.pause()
                for j in range(0, i):
                    logging.debug("Destroying %s", lvms[j].name)
                    lvms[j].destroy(gracefully=False)
                time.sleep(20)
                vm.resume()
                last_vm = i

            if last_vm != 0:
                break
            logging.debug("Memory filled for guest %s", vm.name)

        logging.info("Phase 3a: PASS")

        logging.info("Phase 3b: Verify memory of the max stressed VM")
        for i in range(last_vm + 1, vmsc):
            lsessions[i].close()
            if i == (vmsc - 1):
                logging.debug(utils_test.get_memory_info([lvms[i]]))
            logging.debug("Destroying guest %s", lvms[i].name)
            lvms[i].destroy(gracefully=False)

        # Verify last machine with randomly generated memory
        cmd = "mem.static_random_verify()"
        _execute_allocator(cmd, lvms[last_vm], lsessions[last_vm],
                           (mem / 200 * 50 * perf_ratio))
        logging.debug(utils_test.get_memory_info([lvms[last_vm]]))

        lsessions[last_vm].cmd_output("die()", 20)
        lvms[last_vm].destroy(gracefully=False)
        logging.info("Phase 3b: PASS")
Example #8
            utils.run("echo 'never' > %s" % e_rh)
        new_ksm = True
    else:
        try:
            utils.run("modprobe ksm")
            utils.run("ksmctl start 5000 100")
        except error.CmdError, details:
            raise error.TestFail("Failed to load KSM: %s" % details)

    # host_reserve: mem reserve kept for the host system to run
    host_reserve = int(params.get("ksm_host_reserve", -1))
    if (host_reserve == -1):
        # default host_reserve = host memory currently in use + one minimal guest (128MB)
        # later we add 64MB per additional guest
        host_reserve = ((utils_memory.memtotal()
                         - utils_memory.read_from_meminfo("MemFree"))
                        / 1024 + 128)
        # using default reserve
        _host_reserve = True
    else:
        _host_reserve = False

    # guest_reserve: mem reserve kept to prevent the guest OS from killing processes
    guest_reserve = int(params.get("ksm_guest_reserve", -1))
    if (guest_reserve == -1):
        # default guest_reserve = minimal_system_mem(256MB)
        # later we add tmpfs overhead
        guest_reserve = 256
        # using default reserve
        _guest_reserve = True
    else:
def run_trans_hugepage_memory_stress(test, params, env):
    """
    Run the stress tool as a memory load in the guest for THP testing

    @param test: QEMU test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """

    nr_ah = []

    debugfs_flag = 1
    debugfs_path = os.path.join(test.tmpdir, 'debugfs')
    mem = int(params.get("mem"))
    qemu_mem = int(params.get("qemu_mem", "64"))
    hugetlbfs_path = params.get("hugetlbfs_path", "/proc/sys/vm/nr_hugepages")

    error.context("smoke test setup")
    if not os.path.ismount(debugfs_path):
        if not os.path.isdir(debugfs_path):
            os.makedirs(debugfs_path)
        try:
            utils.system("mount -t debugfs none %s" % debugfs_path)
        except Exception:
            debugfs_flag = 0

    try:
        # Allocate free memory to hugetlbfs
        mem_free = int(utils_memory.read_from_meminfo('MemFree')) / 1024
        mem_swap = int(utils_memory.read_from_meminfo('SwapFree')) / 1024
        hugepage_size = (int(utils_memory.read_from_meminfo('Hugepagesize')) /
                         1024)
        nr_hugetlbfs = (mem_free + mem_swap - mem - qemu_mem) / hugepage_size
        fd = open(hugetlbfs_path, "w")
        fd.write(str(nr_hugetlbfs))
        fd.close()

        error.context("Memory stress test")

        nr_ah.append(int(utils_memory.read_from_meminfo('AnonHugePages')))
        if nr_ah[0] <= 0:
            raise error.TestFail("VM is not using transparent hugepage")

        # Run a heavy memory stress load in the guest
        memory_stress_test = params['thp_memory_stress']
        utils_test.run_virt_sub_test(test,
                                     params,
                                     env,
                                     sub_type=memory_stress_test)

        nr_ah.append(int(utils_memory.read_from_meminfo('AnonHugePages')))
        logging.debug("The huge page using for guest is: %s" % nr_ah)

        if nr_ah[1] <= nr_ah[0]:
            logging.warn(
                "VM didn't use transparent hugepages during memory stress")

        if debugfs_flag == 1:
            if int(open(hugetlbfs_path, 'r').read()) <= 0:
                raise error.TestFail("KVM doesn't use transparenthugepage")

        logging.info("memory stress test finished")
    finally:
        error.context("all tests cleanup")
        fd = open(hugetlbfs_path, "w")
        fd.write("0")
        fd.close()
        if os.path.ismount(debugfs_path):
            utils.run("umount %s" % debugfs_path)
        if os.path.isdir(debugfs_path):
            os.removedirs(debugfs_path)
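
The hugetlbfs sizing above hands everything the guest and qemu do not need over to static hugepages. A worked example with illustrative numbers (all in MB, as in the code, and assuming the typical 2 MB Hugepagesize):

mem_free = 8192       # MB currently free on the host
mem_swap = 2048       # MB of free swap
mem = 4096            # MB configured for the guest
qemu_mem = 64         # MB reserved for the qemu process itself
hugepage_size = 2     # MB per hugepage (typical x86_64 default)

nr_hugetlbfs = (mem_free + mem_swap - mem - qemu_mem) / hugepage_size
# (8192 + 2048 - 4096 - 64) / 2 = 3040, the value written to /proc/sys/vm/nr_hugepages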
Example #13
def run_virsh_nodememstats(test, params, env):
    """
    Test the command virsh nodememstats

    (1) Call the virsh nodememstats command
    (2) Get the output
    (3) Check against the /proc/meminfo output
    (4) Call the virsh nodememstats command with an unexpected option
    (5) Call the virsh nodememstats command with libvirtd service stop
    """

    # Initialize the variables
    expected = {}
    actual = {}
    deltas = []
    name_stats = ['total', 'free', 'buffers', 'cached']
    itr = int(params.get("itr"))

    def virsh_check_nodememtats(actual_stats, expected_stats, delta):
        """
        Check the nodememstats output value with /proc/meminfo value
        """

        delta_stats = {}
        for name in name_stats:
            delta_stats[name] = abs(actual_stats[name] - expected_stats[name])
            if 'total' in name:
                if not delta_stats[name] == 0:
                    raise error.TestFail("Command 'virsh nodememstats' not"
                                         " succeeded as the value for %s is "
                                         "deviated by %d\nThe total memory "
                                         "value is deviating-check" %
                                         (name, delta_stats[name]))
            else:
                if delta_stats[name] > delta:
                    raise error.TestFail("Command 'virsh nodememstats' not "
                                         "succeeded as the value for %s"
                                         " is deviated by %d" %
                                         (name, delta_stats[name]))
        return delta_stats

    # Prepare libvirtd service
    check_libvirtd = params.has_key("libvirtd")
    if check_libvirtd:
        libvirtd = params.get("libvirtd")
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

    # Get the option for the test case
    option = params.get("virsh_nodememstats_options")

    # Run test case for 10 iterations
    # (default can be changed in subtests.cfg file)
    # and print the final statistics
    for i in range(itr):
        output = virsh.nodememstats(option)

        # Get the status of the virsh command executed
        status = output.exit_status

        # Get status_error option for the test case
        status_error = params.get("status_error")
        if status_error == "yes":
            if status == 0:
                if libvirtd == "off":
                    utils_libvirtd.libvirtd_start()
                    raise error.TestFail("Command 'virsh nodememstats' "
                                         "succeeded with libvirtd service"
                                         " stopped, incorrect")
                else:
                    raise error.TestFail("Command 'virsh nodememstats %s' "
                                         "succeeded (incorrect command)" %
                                         option)

        elif status_error == "no":
            if status == 0:
                # From the beginning of a line, group 1 is one or
                # more word-characters, followed by zero or more
                # whitespace characters and a ':', then one or
                # more whitespace characters, followed by group 2,
                # which is one or more digit characters,
                # then one or more whitespace characters followed by
                # a literal 'kB' or 'KiB' sequence, e.g. as below
                # total  :              3809340 kB
                # total  :              3809340 KiB
                # Normalise the value to MBs
                regex_obj = re.compile(r"^(\w+)\s*:\s+(\d+)\s\w+")
                expected = {}

                for line in output.stdout.split('\n'):
                    match_obj = regex_obj.search(line)
                    # Due to the extra space in the list
                    if match_obj is not None:
                        name = match_obj.group(1)
                        value = match_obj.group(2)
                        expected[name] = int(value) / 1024

                # Get the actual value from /proc/meminfo and normalise to MBs
                actual['total'] = int(utils_memory.memtotal()) / 1024
                actual['free'] = int(utils_memory.freememtotal()) / 1024
                actual['buffers'] = int(
                    utils_memory.read_from_meminfo('Buffers')) / 1024
                actual['cached'] = int(
                    utils_memory.read_from_meminfo('Cached')) / 1024

                # Currently the delta value is kept at 200 MB this can be
                # tuned based on the accuracy
                # Check subtests.cfg for more details
                delta = int(params.get("delta"))
                output = virsh_check_nodememtats(actual, expected, delta)
                deltas.append(output)

            else:
                raise error.TestFail("Command virsh nodememstats %s not "
                                     "succeeded:\n%s" % (option, status))

    # Recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Print the deviated values for all iterations
    if status_error == "no":
        logging.debug("The following is the deviations from "
                      "the actual(/proc/meminfo) and expected"
                      " value(output of virsh nodememstats)")

        for i in range(itr):
            logging.debug("iteration %d:", i)
            for index, name in enumerate(name_stats):
                logging.debug("%19s : %d", name, deltas[i][name])