Code example #1
 def get_hp_rsvd():
     """
     A generator to get HugePages_Rsvd until it does not change
     """
     stable = False
     hp_rsvd = utils_memory.read_from_meminfo("HugePages_Rsvd")
     while True:
         yield stable
         cur_rsvd = utils_memory.read_from_meminfo("HugePages_Rsvd")
         stable = cur_rsvd == hp_rsvd
         hp_rsvd = cur_rsvd
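A minimal usage sketch for the generator above, assuming get_hp_rsvd() is in scope and avocado's utils_memory is importable as in the snippet; the wait_hp_rsvd_stable() helper name, timeout and step values are made up for illustration only:

import time

def wait_hp_rsvd_stable(timeout=30, step=1.0):
    # Poll the generator until HugePages_Rsvd was unchanged between two reads,
    # or give up after `timeout` seconds.
    stable_gen = get_hp_rsvd()
    deadline = time.time() + timeout
    while time.time() < deadline:
        if next(stable_gen):
            return True
        time.sleep(step)
    return False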
Code example #2
def mem_step(params, path, vm, test, acceptable_minus=8):
    # Set the initial memory starting value for the test case.
    # By default, set 1GB less than the total memory.
    # If the total memory is less than 1GB, set it to 256MB.
    # Visit subtests.cfg to change these default values.
    base_mem = int(params.get("mt_base_mem"))
    hard_base = int(params.get("mt_hard_base_mem"))
    soft_base = int(params.get("mt_soft_base_mem"))

    # Get MemTotal of host
    Memtotal = utils_memory.read_from_meminfo('MemTotal')

    if int(Memtotal) < int(base_mem):
        Mem = int(params.get("mt_min_mem"))
    else:
        Mem = int(Memtotal) - int(base_mem)

    # Run test case with 100kB increase in memory value for each iteration
    start = time.time()
    while (Mem < Memtotal):
        # If more than 60 seconds have passed, exit the while loop directly
        if time.time() - start > 60:
            break
        hard_mem = Mem - hard_base
        soft_mem = Mem - soft_base
        swaphard = Mem

        mt_limits = [str(hard_mem), str(soft_mem), str(swaphard)]
        options = " %s --live" % ' '.join(mt_limits)

        result = virsh.memtune_set(vm.name, options, debug=True)
        libvirt.check_exit_status(result)
        check_limits(path, mt_limits, vm, test, acceptable_minus)

        Mem += hard_base
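For illustration only, this is how the memtune options string above is assembled for sample values; the numbers are invented, and the units follow /proc/meminfo (kB):

# hard limit, soft limit, swap hard limit, as built in mem_step() above
mt_limits = [str(2000000), str(1900000), str(2100000)]
options = " %s --live" % ' '.join(mt_limits)
print(options)   # -> " 2000000 1900000 2100000 --live"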
Code example #3
File: virsh_memtune.py  Project: nasastry/tp-libvirt
def mem_step(params, path, vm, test, acceptable_minus=8):
    # Set the initial memory starting value for the test case.
    # By default, set 1GB less than the total memory.
    # If the total memory is less than 1GB, set it to 256MB.
    # Visit subtests.cfg to change these default values.
    base_mem = int(params.get("mt_base_mem"))
    hard_base = int(params.get("mt_hard_base_mem"))
    soft_base = int(params.get("mt_soft_base_mem"))

    # Get MemTotal of host
    Memtotal = utils_memory.read_from_meminfo('MemTotal')

    if int(Memtotal) < int(base_mem):
        Mem = int(params.get("mt_min_mem"))
    else:
        Mem = int(Memtotal) - int(base_mem)

    # Run test case with 100kB increase in memory value for each iteration
    start = time.time()
    while (Mem < Memtotal):
        # If more than 60 seconds have passed, exit the while loop directly
        if time.time() - start > 60:
            break
        hard_mem = Mem - hard_base
        soft_mem = Mem - soft_base
        swaphard = Mem

        mt_limits = [str(hard_mem), str(soft_mem), str(swaphard)]
        options = " %s --live" % ' '.join(mt_limits)

        result = virsh.memtune_set(vm.name, options)
        check_limits(path, mt_limits, vm, test, acceptable_minus)

        Mem += hard_base
Code example #4
 def nr_hugepage_check(sleep_time, wait_time):
     time_last = 0
     while True:
         value = int(utils_memory.read_from_meminfo("AnonHugePages"))
         nr_hugepages.append(value)
         time_stamp = time.time()
         if time_last != 0:
             if nr_hugepages[-2] != nr_hugepages[-1]:
                 time_last = time_stamp
             elif time_stamp - time_last > wait_time:
                 logging.info("Huge page size stop changed")
                 break
         else:
             time_last = time_stamp
         time.sleep(sleep_time)
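nr_hugepage_check() blocks until AnonHugePages stops changing, so it is typically driven from a background thread; code example #10 below does exactly that with virttest's utils_test.BackgroundTest. A rough sketch of that pattern (sleep_time and wait_time are placeholder values):

bg = utils_test.BackgroundTest(nr_hugepage_check, (sleep_time, wait_time))
bg.start()
# ... generate transparent hugepage activity in the guest here ...
if bg.is_alive():
    bg.join()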
Code example #5
File: __init__.py  Project: Keepod/virt-test
def get_memory_info(lvms):
    """
    Get memory information from host and guests in format:
    Host: memfree = XXXM; Guests memsh = {XXX,XXX,...}

    :param lvms: List of VM objects
    :return: String with memory info report
    """
    if not isinstance(lvms, list):
        raise error.TestError("Invalid list passed to get_stat: %s " % lvms)

    try:
        meminfo = "Host: memfree = "
        meminfo += str(int(utils_memory.freememtotal()) / 1024) + "M; "
        meminfo += "swapfree = "
        mf = int(utils_memory.read_from_meminfo("SwapFree")) / 1024
        meminfo += str(mf) + "M; "
    except Exception as e:
        raise error.TestFail("Could not fetch host free memory info, "
                             "reason: %s" % e)
Code example #6
File: __init__.py  Project: cshastri/virt-test
def get_memory_info(lvms):
    """
    Get memory information from host and guests in format:
    Host: memfree = XXXM; Guests memsh = {XXX,XXX,...}

    :param lvms: List of VM objects
    :return: String with memory info report
    """
    if not isinstance(lvms, list):
        raise error.TestError("Invalid list passed to get_stat: %s " % lvms)

    try:
        meminfo = "Host: memfree = "
        meminfo += str(int(utils_memory.freememtotal()) / 1024) + "M; "
        meminfo += "swapfree = "
        mf = int(utils_memory.read_from_meminfo("SwapFree")) / 1024
        meminfo += str(mf) + "M; "
    except Exception as e:
        raise error.TestFail("Could not fetch host free memory info, "
                             "reason: %s" % e)
Code example #7
File: ksm_overcommit.py  Project: bssrikanth/tp-qemu
    def split_guest():
        """
        Sequential split of pages on guests up to memory limit
        """
        logging.info("Phase 3a: Sequential split of pages on guests up to "
                     "memory limit")
        last_vm = 0
        session = None
        vm = None
        for i in range(1, vmsc):
            # Check VMs
            for j in range(0, vmsc):
                if not lvms[j].is_alive():
                    e_msg = ("VM %d died while executing static_random_fill on"
                             " VM %d in allocator loop" % (j, i))
                    test.fail(e_msg)
            vm = lvms[i]
            session = lsessions[i]
            cmd = "mem.static_random_fill()"
            logging.debug("Executing %s on ksm_overcommit_guest.py loop, "
                          "vm: %s", cmd, vm.name)
            session.sendline(cmd)

            out = ""
            try:
                logging.debug("Watching host mem while filling vm %s memory",
                              vm.name)
                while (not out.startswith("PASS") and
                       not out.startswith("FAIL")):
                    if not vm.is_alive():
                        e_msg = ("VM %d died while executing "
                                 "static_random_fill on allocator loop" % i)
                        test.fail(e_msg)
                    free_mem = int(utils_memory.read_from_meminfo("MemFree"))
                    if (ksm_swap):
                        free_mem = (free_mem +
                                    int(utils_memory.read_from_meminfo("SwapFree")))
                    logging.debug("Free memory on host: %d", free_mem)

                    # We need to keep some memory for python to run.
                    if (free_mem < 64000) or (ksm_swap and
                                              free_mem < (450000 * perf_ratio)):
                        vm.pause()
                        for j in range(0, i):
                            lvms[j].destroy(gracefully=False)
                        time.sleep(20)
                        vm.resume()
                        logging.debug("Only %s free memory, killing %d guests",
                                      free_mem, (i - 1))
                        last_vm = i
                    out = session.read_nonblocking(0.1, 1)
                    time.sleep(2)
            except OSError:
                logging.debug("Only %s host free memory, killing %d guests",
                              free_mem, (i - 1))
                logging.debug("Stopping %s", vm.name)
                vm.pause()
                for j in range(0, i):
                    logging.debug("Destroying %s", lvms[j].name)
                    lvms[j].destroy(gracefully=False)
                time.sleep(20)
                vm.resume()
                last_vm = i

            if last_vm != 0:
                break
            logging.debug("Memory filled for guest %s", vm.name)

        logging.info("Phase 3a: PASS")

        logging.info("Phase 3b: Verify memory of the max stressed VM")
        for i in range(last_vm + 1, vmsc):
            lsessions[i].close()
            if i == (vmsc - 1):
                logging.debug(utils_test.get_memory_info([lvms[i]]))
            logging.debug("Destroying guest %s", lvms[i].name)
            lvms[i].destroy(gracefully=False)

        # Verify last machine with randomly generated memory
        cmd = "mem.static_random_verify()"
        _execute_allocator(cmd, lvms[last_vm], lsessions[last_vm],
                           (mem / 200 * 50 * perf_ratio))
        logging.debug(utils_test.get_memory_info([lvms[last_vm]]))

        lsessions[last_vm].cmd_output("die()", 20)
        lvms[last_vm].destroy(gracefully=False)
        logging.info("Phase 3b: PASS")
Code example #8
File: ksm_overcommit.py  Project: bssrikanth/tp-qemu
def run(test, params, env):
    """
    Tests KSM (Kernel Shared Memory) capability by allocating and filling
    KVM guests' memory using various values. KVM sets the memory as
    MADV_MERGEABLE so all of the VMs' memory can be merged. The workers in
    the guest write to a tmpfs filesystem, thus allocations are not limited
    by the process max memory, only by the VM's memory. Two test modes are
    supported - serial and parallel.

    Serial mode - uses multiple VMs, allocates memory per guest and always
                  verifies the correct amount of shared memory.
                  0) Prints out the setup and initialize guest(s)
                  1) Fills guest with the same number (S1)
                  2) Random fill on the first guest
                  3) Random fill of the remaining VMs one by one until the
                     memory is completely filled (KVM stops machines which
                     ask for additional memory until there is available
                     memory) (S2, shouldn't finish)
                  4) Destroy all VMs but the last one
                  5) Checks the last VM's memory for corruption
    Parallel mode - uses one VM with multiple allocator workers. Executes
                   scenarios in parallel to put more stress on the KVM.
                   0) Prints out the setup and initialize guest(s)
                   1) Fills memory with the same number (S1)
                   2) Fills memory with random numbers (S2)
                   3) Verifies all pages
                   4) Fills memory with the same number (S2)
                   5) Changes the last 96B (S3)

    Scenarios:
    S1) Fill all vms with the same value (all pages should be merged into 1)
    S2) Random fill (all pages should be split)
    S3) Fill last 96B (change only last 96B of each page; some pages will be
                      merged; there was a bug with data corruption)
    Every worker has a unique random key so we are able to verify the filled
    values.

    :param test: kvm test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.

    :param cfg: ksm_swap - use swap?
    :param cfg: ksm_overcommit_ratio - memory overcommit (serial mode only)
    :param cfg: ksm_parallel_ratio - number of workers (parallel mode only)
    :param cfg: ksm_host_reserve - override memory reserve on host in MB
    :param cfg: ksm_guest_reserve - override memory reserve on guests in MB
    :param cfg: ksm_mode - test mode {serial, parallel}
    :param cfg: ksm_perf_ratio - performance ratio, increase it when your
                                 machine is too slow
    """
    def _start_allocator(vm, session, timeout):
        """
        Execute ksm_overcommit_guest.py on guest, wait until it's initialized.

        :param vm: VM object.
        :param session: Remote session to a VM object.
        :param timeout: Timeout that will be used to verify if
                ksm_overcommit_guest.py started properly.
        """
        logging.debug("Starting ksm_overcommit_guest.py on guest %s", vm.name)
        session.sendline("python /tmp/ksm_overcommit_guest.py")
        try:
            session.read_until_last_line_matches(["PASS:", "FAIL:"], timeout)
        except aexpect.ExpectProcessTerminatedError as details:
            e_msg = ("Command ksm_overcommit_guest.py on vm '%s' failed: %s" %
                     (vm.name, str(details)))
            test.fail(e_msg)

    def _execute_allocator(command, vm, session, timeout):
        """
        Execute a given command on ksm_overcommit_guest.py main loop,
        indicating the vm the command was executed on.

        :param command: Command that will be executed.
        :param vm: VM object.
        :param session: Remote session to VM object.
        :param timeout: Timeout used to verify expected output.

        :return: Tuple (match index, data)
        """
        logging.debug("Executing '%s' on ksm_overcommit_guest.py loop, "
                      "vm: %s, timeout: %s", command, vm.name, timeout)
        session.sendline(command)
        try:
            (match, data) = session.read_until_last_line_matches(
                ["PASS:"******"FAIL:"],
                timeout)
        except aexpect.ExpectProcessTerminatedError as details:
            e_msg = ("Failed to execute command '%s' on "
                     "ksm_overcommit_guest.py, vm '%s': %s" %
                     (command, vm.name, str(details)))
            test.fail(e_msg)
        return (match, data)

    def get_ksmstat():
        """
        Return sharing memory by ksm in MB

        :return: memory in MB
        """
        fpages = open('/sys/kernel/mm/ksm/pages_sharing')
        ksm_pages = int(fpages.read())
        fpages.close()
        return ((ksm_pages * 4096) / 1e6)

    def initialize_guests():
        """
        Initialize guests (fill their memories with specified patterns).
        """
        logging.info("Phase 1: filling guest memory pages")
        for session in lsessions:
            vm = lvms[lsessions.index(session)]

            logging.debug("Turning off swap on vm %s", vm.name)
            session.cmd("swapoff -a", timeout=300)

            # Start the allocator
            _start_allocator(vm, session, 60 * perf_ratio)

        # Execute allocator on guests
        for i in range(0, vmsc):
            vm = lvms[i]

            cmd = "mem = MemFill(%d, %s, %s)" % (ksm_size, skeys[i], dkeys[i])
            _execute_allocator(cmd, vm, lsessions[i], 60 * perf_ratio)

            cmd = "mem.value_fill(%d)" % skeys[0]
            _execute_allocator(cmd, vm, lsessions[i],
                               fill_base_timeout * 2 * perf_ratio)

            # Let ksm_overcommit_guest.py do its job
            # (until shared mem reaches expected value)
            shm = 0
            j = 0
            logging.debug("Target shared meminfo for guest %s: %s", vm.name,
                          ksm_size)
            while ((new_ksm and (shm < (ksm_size * (i + 1)))) or
                    (not new_ksm and (shm < (ksm_size)))):
                if j > 64:
                    logging.debug(utils_test.get_memory_info(lvms))
                    test.error("SHM didn't merge the memory until "
                               "the DL on guest: %s" % vm.name)
                pause = ksm_size / 200 * perf_ratio
                logging.debug("Waiting %ds before proceeding...", pause)
                time.sleep(pause)
                if (new_ksm):
                    shm = get_ksmstat()
                else:
                    shm = vm.get_shared_meminfo()
                logging.debug("Shared meminfo for guest %s after "
                              "iteration %s: %s", vm.name, j, shm)
                j += 1

        # Keep some reserve
        pause = ksm_size / 200 * perf_ratio
        logging.debug("Waiting %ds before proceeding...", pause)
        time.sleep(pause)

        logging.debug(utils_test.get_memory_info(lvms))
        logging.info("Phase 1: PASS")

    def separate_first_guest():
        """
        Separate memory of the first guest by generating special random series
        """
        logging.info("Phase 2: Split the pages on the first guest")

        cmd = "mem.static_random_fill()"
        data = _execute_allocator(cmd, lvms[0], lsessions[0],
                                  fill_base_timeout * 2 * perf_ratio)[1]

        r_msg = data.splitlines()[-1]
        logging.debug("Return message of static_random_fill: %s", r_msg)
        out = int(r_msg.split()[4])
        logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s", ksm_size,
                      out, (ksm_size * 1000 / out))
        logging.debug(utils_test.get_memory_info(lvms))
        logging.debug("Phase 2: PASS")

    def split_guest():
        """
        Sequential split of pages on guests up to memory limit
        """
        logging.info("Phase 3a: Sequential split of pages on guests up to "
                     "memory limit")
        last_vm = 0
        session = None
        vm = None
        for i in range(1, vmsc):
            # Check VMs
            for j in range(0, vmsc):
                if not lvms[j].is_alive():
                    e_msg = ("VM %d died while executing static_random_fill on"
                             " VM %d in allocator loop" % (j, i))
                    test.fail(e_msg)
            vm = lvms[i]
            session = lsessions[i]
            cmd = "mem.static_random_fill()"
            logging.debug("Executing %s on ksm_overcommit_guest.py loop, "
                          "vm: %s", cmd, vm.name)
            session.sendline(cmd)

            out = ""
            try:
                logging.debug("Watching host mem while filling vm %s memory",
                              vm.name)
                while (not out.startswith("PASS") and
                       not out.startswith("FAIL")):
                    if not vm.is_alive():
                        e_msg = ("VM %d died while executing "
                                 "static_random_fill on allocator loop" % i)
                        test.fail(e_msg)
                    free_mem = int(utils_memory.read_from_meminfo("MemFree"))
                    if (ksm_swap):
                        free_mem = (free_mem +
                                    int(utils_memory.read_from_meminfo("SwapFree")))
                    logging.debug("Free memory on host: %d", free_mem)

                    # We need to keep some memory for python to run.
                    if (free_mem < 64000) or (ksm_swap and
                                              free_mem < (450000 * perf_ratio)):
                        vm.pause()
                        for j in range(0, i):
                            lvms[j].destroy(gracefully=False)
                        time.sleep(20)
                        vm.resume()
                        logging.debug("Only %s free memory, killing %d guests",
                                      free_mem, (i - 1))
                        last_vm = i
                    out = session.read_nonblocking(0.1, 1)
                    time.sleep(2)
            except OSError:
                logging.debug("Only %s host free memory, killing %d guests",
                              free_mem, (i - 1))
                logging.debug("Stopping %s", vm.name)
                vm.pause()
                for j in range(0, i):
                    logging.debug("Destroying %s", lvms[j].name)
                    lvms[j].destroy(gracefully=False)
                time.sleep(20)
                vm.resume()
                last_vm = i

            if last_vm != 0:
                break
            logging.debug("Memory filled for guest %s", vm.name)

        logging.info("Phase 3a: PASS")

        logging.info("Phase 3b: Verify memory of the max stressed VM")
        for i in range(last_vm + 1, vmsc):
            lsessions[i].close()
            if i == (vmsc - 1):
                logging.debug(utils_test.get_memory_info([lvms[i]]))
            logging.debug("Destroying guest %s", lvms[i].name)
            lvms[i].destroy(gracefully=False)

        # Verify last machine with randomly generated memory
        cmd = "mem.static_random_verify()"
        _execute_allocator(cmd, lvms[last_vm], lsessions[last_vm],
                           (mem / 200 * 50 * perf_ratio))
        logging.debug(utils_test.get_memory_info([lvms[last_vm]]))

        lsessions[last_vm].cmd_output("die()", 20)
        lvms[last_vm].destroy(gracefully=False)
        logging.info("Phase 3b: PASS")

    def split_parallel():
        """
        Parallel page splitting
        """
        logging.info("Phase 1: parallel page spliting")
        # We have to wait until the allocator is finished (it waits 5 seconds
        # to clean the socket)

        session = lsessions[0]
        vm = lvms[0]
        for i in range(1, max_alloc):
            lsessions.append(vm.wait_for_login(timeout=360))

        session.cmd("swapoff -a", timeout=300)

        for i in range(0, max_alloc):
            # Start the allocator
            _start_allocator(vm, lsessions[i], 60 * perf_ratio)

        logging.info("Phase 1: PASS")

        logging.info("Phase 2a: Simultaneous merging")
        logging.debug("Memory used by allocator on guests = %dMB",
                      (ksm_size / max_alloc))

        for i in range(0, max_alloc):
            cmd = "mem = MemFill(%d, %s, %s)" % ((ksm_size / max_alloc),
                                                 skeys[i], dkeys[i])
            _execute_allocator(cmd, vm, lsessions[i], 60 * perf_ratio)

            cmd = "mem.value_fill(%d)" % (skeys[0])
            _execute_allocator(cmd, vm, lsessions[i],
                               fill_base_timeout * perf_ratio)

        # Wait until ksm_overcommit_guest.py merges pages (3 * ksm_size / 3)
        shm = 0
        i = 0
        logging.debug("Target shared memory size: %s", ksm_size)
        while (shm < ksm_size):
            if i > 64:
                logging.debug(utils_test.get_memory_info(lvms))
                test.error("SHM didn't merge the memory until DL")
            pause = ksm_size / 200 * perf_ratio
            logging.debug("Waiting %ds before proceed...", pause)
            time.sleep(pause)
            if (new_ksm):
                shm = get_ksmstat()
            else:
                shm = vm.get_shared_meminfo()
            logging.debug("Shared meminfo after attempt %s: %s", i, shm)
            i += 1

        logging.debug(utils_test.get_memory_info([vm]))
        logging.info("Phase 2a: PASS")

        logging.info("Phase 2b: Simultaneous spliting")
        # Actual splitting
        for i in range(0, max_alloc):
            cmd = "mem.static_random_fill()"
            data = _execute_allocator(cmd, vm, lsessions[i],
                                      fill_base_timeout * perf_ratio)[1]

            data = data.splitlines()[-1]
            logging.debug(data)
            out = int(data.split()[4])
            logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s",
                          (ksm_size / max_alloc), out,
                          (ksm_size * 1000 / out / max_alloc))
        logging.debug(utils_test.get_memory_info([vm]))
        logging.info("Phase 2b: PASS")

        logging.info("Phase 2c: Simultaneous verification")
        for i in range(0, max_alloc):
            cmd = "mem.static_random_verify()"
            data = _execute_allocator(cmd, vm, lsessions[i],
                                      (mem / 200 * 50 * perf_ratio))[1]
        logging.info("Phase 2c: PASS")

        logging.info("Phase 2d: Simultaneous merging")
        # Actual splitting
        for i in range(0, max_alloc):
            cmd = "mem.value_fill(%d)" % skeys[0]
            data = _execute_allocator(cmd, vm, lsessions[i],
                                      fill_base_timeout * 2 * perf_ratio)[1]
        logging.debug(utils_test.get_memory_info([vm]))
        logging.info("Phase 2d: PASS")

        logging.info("Phase 2e: Simultaneous verification")
        for i in range(0, max_alloc):
            cmd = "mem.value_check(%d)" % skeys[0]
            data = _execute_allocator(cmd, vm, lsessions[i],
                                      (mem / 200 * 50 * perf_ratio))[1]
        logging.info("Phase 2e: PASS")

        logging.info("Phase 2f: Simultaneous spliting last 96B")
        for i in range(0, max_alloc):
            cmd = "mem.static_random_fill(96)"
            data = _execute_allocator(cmd, vm, lsessions[i],
                                      fill_base_timeout * perf_ratio)[1]

            data = data.splitlines()[-1]
            out = int(data.split()[4])
            logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s",
                          ksm_size / max_alloc, out,
                          (ksm_size * 1000 / out / max_alloc))

        logging.debug(utils_test.get_memory_info([vm]))
        logging.info("Phase 2f: PASS")

        logging.info("Phase 2g: Simultaneous verification last 96B")
        for i in range(0, max_alloc):
            cmd = "mem.static_random_verify(96)"
            _, data = _execute_allocator(cmd, vm, lsessions[i],
                                         (mem / 200 * 50 * perf_ratio))
        logging.debug(utils_test.get_memory_info([vm]))
        logging.info("Phase 2g: PASS")

        logging.debug("Cleaning up...")
        for i in range(0, max_alloc):
            lsessions[i].cmd_output("die()", 20)
        session.close()
        vm.destroy(gracefully=False)

    # Main test code
    logging.info("Starting phase 0: Initialization")
    if process.run("ps -C ksmtuned", ignore_status=True).exit_status == 0:
        logging.info("Killing ksmtuned...")
        process.run("killall ksmtuned")
    new_ksm = False
    if (os.path.exists("/sys/kernel/mm/ksm/run")):
        process.run("echo 50 > /sys/kernel/mm/ksm/sleep_millisecs", shell=True)
        process.run("echo 5000 > /sys/kernel/mm/ksm/pages_to_scan", shell=True)
        process.run("echo 1 > /sys/kernel/mm/ksm/run", shell=True)

        e_up = "/sys/kernel/mm/transparent_hugepage/enabled"
        e_rh = "/sys/kernel/mm/redhat_transparent_hugepage/enabled"
        if os.path.exists(e_up):
            process.run("echo 'never' > %s" % e_up, shell=True)
        if os.path.exists(e_rh):
            process.run("echo 'never' > %s" % e_rh, shell=True)
        new_ksm = True
    else:
        try:
            process.run("modprobe ksm")
            process.run("ksmctl start 5000 100")
        except process.CmdError as details:
            test.fail("Failed to load KSM: %s" % details)

    # host_reserve: mem reserve kept for the host system to run
    host_reserve = int(params.get("ksm_host_reserve", -1))
    if (host_reserve == -1):
        try:
            available = utils_memory.read_from_meminfo("MemAvailable")
        except process.CmdError:  # ancient kernels
            utils_memory.drop_caches()
            available = utils_memory.read_from_meminfo("MemFree")
        # default host_reserve = UsedMem + one_minimal_guest(128MB)
        # later we add 64MB per additional guest
        host_reserve = ((utils_memory.memtotal() - available) / 1024 + 128)
        # using default reserve
        _host_reserve = True
    else:
        _host_reserve = False

    # guest_reserve: mem reserve kept to keep the guest OS from killing processes
    guest_reserve = int(params.get("ksm_guest_reserve", -1))
    if (guest_reserve == -1):
        # default guest_reserve = minimal_system_mem(256MB)
        # later we add tmpfs overhead
        guest_reserve = 256
        # using default reserve
        _guest_reserve = True
    else:
        _guest_reserve = False

    max_vms = int(params.get("max_vms", 2))
    overcommit = float(params.get("ksm_overcommit_ratio", 2.0))
    max_alloc = int(params.get("ksm_parallel_ratio", 1))

    # vmsc: count of all used VMs
    vmsc = int(overcommit) + 1
    vmsc = max(vmsc, max_vms)

    if (params['ksm_mode'] == "serial"):
        max_alloc = vmsc
        if _host_reserve:
            # First round of additional guest reserves
            host_reserve += vmsc * 64
            _host_reserve = vmsc

    host_mem = (int(utils_memory.memtotal()) / 1024 - host_reserve)

    ksm_swap = False
    if params.get("ksm_swap") == "yes":
        ksm_swap = True

    # Performance ratio
    perf_ratio = params.get("ksm_perf_ratio")
    if perf_ratio:
        perf_ratio = float(perf_ratio)
    else:
        perf_ratio = 1

    if (params['ksm_mode'] == "parallel"):
        vmsc = 1
        overcommit = 1
        mem = host_mem
        # 32bit system adjustment
        if "64" not in params.get("vm_arch_name"):
            logging.debug("Probably i386 guest architecture, "
                          "max allocator mem = 2G")
            # Guest can have more than 2G but
            # kvm mem + 1MB (allocator itself) can't
            if (host_mem > 3100):
                mem = 3100

        if os.popen("uname -i").readline().startswith("i386"):
            logging.debug("Host is i386 architecture, max guest mem is 2G")
            # Guest system with qemu overhead (64M) can't have more than 2G
            if mem > 3100 - 64:
                mem = 3100 - 64

    else:
        # mem: Memory of the guest systems. Maximum must be less than
        # host's physical ram
        mem = int(overcommit * host_mem / vmsc)

        # 32bit system adjustment
        if not params['image_name'].endswith("64"):
            logging.debug("Probably i386 guest architecture, "
                          "max allocator mem = 2G")
            # Guest can have more than 2G but
            # kvm mem + 1MB (allocator itself) can't
            if mem - guest_reserve - 1 > 3100:
                vmsc = int(math.ceil((host_mem * overcommit) /
                                     (3100 + guest_reserve)))
                if _host_reserve:
                    host_reserve += (vmsc - _host_reserve) * 64
                    host_mem -= (vmsc - _host_reserve) * 64
                    _host_reserve = vmsc
                mem = int(math.floor(host_mem * overcommit / vmsc))

        if os.popen("uname -i").readline().startswith("i386"):
            logging.debug("Host is i386 architecture, max guest mem is 2G")
            # Guest system with qemu overhead (64M) can't have more than 2G
            if mem > 3100 - 64:
                vmsc = int(math.ceil((host_mem * overcommit) /
                                     (3100 - 64.0)))
                if _host_reserve:
                    host_reserve += (vmsc - _host_reserve) * 64
                    host_mem -= (vmsc - _host_reserve) * 64
                    _host_reserve = vmsc
                mem = int(math.floor(host_mem * overcommit / vmsc))

    # 0.055 represents OS + TMPFS additional reserve per guest ram MB
    if _guest_reserve:
        guest_reserve += math.ceil(mem * 0.055)

    swap = int(utils_memory.read_from_meminfo("SwapTotal")) / 1024

    logging.debug("Overcommit = %f", overcommit)
    logging.debug("True overcommit = %f ", (float(vmsc * mem) /
                                            float(host_mem)))
    logging.debug("Host memory = %dM", host_mem)
    logging.debug("Guest memory = %dM", mem)
    logging.debug("Using swap = %s", ksm_swap)
    logging.debug("Swap = %dM", swap)
    logging.debug("max_vms = %d", max_vms)
    logging.debug("Count of all used VMs = %d", vmsc)
    logging.debug("Performance_ratio = %f", perf_ratio)

    # Generate unique keys for random series
    skeys = []
    dkeys = []
    for i in range(0, max(vmsc, max_alloc)):
        key = random.randrange(0, 255)
        while key in skeys:
            key = random.randrange(0, 255)
        skeys.append(key)

        key = random.randrange(0, 999)
        while key in dkeys:
            key = random.randrange(0, 999)
        dkeys.append(key)

    logging.debug("skeys: %s", skeys)
    logging.debug("dkeys: %s", dkeys)

    lvms = []
    lsessions = []

    # As we don't know the number and memory amount of VMs in advance,
    # we need to specify and create them here
    vm_name = params["main_vm"]
    params['mem'] = mem
    params['vms'] = vm_name
    # Associate pidfile name
    params['pid_' + vm_name] = utils_misc.generate_tmp_file_name(vm_name,
                                                                 'pid')
    if not params.get('extra_params'):
        params['extra_params'] = ' '
    params['extra_params_' + vm_name] = params.get('extra_params')
    params['extra_params_' + vm_name] += (" -pidfile %s" %
                                          (params.get('pid_' + vm_name)))
    params['extra_params'] = params.get('extra_params_' + vm_name)

    # ksm_size: amount of memory used by allocator
    ksm_size = mem - guest_reserve
    logging.debug("Memory used by allocator on guests = %dM", ksm_size)
    fill_base_timeout = ksm_size / 10

    # Creating the first guest
    env_process.preprocess_vm(test, params, env, vm_name)
    lvms.append(env.get_vm(vm_name))
    if not lvms[0]:
        test.error("VM object not found in environment")
    if not lvms[0].is_alive():
        test.error("VM seems to be dead; Test requires a living VM")

    logging.debug("Booting first guest %s", lvms[0].name)

    lsessions.append(lvms[0].wait_for_login(timeout=360))
    # Associate vm PID
    try:
        tmp = open(params.get('pid_' + vm_name), 'r')
        params['pid_' + vm_name] = int(tmp.readline())
    except Exception:
        test.fail("Could not get PID of %s" % (vm_name))

    # Creating other guest systems
    for i in range(1, vmsc):
        vm_name = "vm" + str(i + 1)
        params['pid_' + vm_name] = utils_misc.generate_tmp_file_name(vm_name,
                                                                     'pid')
        params['extra_params_' + vm_name] = params.get('extra_params')
        params['extra_params_' + vm_name] += (" -pidfile %s" %
                                              (params.get('pid_' + vm_name)))
        params['extra_params'] = params.get('extra_params_' + vm_name)

        # Last VM is later used to run more allocators simultaneously
        lvms.append(lvms[0].clone(vm_name, params))
        env.register_vm(vm_name, lvms[i])
        params['vms'] += " " + vm_name

        logging.debug("Booting guest %s", lvms[i].name)
        lvms[i].create()
        if not lvms[i].is_alive():
            test.error("VM %s seems to be dead; Test requires a"
                       "living VM" % lvms[i].name)

        lsessions.append(lvms[i].wait_for_login(timeout=360))
        try:
            tmp = open(params.get('pid_' + vm_name), 'r')
            params['pid_' + vm_name] = int(tmp.readline())
        except Exception:
            test.fail("Could not get PID of %s" % (vm_name))

    # Let guests rest a little bit :-)
    pause = vmsc * 2 * perf_ratio
    logging.debug("Waiting %ds before proceed", pause)
    time.sleep(vmsc * 2 * perf_ratio)
    logging.debug(utils_test.get_memory_info(lvms))

    # Copy ksm_overcommit_guest.py into guests
    vksmd_src = os.path.join(data_dir.get_shared_dir(),
                             "scripts", "ksm_overcommit_guest.py")
    dst_dir = "/tmp"
    for vm in lvms:
        vm.copy_files_to(vksmd_src, dst_dir)
    logging.info("Phase 0: PASS")

    if params['ksm_mode'] == "parallel":
        logging.info("Starting KSM test parallel mode")
        split_parallel()
        logging.info("KSM test parallel mode: PASS")
    elif params['ksm_mode'] == "serial":
        logging.info("Starting KSM test serial mode")
        initialize_guests()
        separate_first_guest()
        split_guest()
        logging.info("KSM test serial mode: PASS")
Code example #9
            utils.run("echo 'never' > %s" % e_rh)
        new_ksm = True
    else:
        try:
            utils.run("modprobe ksm")
            utils.run("ksmctl start 5000 100")
        except error.CmdError as details:
            raise error.TestFail("Failed to load KSM: %s" % details)

    # host_reserve: mem reserve kept for the host system to run
    host_reserve = int(params.get("ksm_host_reserve", -1))
    if (host_reserve == -1):
        # default host_reserve = UsedMem + one_minimal_guest(128MB)
        # later we add 64MB per additional guest
        host_reserve = ((utils_memory.memtotal() -
                         utils_memory.read_from_meminfo("MemFree")) / 1024 +
                        128)
        # using default reserve
        _host_reserve = True
    else:
        _host_reserve = False

    # guest_reserve: mem reserve kept to keep the guest OS from killing processes
    guest_reserve = int(params.get("ksm_guest_reserve", -1))
    if (guest_reserve == -1):
        # default guest_reserve = minimal_system_mem(256MB)
        # later we add tmpfs overhead
        guest_reserve = 256
        # using default reserve
        _guest_reserve = True
    else:
Code example #10
def run_trans_hugepage_relocated(test, params, env):
    """
    Transparent hugepage relocated test with quantification.
    The number of pages the THP daemon scans per round is set to 4096, the
    scan sleep time is set to 10 seconds, and the alloc sleep time is set to
    1 minute. So the hugepage size should increase by 16M every 10 seconds;
    when the system is busy and fails to allocate hugepages for the guest,
    the value will stay the same for 1 minute. We check the value every 10
    seconds and verify that it follows these rules.

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    def nr_hugepage_check(sleep_time, wait_time):
        time_last = 0
        while True:
            value = int(utils_memory.read_from_meminfo("AnonHugePages"))
            nr_hugepages.append(value)
            time_stamp = time.time()
            if time_last != 0:
                if nr_hugepages[-2] != nr_hugepages[-1]:
                    time_last = time_stamp
                elif time_stamp - time_last > wait_time:
                    logging.info("Huge page size stop changed")
                    break
            else:
                time_last = time_stamp
            time.sleep(sleep_time)

    logging.info("Relocated test start")
    login_timeout = float(params.get("login_timeout", 360))
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=login_timeout)

    free_memory = utils_memory.read_from_meminfo("MemFree")
    hugepage_size = utils_memory.read_from_meminfo("Hugepagesize")
    mem = params.get("mem")
    vmsm = int(mem) + 128
    hugetlbfs_path = params.get("hugetlbfs_path", "/proc/sys/vm/nr_hugepages")
    if vmsm < int(free_memory) / 1024:
        nr_hugetlbfs = vmsm * 1024 / int(hugepage_size)
    else:
        nr_hugetlbfs = None
    # Get dd speed in host
    start_time = time.time()
    cmd = "dd if=/dev/urandom of=/tmp/speed_test bs=4K count=256"
    s, o = commands.getstatusoutput(cmd)
    end_time = time.time()
    dd_timeout = vmsm * (end_time - start_time) * 2
    nr_hugepages = []
    thp_cfg = params.get("thp_test_config")
    s_time = int(re.findall("scan_sleep_millisecs:(\d+)", thp_cfg)[0]) / 1000
    w_time = int(re.findall("alloc_sleep_millisecs:(\d+)", thp_cfg)[0]) / 1000

    try:
        logging.info("Turn off swap in guest")
        s, o = session.cmd_status_output("swapoff -a")
        if s != 0:
            logging.warning("Didn't turn off swap in guest")
        s, o = session.cmd_status_output("cat /proc/meminfo")
        mem_free_filter = "MemFree:\s+(.\d+)\s+(\w+)"
        guest_mem_free, guest_unit = re.findall(mem_free_filter, o)[0]
        if re.findall("[kK]", guest_unit):
            guest_mem_free = str(int(guest_mem_free) / 1024)
        elif re.findall("[gG]", guest_unit):
            guest_mem_free = str(int(guest_mem_free) * 1024)
        elif re.findall("[mM]", guest_unit):
            pass
        else:
            guest_mem_free = str(int(guest_mem_free) / 1024 / 1024)

        file_size = min(1024, int(guest_mem_free) / 2)
        cmd = "mount -t tmpfs -o size=%sM none /mnt" % file_size
        s, o = session.cmd_status_output(cmd)
        if nr_hugetlbfs:
            hugepage_cfg = open(hugetlbfs_path, "w")
            hugepage_cfg.write(str(nr_hugetlbfs))
            hugepage_cfg.close()

        if not os.path.isdir('/space'):
            os.makedirs('/space')
        if os.system("mount -t tmpfs -o size=%sM none /space" % vmsm):
            raise error.TestError("Can not mount tmpfs")

        # Try to create some memory fragments in the host;
        # the total size of the fragments is vmsm
        count = vmsm * 1024 / 4
        cmd = "for i in `seq %s`; do dd if=/dev/urandom of=/space/$i" % count
        cmd += " bs=4K count=1 & done"
        logging.info("Start to make fragment in host")
        s, o = commands.getstatusoutput(cmd)
        if s != 0:
            raise error.TestError("Can not dd in host")
    finally:
        s, o = commands.getstatusoutput("umount /space")

    bg = utils_test.BackgroundTest(nr_hugepage_check, (s_time, w_time))
    bg.start()

    while bg.is_alive():
        count = file_size / 2
        cmd = "dd if=/dev/urandom of=/mnt/test bs=2M count=%s" % count
        s, o = session.cmd_status_output(cmd, dd_timeout)

    if bg:
        bg.join()
    mem_increase_step = int(re.findall("pages_to_scan:(\d+)",
                            thp_cfg)[0]) / 512
    mem_increase = 0
    w_step = w_time / s_time + 1
    count = 0
    last_value = nr_hugepages.pop()
    while len(nr_hugepages) > 0:
        current = nr_hugepages.pop()
        if current == last_value:
            count += 1
        elif current < last_value:
            if last_value - current < mem_increase_step * 0.95:
                raise error.TestError("Hugepage memory increased too slow")
            mem_increase += last_value - current
            count = 0
        if count > w_step:
            logging.warning("Memory didn't increase in %s s" % (count
                                                                * s_time))
    if mem_increase < file_size * 0.5:
        raise error.TestError("Hugepages allocated can not reach a half: %s/%s"
                              % (mem_increase, file_size))
    session.close()
    logging.info("Relocated test succeed")
Code example #11
def run(test, params, env):
    """
    Run stress as a memory stress in guest for THP testing

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """

    nr_ah = []

    debugfs_flag = 1
    debugfs_path = os.path.join(test.tmpdir, 'debugfs')
    mem = int(params.get("mem"))
    qemu_mem = int(params.get("qemu_mem", "64"))
    hugetlbfs_path = params.get("hugetlbfs_path", "/proc/sys/vm/nr_hugepages")

    error.context("smoke test setup")
    if not os.path.ismount(debugfs_path):
        if not os.path.isdir(debugfs_path):
            os.makedirs(debugfs_path)
        try:
            utils.system("mount -t debugfs none %s" % debugfs_path)
        except Exception:
            debugfs_flag = 0

    try:
        # Allocate free memory to hugetlbfs
        mem_free = int(utils_memory.read_from_meminfo('MemFree')) / 1024
        mem_swap = int(utils_memory.read_from_meminfo('SwapFree')) / 1024
        hugepage_size = (int(utils_memory.read_from_meminfo('Hugepagesize')) /
                         1024)
        nr_hugetlbfs = (mem_free + mem_swap - mem - qemu_mem) / hugepage_size
        fd = open(hugetlbfs_path, "w")
        fd.write(str(nr_hugetlbfs))
        fd.close()

        error.context("Memory stress test")

        nr_ah.append(int(utils_memory.read_from_meminfo('AnonHugePages')))
        if nr_ah[0] <= 0:
            raise error.TestFail("VM is not using transparent hugepage")

        # Run stress memory heavy in guest
        memory_stress_test = params['thp_memory_stress']
        utils_test.run_virt_sub_test(test, params, env,
                                     sub_type=memory_stress_test)

        nr_ah.append(int(utils_memory.read_from_meminfo('AnonHugePages')))
        logging.debug("The huge page using for guest is: %s" % nr_ah)

        if nr_ah[1] <= nr_ah[0]:
            logging.warn(
                "VM don't use transparent hugepage while memory stress")

        if debugfs_flag == 1:
            if int(open(hugetlbfs_path, 'r').read()) <= 0:
                raise error.TestFail("KVM doesn't use transparenthugepage")

        logging.info("memory stress test finished")
    finally:
        error.context("all tests cleanup")
        fd = open(hugetlbfs_path, "w")
        fd.write("0")
        fd.close()
        if os.path.ismount(debugfs_path):
            utils.run("umount %s" % debugfs_path)
        if os.path.isdir(debugfs_path):
            os.removedirs(debugfs_path)
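Illustrative arithmetic for the nr_hugetlbfs computation above, with made-up values that are already converted to MB as in the code:

mem_free, mem_swap = 7000, 2000   # MemFree and SwapFree in MB
mem, qemu_mem = 4096, 64          # guest memory and qemu overhead in MB
hugepage_size = 2                 # Hugepagesize in MB
print((mem_free + mem_swap - mem - qemu_mem) // hugepage_size)   # 2420 hugepages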
Code example #12
def run(test, params, env):
    """
    Test the command virsh nodememstats

    (1) Call the virsh nodememstats command
    (2) Get the output
    (3) Check the output against /proc/meminfo
    (4) Call the virsh nodememstats command with an unexpected option
    (5) Call the virsh nodememstats command with libvirtd service stop
    """

    # Initialize the variables
    expected = {}
    actual = {}
    deltas = []
    name_stats = ['total', 'free', 'buffers', 'cached']
    itr = int(params.get("itr"))

    def virsh_check_nodememtats(actual_stats, expected_stats, delta):
        """
        Check the nodememstats output value with /proc/meminfo value
        """

        delta_stats = {}
        for name in name_stats:
            delta_stats[name] = abs(actual_stats[name] - expected_stats[name])
            if 'total' in name:
                if not delta_stats[name] == 0:
                    test.fail("Command 'virsh nodememstats' not"
                              " succeeded as the value for %s is "
                              "deviated by %d\nThe total memory "
                              "value is deviating-check" %
                              (name, delta_stats[name]))
            else:
                if delta_stats[name] > delta:
                    test.fail("Command 'virsh nodememstats' not "
                              "succeeded as the value for %s"
                              " is deviated by %d" % (name, delta_stats[name]))
        return delta_stats

    # Prepare libvirtd service
    check_libvirtd = "libvirtd" in params
    if check_libvirtd:
        libvirtd = params.get("libvirtd")
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

    # Get the option for the test case
    option = params.get("virsh_nodememstats_options")
    if option == "max":
        cell_dict = utils_test.libvirt.get_all_cells()
        option = len(list(cell_dict.keys()))

    # Run test case for 10 iterations
    # (default can be changed in subtests.cfg file)
    # and print the final statistics
    for i in range(itr):
        output = virsh.nodememstats(option)

        # Get the status of the virsh command executed
        status = output.exit_status

        # Get status_error option for the test case
        status_error = params.get("status_error")
        if status_error == "yes":
            if status == 0:
                if libvirtd == "off":
                    utils_libvirtd.libvirtd_start()
                    test.fail("Command 'virsh nodememstats' "
                              "succeeded with libvirtd service"
                              " stopped, incorrect")
                else:
                    test.fail("Command 'virsh nodememstats %s' "
                              "succeeded (incorrect command)" % option)

        elif status_error == "no":
            if status == 0:
                if option:
                    return
                # From the beginning of a line, group 1 is one or
                # more word-characters, followed by zero or more
                # whitespace characters and a ':', then one or
                # more whitespace characters, followed by group 2,
                # which is one or more digit characters,
                # then one or more whitespace characters followed by
                # a literal 'kB' or 'KiB' sequence, e.g. as below
                # total  :              3809340 kB
                # total  :              3809340 KiB
                # Normalise the value to MBs
                regex_obj = re.compile(r"^(\w+)\s*:\s+(\d+)\s\w+")
                expected = {}

                for line in output.stdout.split('\n'):
                    match_obj = regex_obj.search(line)
                    # Due to the extra space in the list
                    if match_obj is not None:
                        name = match_obj.group(1)
                        value = match_obj.group(2)
                        expected[name] = int(value) // 1024

                # Get the actual value from /proc/meminfo and normalise to MBs
                actual['total'] = int(utils_memory.memtotal()) // 1024
                actual['free'] = int(utils_memory.freememtotal()) // 1024
                actual['buffers'] = int(
                    utils_memory.read_from_meminfo('Buffers')) // 1024
                actual['cached'] = int(
                    utils_memory.read_from_meminfo('Cached')) // 1024

                # Currently the delta value is kept at 200 MB this can be
                # tuned based on the accuracy
                # Check subtests.cfg for more details
                delta = int(params.get("delta"))
                output = virsh_check_nodememtats(actual, expected, delta)
                deltas.append(output)

            else:
                test.fail("Command virsh nodememstats %s not "
                          "succeeded:\n%s" % (option, status))

    # Recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Print the deviated values for all iterations
    if status_error == "no":
        logging.debug("The following is the deviations from "
                      "the actual(/proc/meminfo) and expected"
                      " value(output of virsh nodememstats)")

        for i in range(itr):
            logging.debug("iteration %d:", i)
            for index, name in enumerate(name_stats):
                logging.debug("%19s : %d", name, deltas[i][name])
Code example #13
def run(test, params, env):
    """
    Tests KSM (Kernel Shared Memory) capability by allocating and filling
    KVM guests' memory using various values. KVM sets the memory as
    MADV_MERGEABLE so all of the VMs' memory can be merged. The workers in
    the guest write to a tmpfs filesystem, thus allocations are not limited
    by the process max memory, only by the VM's memory. Two test modes are
    supported - serial and parallel.

    Serial mode - uses multiple VMs, allocates memory per guest and always
                  verifies the correct amount of shared memory.
                  0) Prints out the setup and initialize guest(s)
                  1) Fills guest with the same number (S1)
                  2) Random fill on the first guest
                  3) Random fill of the remaining VMs one by one until the
                     memory is completely filled (KVM stops machines which
                     ask for additional memory until there is available
                     memory) (S2, shouldn't finish)
                  4) Destroy all VMs but the last one
                  5) Checks the last VM's memory for corruption
    Parallel mode - uses one VM with multiple allocator workers. Executes
                   scenarios in parallel to put more stress on the KVM.
                   0) Prints out the setup and initialize guest(s)
                   1) Fills memory with the same number (S1)
                   2) Fills memory with random numbers (S2)
                   3) Verifies all pages
                   4) Fills memory with the same number (S2)
                   5) Changes the last 96B (S3)

    Scenarios:
    S1) Fill all vms with the same value (all pages should be merged into 1)
    S2) Random fill (all pages should be split)
    S3) Fill last 96B (change only last 96B of each page; some pages will be
                      merged; there was a bug with data corruption)
    Every worker has a unique random key so we are able to verify the filled
    values.

    :param test: kvm test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.

    :param cfg: ksm_swap - use swap?
    :param cfg: ksm_overcommit_ratio - memory overcommit (serial mode only)
    :param cfg: ksm_parallel_ratio - number of workers (parallel mode only)
    :param cfg: ksm_host_reserve - override memory reserve on host in MB
    :param cfg: ksm_guest_reserve - override memory reserve on guests in MB
    :param cfg: ksm_mode - test mode {serial, parallel}
    :param cfg: ksm_perf_ratio - performance ratio, increase it when your
                                 machine is too slow
    """
    def _start_allocator(vm, session, timeout):
        """
        Execute ksm_overcommit_guest.py on guest, wait until it's initialized.

        :param vm: VM object.
        :param session: Remote session to a VM object.
        :param timeout: Timeout that will be used to verify if
                ksm_overcommit_guest.py started properly.
        """
        logging.debug("Starting ksm_overcommit_guest.py on guest %s", vm.name)
        session.sendline("python /tmp/ksm_overcommit_guest.py")
        try:
            session.read_until_last_line_matches(["PASS:", "FAIL:"], timeout)
        except aexpect.ExpectProcessTerminatedError as details:
            e_msg = ("Command ksm_overcommit_guest.py on vm '%s' failed: %s" %
                     (vm.name, str(details)))
            test.fail(e_msg)

    def _execute_allocator(command, vm, session, timeout):
        """
        Execute a given command on ksm_overcommit_guest.py main loop,
        indicating the vm the command was executed on.

        :param command: Command that will be executed.
        :param vm: VM object.
        :param session: Remote session to VM object.
        :param timeout: Timeout used to verify expected output.

        :return: Tuple (match index, data)
        """
        logging.debug(
            "Executing '%s' on ksm_overcommit_guest.py loop, "
            "vm: %s, timeout: %s", command, vm.name, timeout)
        session.sendline(command)
        try:
            (match,
             data) = session.read_until_last_line_matches(["PASS:", "FAIL:"],
                                                          timeout)
        except aexpect.ExpectProcessTerminatedError as details:
            e_msg = ("Failed to execute command '%s' on "
                     "ksm_overcommit_guest.py, vm '%s': %s" %
                     (command, vm.name, str(details)))
            test.fail(e_msg)
        return (match, data)

    def get_ksmstat():
        """
        Return sharing memory by ksm in MB

        :return: memory in MB
        """
        fpages = open('/sys/kernel/mm/ksm/pages_sharing')
        ksm_pages = int(fpages.read())
        fpages.close()
        return ((ksm_pages * 4096) / 1e6)

    def initialize_guests():
        """
        Initialize guests (fill their memories with specified patterns).
        """
        logging.info("Phase 1: filling guest memory pages")
        for session in lsessions:
            vm = lvms[lsessions.index(session)]

            logging.debug("Turning off swap on vm %s", vm.name)
            session.cmd("swapoff -a", timeout=300)

            # Start the allocator
            _start_allocator(vm, session, 60 * perf_ratio)

        # Execute allocator on guests
        for i in range(0, vmsc):
            vm = lvms[i]

            cmd = "mem = MemFill(%d, %s, %s)" % (ksm_size, skeys[i], dkeys[i])
            _execute_allocator(cmd, vm, lsessions[i], 60 * perf_ratio)

            cmd = "mem.value_fill(%d)" % skeys[0]
            _execute_allocator(cmd, vm, lsessions[i],
                               fill_base_timeout * 2 * perf_ratio)

            # Let ksm_overcommit_guest.py do its job
            # (until shared mem reaches expected value)
            shm = 0
            j = 0
            logging.debug("Target shared meminfo for guest %s: %s", vm.name,
                          ksm_size)
            while ((new_ksm and (shm < (ksm_size * (i + 1))))
                   or (not new_ksm and (shm < (ksm_size)))):
                if j > 64:
                    logging.debug(utils_test.get_memory_info(lvms))
                    test.error("SHM didn't merge the memory until "
                               "the DL on guest: %s" % vm.name)
                pause = ksm_size / 200 * perf_ratio
                logging.debug("Waiting %ds before proceeding...", pause)
                time.sleep(pause)
                if (new_ksm):
                    shm = get_ksmstat()
                else:
                    shm = vm.get_shared_meminfo()
                logging.debug(
                    "Shared meminfo for guest %s after "
                    "iteration %s: %s", vm.name, j, shm)
                j += 1

        # Keep some reserve
        pause = ksm_size / 200 * perf_ratio
        logging.debug("Waiting %ds before proceeding...", pause)
        time.sleep(pause)

        logging.debug(utils_test.get_memory_info(lvms))
        logging.info("Phase 1: PASS")

    def separate_first_guest():
        """
        Separate memory of the first guest by generating special random series
        """
        logging.info("Phase 2: Split the pages on the first guest")

        cmd = "mem.static_random_fill()"
        data = _execute_allocator(cmd, lvms[0], lsessions[0],
                                  fill_base_timeout * 2 * perf_ratio)[1]

        r_msg = data.splitlines()[-1]
        logging.debug("Return message of static_random_fill: %s", r_msg)
        out = int(r_msg.split()[4])
        logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s", ksm_size,
                      out, (ksm_size * 1000 / out))
        logging.debug(utils_test.get_memory_info(lvms))
        logging.debug("Phase 2: PASS")

    def split_guest():
        """
        Sequential split of pages on guests up to memory limit
        """
        logging.info("Phase 3a: Sequential split of pages on guests up to "
                     "memory limit")
        last_vm = 0
        session = None
        vm = None
        for i in range(1, vmsc):
            # Check VMs
            for j in range(0, vmsc):
                if not lvms[j].is_alive():
                    e_msg = ("VM %d died while executing static_random_fill on"
                             " VM %d in allocator loop" % (j, i))
                    test.fail(e_msg)
            vm = lvms[i]
            session = lsessions[i]
            cmd = "mem.static_random_fill()"
            logging.debug(
                "Executing %s on ksm_overcommit_guest.py loop, "
                "vm: %s", cmd, vm.name)
            session.sendline(cmd)

            out = ""
            try:
                logging.debug("Watching host mem while filling vm %s memory",
                              vm.name)
                while (not out.startswith("PASS")
                       and not out.startswith("FAIL")):
                    if not vm.is_alive():
                        e_msg = ("VM %d died while executing "
                                 "static_random_fill on allocator loop" % i)
                        test.fail(e_msg)
                    free_mem = int(utils_memory.read_from_meminfo("MemFree"))
                    if (ksm_swap):
                        free_mem = (
                            free_mem +
                            int(utils_memory.read_from_meminfo("SwapFree")))
                    logging.debug("Free memory on host: %d", free_mem)

                    # We need to keep some memory for python to run.
                    if (free_mem < 64000) or (ksm_swap and free_mem <
                                              (450000 * perf_ratio)):
                        vm.pause()
                        for j in range(0, i):
                            lvms[j].destroy(gracefully=False)
                        time.sleep(20)
                        vm.resume()
                        logging.debug("Only %s free memory, killing %d guests",
                                      free_mem, (i - 1))
                        last_vm = i
                    out = session.read_nonblocking(0.1, 1)
                    time.sleep(2)
            except OSError:
                logging.debug("Only %s host free memory, killing %d guests",
                              free_mem, (i - 1))
                logging.debug("Stopping %s", vm.name)
                vm.pause()
                for j in range(0, i):
                    logging.debug("Destroying %s", lvms[j].name)
                    lvms[j].destroy(gracefully=False)
                time.sleep(20)
                vm.resume()
                last_vm = i

            if last_vm != 0:
                break
            logging.debug("Memory filled for guest %s", vm.name)

        logging.info("Phase 3a: PASS")

        logging.info("Phase 3b: Verify memory of the max stressed VM")
        for i in range(last_vm + 1, vmsc):
            lsessions[i].close()
            if i == (vmsc - 1):
                logging.debug(utils_test.get_memory_info([lvms[i]]))
            logging.debug("Destroying guest %s", lvms[i].name)
            lvms[i].destroy(gracefully=False)

        # Verify last machine with randomly generated memory
        cmd = "mem.static_random_verify()"
        _execute_allocator(cmd, lvms[last_vm], lsessions[last_vm],
                           (mem / 200 * 50 * perf_ratio))
        logging.debug(utils_test.get_memory_info([lvms[last_vm]]))

        lsessions[last_vm].cmd_output("die()", 20)
        lvms[last_vm].destroy(gracefully=False)
        logging.info("Phase 3b: PASS")

    def split_parallel():
        """
        Parallel page splitting
        """
        logging.info("Phase 1: parallel page splitting")
        # We have to wait until the allocator is finished (it waits 5 seconds
        # to clean the socket)

        session = lsessions[0]
        vm = lvms[0]
        for i in range(1, max_alloc):
            lsessions.append(vm.wait_for_login(timeout=360))

        session.cmd("swapoff -a", timeout=300)

        for i in range(0, max_alloc):
            # Start the allocator
            _start_allocator(vm, lsessions[i], 60 * perf_ratio)

        logging.info("Phase 1: PASS")

        logging.info("Phase 2a: Simultaneous merging")
        logging.debug("Memory used by allocator on guests = %dMB",
                      (ksm_size / max_alloc))

        for i in range(0, max_alloc):
            cmd = "mem = MemFill(%d, %s, %s)" % (
                (ksm_size / max_alloc), skeys[i], dkeys[i])
            _execute_allocator(cmd, vm, lsessions[i], 60 * perf_ratio)

            cmd = "mem.value_fill(%d)" % (skeys[0])
            _execute_allocator(cmd, vm, lsessions[i],
                               fill_base_timeout * perf_ratio)

        # Wait until ksm_overcommit_guest.py merges the pages
        # (until shared memory reaches ksm_size)
        shm = 0
        i = 0
        logging.debug("Target shared memory size: %s", ksm_size)
        while (shm < ksm_size):
            if i > 64:
                logging.debug(utils_test.get_memory_info(lvms))
                test.error("SHM didn't merge the memory until DL")
            pause = ksm_size / 200 * perf_ratio
            logging.debug("Waiting %ds before proceed...", pause)
            time.sleep(pause)
            if (new_ksm):
                shm = get_ksmstat()
            else:
                shm = vm.get_shared_meminfo()
            logging.debug("Shared meminfo after attempt %s: %s", i, shm)
            i += 1

        logging.debug(utils_test.get_memory_info([vm]))
        logging.info("Phase 2a: PASS")

        logging.info("Phase 2b: Simultaneous spliting")
        # Actual splitting
        for i in range(0, max_alloc):
            cmd = "mem.static_random_fill()"
            data = _execute_allocator(cmd, vm, lsessions[i],
                                      fill_base_timeout * perf_ratio)[1]

            data = data.splitlines()[-1]
            logging.debug(data)
            out = int(data.split()[4])
            logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s",
                          (ksm_size / max_alloc), out,
                          (ksm_size * 1000 / out / max_alloc))
        logging.debug(utils_test.get_memory_info([vm]))
        logging.info("Phase 2b: PASS")

        logging.info("Phase 2c: Simultaneous verification")
        for i in range(0, max_alloc):
            cmd = "mem.static_random_verify()"
            data = _execute_allocator(cmd, vm, lsessions[i],
                                      (mem / 200 * 50 * perf_ratio))[1]
        logging.info("Phase 2c: PASS")

        logging.info("Phase 2d: Simultaneous merging")
        # Actual merging (refill pages with the identical value pattern)
        for i in range(0, max_alloc):
            cmd = "mem.value_fill(%d)" % skeys[0]
            data = _execute_allocator(cmd, vm, lsessions[i],
                                      fill_base_timeout * 2 * perf_ratio)[1]
        logging.debug(utils_test.get_memory_info([vm]))
        logging.info("Phase 2d: PASS")

        logging.info("Phase 2e: Simultaneous verification")
        for i in range(0, max_alloc):
            cmd = "mem.value_check(%d)" % skeys[0]
            data = _execute_allocator(cmd, vm, lsessions[i],
                                      (mem / 200 * 50 * perf_ratio))[1]
        logging.info("Phase 2e: PASS")

        logging.info("Phase 2f: Simultaneous spliting last 96B")
        for i in range(0, max_alloc):
            cmd = "mem.static_random_fill(96)"
            data = _execute_allocator(cmd, vm, lsessions[i],
                                      fill_base_timeout * perf_ratio)[1]

            data = data.splitlines()[-1]
            out = int(data.split()[4])
            logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s",
                          ksm_size / max_alloc, out,
                          (ksm_size * 1000 / out / max_alloc))

        logging.debug(utils_test.get_memory_info([vm]))
        logging.info("Phase 2f: PASS")

        logging.info("Phase 2g: Simultaneous verification last 96B")
        for i in range(0, max_alloc):
            cmd = "mem.static_random_verify(96)"
            _, data = _execute_allocator(cmd, vm, lsessions[i],
                                         (mem / 200 * 50 * perf_ratio))
        logging.debug(utils_test.get_memory_info([vm]))
        logging.info("Phase 2g: PASS")

        logging.debug("Cleaning up...")
        for i in range(0, max_alloc):
            lsessions[i].cmd_output("die()", 20)
        session.close()
        vm.destroy(gracefully=False)

    # Main test code
    logging.info("Starting phase 0: Initialization")
    if process.run("ps -C ksmtuned", ignore_status=True).exit_status == 0:
        logging.info("Killing ksmtuned...")
        process.run("killall ksmtuned")
    new_ksm = False
    if (os.path.exists("/sys/kernel/mm/ksm/run")):
        process.run("echo 50 > /sys/kernel/mm/ksm/sleep_millisecs", shell=True)
        process.run("echo 5000 > /sys/kernel/mm/ksm/pages_to_scan", shell=True)
        process.run("echo 1 > /sys/kernel/mm/ksm/run", shell=True)

        e_up = "/sys/kernel/mm/transparent_hugepage/enabled"
        e_rh = "/sys/kernel/mm/redhat_transparent_hugepage/enabled"
        if os.path.exists(e_up):
            process.run("echo 'never' > %s" % e_up, shell=True)
        if os.path.exists(e_rh):
            process.run("echo 'never' > %s" % e_rh, shell=True)
        new_ksm = True
    else:
        try:
            process.run("modprobe ksm")
            process.run("ksmctl start 5000 100")
        except process.CmdError as details:
            test.fail("Failed to load KSM: %s" % details)

    # host_reserve: mem reserve kept for the host system to run
    host_reserve = int(params.get("ksm_host_reserve", -1))
    if (host_reserve == -1):
        try:
            available = utils_memory.read_from_meminfo("MemAvailable")
        except process.CmdError:  # ancient kernels
            utils_memory.drop_caches()
            available = utils_memory.read_from_meminfo("MemFree")
        # default host_reserve = UsedMem + one_minimal_guest(128MB)
        # later we add 64MB per additional guest
        host_reserve = ((utils_memory.memtotal() - available) / 1024 + 128)
        # using default reserve
        _host_reserve = True
    else:
        _host_reserve = False

    # guest_reserve: mem reserve kept to avoid guest OS to kill processes
    guest_reserve = int(params.get("ksm_guest_reserve", -1))
    if (guest_reserve == -1):
        # default guest_reserve = minimal_system_mem(256MB)
        # later we add tmpfs overhead
        guest_reserve = 256
        # using default reserve
        _guest_reserve = True
    else:
        _guest_reserve = False

    max_vms = int(params.get("max_vms", 2))
    overcommit = float(params.get("ksm_overcommit_ratio", 2.0))
    max_alloc = int(params.get("ksm_parallel_ratio", 1))

    # vmsc: count of all used VMs
    vmsc = int(overcommit) + 1
    vmsc = max(vmsc, max_vms)
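    # e.g. ksm_overcommit_ratio=2.0 gives vmsc = 3, unless max_vms is larger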

    if (params['ksm_mode'] == "serial"):
        max_alloc = vmsc
        if _host_reserve:
            # First round of additional guest reserves
            host_reserve += vmsc * 64
            _host_reserve = vmsc

    host_mem = (int(utils_memory.memtotal()) / 1024 - host_reserve)

    ksm_swap = False
    if params.get("ksm_swap") == "yes":
        ksm_swap = True

    # Performance ratio
    perf_ratio = params.get("ksm_perf_ratio")
    if perf_ratio:
        perf_ratio = float(perf_ratio)
    else:
        perf_ratio = 1

    if (params['ksm_mode'] == "parallel"):
        vmsc = 1
        overcommit = 1
        mem = host_mem
        # 32bit system adjustment
        if "64" not in params.get("vm_arch_name"):
            logging.debug("Probably i386 guest architecture, "
                          "max allocator mem = 2G")
            # Guest can have more than 2G but
            # kvm mem + 1MB (allocator itself) can't
            if (host_mem > 3100):
                mem = 3100

        if os.popen("uname -i").readline().startswith("i386"):
            logging.debug("Host is i386 architecture, max guest mem is 2G")
            # Guest system with qemu overhead (64M) can't have more than 2G
            if mem > 3100 - 64:
                mem = 3100 - 64

    else:
        # mem: Memory of the guest systems. Maximum must be less than
        # host's physical ram
        mem = int(overcommit * host_mem / vmsc)
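        # e.g. host_mem=7000 MB, overcommit=2.0, vmsc=3 -> mem = 4666 MB per
        # guest, i.e. roughly twice the host memory in total
        # (illustrative numbers only)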

        # 32bit system adjustment
        if not params['image_name'].endswith("64"):
            logging.debug("Probably i386 guest architecture, "
                          "max allocator mem = 2G")
            # Guest can have more than 2G but
            # kvm mem + 1MB (allocator itself) can't
            if mem - guest_reserve - 1 > 3100:
                vmsc = int(
                    math.ceil(
                        (host_mem * overcommit) / (3100 + guest_reserve)))
                if _host_reserve:
                    host_reserve += (vmsc - _host_reserve) * 64
                    host_mem -= (vmsc - _host_reserve) * 64
                    _host_reserve = vmsc
                mem = int(math.floor(host_mem * overcommit / vmsc))

        if os.popen("uname -i").readline().startswith("i386"):
            logging.debug("Host is i386 architecture, max guest mem is 2G")
            # Guest system with qemu overhead (64M) can't have more than 2G
            if mem > 3100 - 64:
                vmsc = int(math.ceil((host_mem * overcommit) / (3100 - 64.0)))
                if _host_reserve:
                    host_reserve += (vmsc - _host_reserve) * 64
                    host_mem -= (vmsc - _host_reserve) * 64
                    _host_reserve = vmsc
                mem = int(math.floor(host_mem * overcommit / vmsc))

    # 0.055 represents OS + TMPFS additional reserve per guest ram MB
    if _guest_reserve:
        guest_reserve += math.ceil(mem * 0.055)
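        # e.g. mem=4096 MB -> ceil(4096 * 0.055) = 226 MB of tmpfs/OS overhead,
        # so the default guest_reserve becomes 256 + 226 = 482 MB
        # (illustrative numbers only)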

    swap = int(utils_memory.read_from_meminfo("SwapTotal")) / 1024

    logging.debug("Overcommit = %f", overcommit)
    logging.debug("True overcommit = %f ",
                  (float(vmsc * mem) / float(host_mem)))
    logging.debug("Host memory = %dM", host_mem)
    logging.debug("Guest memory = %dM", mem)
    logging.debug("Using swap = %s", ksm_swap)
    logging.debug("Swap = %dM", swap)
    logging.debug("max_vms = %d", max_vms)
    logging.debug("Count of all used VMs = %d", vmsc)
    logging.debug("Performance_ratio = %f", perf_ratio)

    # Generate unique keys for random series
    skeys = []
    dkeys = []
    for i in range(0, max(vmsc, max_alloc)):
        key = random.randrange(0, 255)
        while key in skeys:
            key = random.randrange(0, 255)
        skeys.append(key)

        key = random.randrange(0, 999)
        while key in dkeys:
            key = random.randrange(0, 999)
        dkeys.append(key)

    logging.debug("skeys: %s", skeys)
    logging.debug("dkeys: %s", dkeys)

    lvms = []
    lsessions = []

    # As we don't know the number and memory amount of VMs in advance,
    # we need to specify and create them here
    vm_name = params["main_vm"]
    params['mem'] = mem
    params['vms'] = vm_name
    # Associate pidfile name
    params['pid_' + vm_name] = utils_misc.generate_tmp_file_name(
        vm_name, 'pid')
    if not params.get('extra_params'):
        params['extra_params'] = ' '
    params['extra_params_' + vm_name] = params.get('extra_params')
    params['extra_params_' + vm_name] += (" -pidfile %s" %
                                          (params.get('pid_' + vm_name)))
    params['extra_params'] = params.get('extra_params_' + vm_name)

    # ksm_size: amount of memory used by allocator
    ksm_size = mem - guest_reserve
    logging.debug("Memory used by allocator on guests = %dM", ksm_size)
    fill_base_timeout = ksm_size / 10

    # Creating the first guest
    env_process.preprocess_vm(test, params, env, vm_name)
    lvms.append(env.get_vm(vm_name))
    if not lvms[0]:
        test.error("VM object not found in environment")
    if not lvms[0].is_alive():
        test.error("VM seems to be dead; Test requires a living VM")

    logging.debug("Booting first guest %s", lvms[0].name)

    lsessions.append(lvms[0].wait_for_login(timeout=360))
    # Associate vm PID
    try:
        tmp = open(params.get('pid_' + vm_name), 'r')
        params['pid_' + vm_name] = int(tmp.readline())
    except Exception:
        test.fail("Could not get PID of %s" % (vm_name))

    # Creating other guest systems
    for i in range(1, vmsc):
        vm_name = "vm" + str(i + 1)
        params['pid_' + vm_name] = utils_misc.generate_tmp_file_name(
            vm_name, 'pid')
        params['extra_params_' + vm_name] = params.get('extra_params')
        params['extra_params_' + vm_name] += (" -pidfile %s" %
                                              (params.get('pid_' + vm_name)))
        params['extra_params'] = params.get('extra_params_' + vm_name)

        # Last VM is later used to run more allocators simultaneously
        lvms.append(lvms[0].clone(vm_name, params))
        env.register_vm(vm_name, lvms[i])
        params['vms'] += " " + vm_name

        logging.debug("Booting guest %s", lvms[i].name)
        lvms[i].create()
        if not lvms[i].is_alive():
            test.error("VM %s seems to be dead; Test requires a"
                       "living VM" % lvms[i].name)

        lsessions.append(lvms[i].wait_for_login(timeout=360))
        try:
            tmp = open(params.get('pid_' + vm_name), 'r')
            params['pid_' + vm_name] = int(tmp.readline())
        except Exception:
            test.fail("Could not get PID of %s" % (vm_name))

    # Let guests rest a little bit :-)
    pause = vmsc * 2 * perf_ratio
    logging.debug("Waiting %ds before proceed", pause)
    time.sleep(vmsc * 2 * perf_ratio)
    logging.debug(utils_test.get_memory_info(lvms))

    # Copy ksm_overcommit_guest.py into guests
    vksmd_src = os.path.join(data_dir.get_shared_dir(), "scripts",
                             "ksm_overcommit_guest.py")
    dst_dir = "/tmp"
    for vm in lvms:
        vm.copy_files_to(vksmd_src, dst_dir)
    logging.info("Phase 0: PASS")

    if params['ksm_mode'] == "parallel":
        logging.info("Starting KSM test parallel mode")
        split_parallel()
        logging.info("KSM test parallel mode: PASS")
    elif params['ksm_mode'] == "serial":
        logging.info("Starting KSM test serial mode")
        initialize_guests()
        separate_first_guest()
        split_guest()
        logging.info("KSM test serial mode: PASS")
コード例 #15
0
def run(test, params, env):
    """
    Run stress as a memory stress in guest for THP testing

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """

    nr_ah = []

    debugfs_flag = 1
    debugfs_path = os.path.join(test.tmpdir, 'debugfs')
    mem = int(params.get("mem"))
    qemu_mem = int(params.get("qemu_mem", "64"))
    hugetlbfs_path = params.get("hugetlbfs_path", "/proc/sys/vm/nr_hugepages")
    vm = env.get_vm(params["main_vm"])

    error_context.context("smoke test setup")
    if not os.path.ismount(debugfs_path):
        if not os.path.isdir(debugfs_path):
            os.makedirs(debugfs_path)
        try:
            process.system("mount -t debugfs none %s" % debugfs_path,
                           shell=True)
        except Exception:
            debugfs_flag = 0

    try:
        # Allocated free memory to hugetlbfs
        mem_free = int(utils_memory.read_from_meminfo('MemFree')) / 1024
        mem_swap = int(utils_memory.read_from_meminfo('SwapFree')) / 1024
        hugepage_size = (int(utils_memory.read_from_meminfo('Hugepagesize')) /
                         1024)
        nr_hugetlbfs = (mem_free + mem_swap - mem - qemu_mem) / hugepage_size
        fd = open(hugetlbfs_path, "w")
        fd.write(str(nr_hugetlbfs))
        fd.close()

        error_context.context("Memory stress test")

        nr_ah.append(int(utils_memory.read_from_meminfo('AnonHugePages')))
        if nr_ah[0] <= 0:
            test.fail("VM is not using transparent hugepage")

        # Run stress memory heavy in guest
        test_mem = float(mem)*float(params.get("mem_ratio", 0.8))
        stress_args = "--cpu 4 --io 4 --vm 2 --vm-bytes %sM" % int(test_mem / 2)
        stress_test = utils_test.VMStress(vm, "stress", params, stress_args=stress_args)
        stress_test.load_stress_tool()
        time.sleep(int(params.get("stress_time", 120)))
        nr_ah.append(int(utils_memory.read_from_meminfo('AnonHugePages')))
        logging.debug("The huge page using for guest is: %s" % nr_ah)

        if nr_ah[1] <= nr_ah[0]:
            logging.warning(
                "VM didn't use transparent hugepages under memory stress")

        if debugfs_flag == 1:
            if int(open(hugetlbfs_path, 'r').read()) <= 0:
                test.fail("KVM doesn't use transparenthugepage")

        logging.info("memory stress test finished")
        stress_test.unload_stress()
        stress_test.clean()
    finally:
        error_context.context("all tests cleanup")
        fd = open(hugetlbfs_path, "w")
        fd.write("0")
        fd.close()
        if os.path.ismount(debugfs_path):
            process.run("umount %s" % debugfs_path, shell=True)
        if os.path.isdir(debugfs_path):
            os.removedirs(debugfs_path)
コード例 #16
0
ファイル: ksm_overcommit.py プロジェクト: arges/tp-qemu
            utils.run("echo 'never' > %s" % e_rh)
        new_ksm = True
    else:
        try:
            utils.run("modprobe ksm")
            utils.run("ksmctl start 5000 100")
        except error.CmdError, details:
            raise error.TestFail("Failed to load KSM: %s" % details)

    # host_reserve: mem reserve kept for the host system to run
    host_reserve = int(params.get("ksm_host_reserve", -1))
    if (host_reserve == -1):
        # default host_reserve = UsedMem + one_minimal_guest(128MB)
        # later we add 64MB per additional guest
        host_reserve = ((utils_memory.memtotal()
                         - utils_memory.read_from_meminfo("MemFree"))
                        / 1024 + 128)
        # using default reserve
        _host_reserve = True
    else:
        _host_reserve = False

    # guest_reserve: mem reserve kept to avoid guest OS to kill processes
    guest_reserve = int(params.get("ksm_guest_reserve", -1))
    if (guest_reserve == -1):
        # default guest_reserve = minimal_system_mem(256MB)
        # later we add tmpfs overhead
        guest_reserve = 256
        # using default reserve
        _guest_reserve = True
    else:
コード例 #17
0
def run(test, params, env):
    """
    Qemu memory hotplug test:
    1) Boot guest with -m option
    2) Hotplug memory to guest with option reserve enable/disabled
    3) Check memory inside guest
    4) Check hugepages on host

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def get_hp_rsvd():
        """
        A generator to get HugePages_Rsvd until it does not change
        """
        stable = False
        hp_rsvd = utils_memory.read_from_meminfo("HugePages_Rsvd")
        while True:
            yield stable
            cur_rsvd = utils_memory.read_from_meminfo("HugePages_Rsvd")
            stable = cur_rsvd == hp_rsvd
            hp_rsvd = cur_rsvd

    vm = env.get_vm(params["main_vm"])
    vm.wait_for_login()
    mem_name = params["target_mems"]
    hp_size = utils_memory.read_from_meminfo("Hugepagesize")
    hp_total = utils_memory.read_from_meminfo("HugePages_Total")
    size_target_mem = params["size_mem_%s" % mem_name]
    hp_target = int(float(normalize_data_size(size_target_mem, "K")) / hp_size)\
        + int(hp_total)
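    # Reserve enough hugepages for the existing pool plus the memory being
    # hotplugged (plug size in KiB divided by the hugepage size in KiB).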
    process.system("echo %s > /proc/sys/vm/nr_hugepages" % hp_target,
                   shell=True)
    hotplug_test = MemoryHotplugTest(test, params, env)
    hotplug_test.hotplug_memory(vm, mem_name)
    hotplug_test.check_memory(vm)
    timeout = int(params.get("check_timeout", 60))
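    # Poll the generator until two consecutive HugePages_Rsvd readings match;
    # with these wait_for() arguments that presumably means an initial 5 s
    # delay and a re-check every 3 s, for up to check_timeout seconds.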
    rsvd_is_stable = get_hp_rsvd()
    if not wait_for(lambda: next(rsvd_is_stable), timeout, 5, 3):
        test.error("HugePages_Rsvd is not stable in %ss" % timeout)
    try:
        hugepage_rsvd = utils_memory.read_from_meminfo("HugePages_Rsvd")
        logging.info("HugePages_Rsvd is %s after hotplug memory", hugepage_rsvd)
        if params["reserve_mem"] == "yes":
            hugepages_total = utils_memory.read_from_meminfo("HugePages_Total")
            hugepages_free = utils_memory.read_from_meminfo("HugePages_Free")
            hugepagesize = utils_memory.read_from_meminfo("Hugepagesize")
            logging.info("HugePages_Total is %s, hugepages_free is %s",
                         hugepages_total, hugepages_free)
            plug_size = params["size_mem_%s" % mem_name]
            numa_size = params["size_mem_%s" % params["mem_devs"]]
            expected_size = float(normalize_data_size(plug_size, "K")) + \
                float(normalize_data_size(numa_size, "K"))
            page_number = hugepages_total - hugepages_free + hugepage_rsvd
            if page_number * hugepagesize != int(expected_size):
                test.fail("HugePages_Total - HugePages_Free + HugePages_Rsvd is"
                          "not equal to memory backend size")
        else:
            if hugepage_rsvd != 0:
                test.fail("HugePages_Rsvd is not 0 when reserve option is off")
    finally:
        vm.destroy()
コード例 #18
0
def run(test, params, env):
    """
    Transparent hugepage relocated test with quantification.
    The number of pages the THP daemon scans per round is set to 4096, its
    scan sleep time to 10 seconds and its alloc sleep time to 1 minute.
    AnonHugePages usage should therefore grow by about 16M every 10 seconds;
    when the system is busy and allocating hugepages for the guest fails, the
    value stays unchanged for up to 1 minute. The test samples the value every
    10 seconds and checks that it follows these rules.

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    def nr_hugepage_check(sleep_time, wait_time):
        time_last = 0
        while True:
            value = int(utils_memory.read_from_meminfo("AnonHugePages"))
            nr_hugepages.append(value)
            time_stamp = time.time()
            if time_last != 0:
                if nr_hugepages[-2] != nr_hugepages[-1]:
                    time_last = time_stamp
                elif time_stamp - time_last > wait_time:
                    logging.info("Huge page size stop changed")
                    break
            else:
                time_last = time_stamp
            time.sleep(sleep_time)

    logging.info("Relocated test start")
    login_timeout = float(params.get("login_timeout", 360))
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=login_timeout)

    free_memory = utils_memory.read_from_meminfo("MemFree")
    hugepage_size = utils_memory.read_from_meminfo("Hugepagesize")
    mem = params.get("mem")
    vmsm = int(mem) + 128
    hugetlbfs_path = params.get("hugetlbfs_path", "/proc/sys/vm/nr_hugepages")
    if vmsm < int(free_memory) / 1024:
        nr_hugetlbfs = vmsm * 1024 / int(hugepage_size)
    else:
        nr_hugetlbfs = None
    # Get dd speed in host
    start_time = time.time()
    cmd = "dd if=/dev/urandom of=/tmp/speed_test bs=4K count=256"
    s, o = commands.getstatusoutput(cmd)
    end_time = time.time()
    dd_timeout = vmsm * (end_time - start_time) * 2
    nr_hugepages = []
    thp_cfg = params.get("thp_test_config")
    s_time = int(re.findall("scan_sleep_millisecs:(\d+)", thp_cfg)[0]) / 1000
    w_time = int(re.findall("alloc_sleep_millisecs:(\d+)", thp_cfg)[0]) / 1000

    try:
        logging.info("Turn off swap in guest")
        s, o = session.cmd_status_output("swapoff -a")
        if s != 0:
            logging.warning("Didn't turn off swap in guest")
        s, o = session.cmd_status_output("cat /proc/meminfo")
        mem_free_filter = "MemFree:\s+(.\d+)\s+(\w+)"
        guest_mem_free, guest_unit = re.findall(mem_free_filter, o)[0]
        if re.findall("[kK]", guest_unit):
            guest_mem_free = str(int(guest_mem_free) / 1024)
        elif re.findall("[gG]", guest_unit):
            guest_mem_free = str(int(guest_mem_free) * 1024)
        elif re.findall("[mM]", guest_unit):
            pass
        else:
            guest_mem_free = str(int(guest_mem_free) / 1024 / 1024)

        file_size = min(1024, int(guest_mem_free) / 2)
        cmd = "mount -t tmpfs -o size=%sM none /mnt" % file_size
        s, o = session.cmd_status_output(cmd)
        if nr_hugetlbfs:
            hugepage_cfg = open(hugetlbfs_path, "w")
            hugepage_cfg.write(str(nr_hugetlbfs))
            hugepage_cfg.close()

        if not os.path.isdir('/space'):
            os.makedirs('/space')
        if os.system("mount -t tmpfs -o size=%sM none /space" % vmsm):
            raise error.TestError("Can not mount tmpfs")

        # Try to make some fragment in memory
        # The total size of fragments is vmsm
        count = vmsm * 1024 / 4
        cmd = "for i in `seq %s`; do dd if=/dev/urandom of=/space/$i" % count
        cmd += " bs=4K count=1 & done"
        logging.info("Start to make fragment in host")
        s, o = commands.getstatusoutput(cmd)
        if s != 0:
            raise error.TestError("Can not dd in host")
    finally:
        s, o = commands.getstatusoutput("umount /space")

    bg = utils_test.BackgroundTest(nr_hugepage_check, (s_time, w_time))
    bg.start()

    while bg.is_alive():
        count = file_size / 2
        cmd = "dd if=/dev/urandom of=/mnt/test bs=2M count=%s" % count
        s, o = session.cmd_status_output(cmd, dd_timeout)

    if bg:
        bg.join()
    mem_increase_step = int(re.findall("pages_to_scan:(\d+)",
                                       thp_cfg)[0]) / 512
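    # 512 base pages of 4 KiB make up one 2 MiB transparent hugepage, so this
    # presumably converts pages_to_scan into the number of hugepages expected
    # per scan round.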
    mem_increase = 0
    w_step = w_time / s_time + 1
    count = 0
    last_value = nr_hugepages.pop()
    while len(nr_hugepages) > 0:
        current = nr_hugepages.pop()
        if current == last_value:
            count += 1
        elif current < last_value:
            if last_value - current < mem_increase_step * 0.95:
                raise error.TestError("Hugepage memory increased too slow")
            mem_increase += last_value - current
            count = 0
        if count > w_step:
            logging.warning("Memory didn't increase in %s s" % (count *
                                                                s_time))
    if mem_increase < file_size * 0.5:
        raise error.TestError("Hugepages allocated can not reach a half: %s/%s"
                              % (mem_increase, file_size))
    session.close()
    logging.info("Relocated test succeed")
コード例 #19
0
def run(test, params, env):
    """
    Test the command virsh memtune

    (1) To get the current memtune parameters
    (2) Change the parameter values
    (3) Check the memtune query updated with the values
    (4) Check whether the mounted cgroup path gets the updated value
    (5) Login to guest and use the memory greater than the assigned value
        and check whether it kills the vm.
    (6) TODO:Check more values and robust scenarios.
    """
    def check_limit(path, expected_value, limit_name):
        """
        Matches the expected and actual output
        (1) Match the output of the virsh memtune
        (2) Match the output of the respective cgroup fs value

        :params: path: memory controller path for a domain
        :params: expected_value: the expected limit value
        :params: limit_name: the limit to be checked
                             hard_limit/soft_limit/swap_hard_limit
        :return: True or False based on the checks
        """
        status_value = True
        # Check 1
        actual_value = virsh.memtune_get(domname, limit_name)
        if actual_value == -1:
            test.fail("the key %s not found in the "
                      "virsh memtune output" % limit_name)
        if actual_value != int(expected_value):
            status_value = False
            logging.error(
                "%s virsh output:\n\tExpected value:%d"
                "\n\tActual value: "
                "%d", limit_name, int(expected_value), int(actual_value))

        # Check 2
        if limit_name == 'hard_limit':
            cg_file_name = '%s/memory.limit_in_bytes' % path
        elif limit_name == 'soft_limit':
            cg_file_name = '%s/memory.soft_limit_in_bytes' % path
        elif limit_name == 'swap_hard_limit':
            cg_file_name = '%s/memory.memsw.limit_in_bytes' % path
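        # These are cgroup v1 memory controller files; on a cgroup v2 host the
        # rough equivalents would be memory.max, memory.high and
        # memory.swap.max.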

        cg_file = None
        try:
            with open(cg_file_name) as cg_file:
                output = cg_file.read()
            value = int(output) / 1024
            if int(expected_value) != int(value):
                status_value = False
                logging.error(
                    "%s cgroup fs:\n\tExpected Value: %d"
                    "\n\tActual Value: "
                    "%d", limit_name, int(expected_value), int(value))
        except IOError:
            status_value = False
            logging.error("Error while reading:\n%s", cg_file_name)

        return status_value

    # Get the vm name, pid of vm and check for alive
    domname = params.get("main_vm")
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    pid = vm.get_pid()
    logging.info("Verify valid cgroup path for VM pid: %s", pid)

    # Resolve the memory cgroup path for a domain
    path = utils_cgroup.resolve_task_cgroup_path(int(pid), "memory")

    # Set the initial memory starting value for test case
    # By default set 1GB less than the total memory
    # In case of total memory is less than 1GB set to 256MB
    # visit subtests.cfg to change these default values
    Memtotal = utils_memory.read_from_meminfo('MemTotal')
    base_mem = params.get("memtune_base_mem")

    if int(Memtotal) < int(base_mem):
        Mem = int(params.get("memtune_min_mem"))
    else:
        Mem = int(Memtotal) - int(base_mem)

    # Initialize error counter
    error_counter = 0

    # Check for memtune command is available in the libvirt version under test
    if not virsh.has_help_command("memtune"):
        test.cancel("Memtune not available in this libvirt version")

    # Run test case with 100kB increase in memory value for each iteration
    while (Mem < Memtotal):
        if virsh.has_command_help_match("memtune", "hard-limit"):
            hard_mem = Mem - int(params.get("memtune_hard_base_mem"))
            options = " --hard-limit %d --live" % hard_mem
            virsh.memtune_set(domname, options)
            if not check_limit(path, hard_mem, "hard_limit"):
                error_counter += 1
        else:
            test.cancel("harlimit option not available in memtune "
                        "cmd in this libvirt version")

        if virsh.has_command_help_match("memtune", "soft-limit"):
            soft_mem = Mem - int(params.get("memtune_soft_base_mem"))
            options = " --soft-limit %d --live" % soft_mem
            virsh.memtune_set(domname, options)
            if not check_limit(path, soft_mem, "soft_limit"):
                error_counter += 1
        else:
            test.cancel("softlimit option not available in memtune "
                        "cmd in this libvirt version")

        if virsh.has_command_help_match("memtune", "swap-hard-limit"):
            swaphard = Mem
            options = " --swap-hard-limit %d --live" % swaphard
            virsh.memtune_set(domname, options)
            if not check_limit(path, swaphard, "swap_hard_limit"):
                error_counter += 1
        else:
            test.cancel("swaplimit option not available in memtune "
                        "cmd in this libvirt version")
        Mem += int(params.get("memtune_hard_base_mem"))

    # Raise error based on error_counter
    if error_counter > 0:
        test.fail("Test failed, consult the previous error messages")
コード例 #20
0
    def split_guest():
        """
        Sequential split of pages on guests up to memory limit
        """
        logging.info("Phase 3a: Sequential split of pages on guests up to "
                     "memory limit")
        last_vm = 0
        session = None
        vm = None
        for i in range(1, vmsc):
            # Check VMs
            for j in range(0, vmsc):
                if not lvms[j].is_alive():
                    e_msg = ("VM %d died while executing static_random_fill on"
                             " VM %d in allocator loop" % (j, i))
                    test.fail(e_msg)
            vm = lvms[i]
            session = lsessions[i]
            cmd = "mem.static_random_fill()"
            logging.debug(
                "Executing %s on ksm_overcommit_guest.py loop, "
                "vm: %s", cmd, vm.name)
            session.sendline(cmd)

            out = ""
            try:
                logging.debug("Watching host mem while filling vm %s memory",
                              vm.name)
                while (not out.startswith("PASS")
                       and not out.startswith("FAIL")):
                    if not vm.is_alive():
                        e_msg = ("VM %d died while executing "
                                 "static_random_fill on allocator loop" % i)
                        test.fail(e_msg)
                    free_mem = int(utils_memory.read_from_meminfo("MemFree"))
                    if (ksm_swap):
                        free_mem = (
                            free_mem +
                            int(utils_memory.read_from_meminfo("SwapFree")))
                    logging.debug("Free memory on host: %d", free_mem)

                    # We need to keep some memory for python to run.
                    if (free_mem < 64000) or (ksm_swap and free_mem <
                                              (450000 * perf_ratio)):
                        vm.pause()
                        for j in range(0, i):
                            lvms[j].destroy(gracefully=False)
                        time.sleep(20)
                        vm.resume()
                        logging.debug("Only %s free memory, killing %d guests",
                                      free_mem, (i - 1))
                        last_vm = i
                    out = session.read_nonblocking(0.1, 1)
                    time.sleep(2)
            except OSError:
                logging.debug("Only %s host free memory, killing %d guests",
                              free_mem, (i - 1))
                logging.debug("Stopping %s", vm.name)
                vm.pause()
                for j in range(0, i):
                    logging.debug("Destroying %s", lvms[j].name)
                    lvms[j].destroy(gracefully=False)
                time.sleep(20)
                vm.resume()
                last_vm = i

            if last_vm != 0:
                break
            logging.debug("Memory filled for guest %s", vm.name)

        logging.info("Phase 3a: PASS")

        logging.info("Phase 3b: Verify memory of the max stressed VM")
        for i in range(last_vm + 1, vmsc):
            lsessions[i].close()
            if i == (vmsc - 1):
                logging.debug(utils_test.get_memory_info([lvms[i]]))
            logging.debug("Destroying guest %s", lvms[i].name)
            lvms[i].destroy(gracefully=False)

        # Verify last machine with randomly generated memory
        cmd = "mem.static_random_verify()"
        _execute_allocator(cmd, lvms[last_vm], lsessions[last_vm],
                           (mem / 200 * 50 * perf_ratio))
        logging.debug(utils_test.get_memory_info([lvms[last_vm]]))

        lsessions[last_vm].cmd_output("die()", 20)
        lvms[last_vm].destroy(gracefully=False)
        logging.info("Phase 3b: PASS")
コード例 #21
0
def run_virsh_nodememstats(test, params, env):
    """
    Test the command virsh nodememstats

    (1) Call the virsh nodememstats command
    (2) Get the output
    (3) Check it against the /proc/meminfo output
    (4) Call the virsh nodememstats command with an unexpected option
    (5) Call the virsh nodememstats command with libvirtd service stop
    """

    # Initialize the variables
    expected = {}
    actual = {}
    deltas = []
    name_stats = ['total', 'free', 'buffers', 'cached']
    itr = int(params.get("itr"))

    def virsh_check_nodememstats(actual_stats, expected_stats, delta):
        """
        Check the nodememstats output value with /proc/meminfo value
        """

        delta_stats = {}
        for name in name_stats:
            delta_stats[name] = abs(actual_stats[name] - expected_stats[name])
            if 'total' in name:
                if not delta_stats[name] == 0:
                    raise error.TestFail("Command 'virsh nodememstats' not"
                                         " succeeded as the value for %s is "
                                         "deviated by %d\nThe total memory "
                                         "value is deviating-check"
                                         % (name, delta_stats[name]))
            else:
                if delta_stats[name] > delta:
                    raise error.TestFail("Command 'virsh nodememstats' not "
                                         "succeeded as the value for %s"
                                         " is deviated by %d"
                                         % (name, delta_stats[name]))
        return delta_stats

    # Prepare libvirtd service
    check_libvirtd = "libvirtd" in params
    if check_libvirtd:
        libvirtd = params.get("libvirtd")
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

    # Get the option for the test case
    option = params.get("virsh_nodememstats_options")

    # Run test case for 10 iterations
    # (default can be changed in subtests.cfg file)
    # and print the final statistics
    for i in range(itr):
        output = virsh.nodememstats(option)

        # Get the status of the virsh command executed
        status = output.exit_status

        # Get status_error option for the test case
        status_error = params.get("status_error")
        if status_error == "yes":
            if status == 0:
                if libvirtd == "off":
                    utils_libvirtd.libvirtd_start()
                    raise error.TestFail("Command 'virsh nodememstats' "
                                         "succeeded with libvirtd service"
                                         " stopped, incorrect")
                else:
                    raise error.TestFail("Command 'virsh nodememstats %s' "
                                         "succeeded (incorrect command)"
                                         % option)

        elif status_error == "no":
            if status == 0:
                # From the beginning of a line, group 1 is one or
                # more word-characters, followed by zero or more
                # whitespace characters and a ':', then one or
                # more whitespace characters, followed by group 2,
                # which is one or more digit characters,
                # then one or more whitespace characters followed by
                # a literal 'kB' or 'KiB' sequence, e.g as below
                # total  :              3809340 kB
                # total  :              3809340 KiB
                # Normalise the value to MBs
                regex_obj = re.compile(r"^(\w+)\s*:\s+(\d+)\s\w+")
                expected = {}

                for line in output.stdout.split('\n'):
                    match_obj = regex_obj.search(line)
                    # Skip lines that do not match (e.g. blank lines)
                    if match_obj is not None:
                        name = match_obj.group(1)
                        value = match_obj.group(2)
                        expected[name] = int(value) / 1024

                # Get the actual value from /proc/meminfo and normalise to MBs
                actual['total'] = int(utils_memory.memtotal()) / 1024
                actual['free'] = int(utils_memory.freememtotal()) / 1024
                actual['buffers'] = int(
                    utils_memory.read_from_meminfo('Buffers')) / 1024
                actual['cached'] = int(
                    utils_memory.read_from_meminfo('Cached')) / 1024

                # Currently the delta value is kept at 200 MB this can be
                # tuned based on the accuracy
                # Check subtests.cfg for more details
                delta = int(params.get("delta"))
                output = virsh_check_nodememstats(actual, expected, delta)
                deltas.append(output)

            else:
                raise error.TestFail("Command virsh nodememstats %s not "
                                     "succeeded:\n%s" % (option, status))

    # Recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Print the deviated values for all iterations
    if status_error == "no":
        logging.debug("The following is the deviations from "
                      "the actual(/proc/meminfo) and expected"
                      " value(output of virsh nodememstats)")

        for i in range(itr):
            logging.debug("iteration %d:", i)
            for index, name in enumerate(name_stats):
                logging.debug("%19s : %d", name, deltas[i][name])
コード例 #22
0
def run(test, params, env):
    """
    Test the command virsh memtune

    (1) To get the current memtune parameters
    (2) Change the parameter values
    (3) Check the memtune query updated with the values
    (4) Check whether the mounted cgroup path gets the updated value
    (5) Login to guest and use the memory greater than the assigned value
        and check whether it kills the vm.
    (6) TODO:Check more values and robust scenarios.
    """

    def check_limit(path, expected_value, limit_name):
        """
        Matches the expected and actual output
        (1) Match the output of the virsh memtune
        (2) Match the output of the respective cgroup fs value

        :params: path: memory controller path for a domain
        :params: expected_value: the expected limit value
        :params: limit_name: the limit to be checked
                             hard_limit/soft_limit/swap_hard_limit
        :return: True or False based on the checks
        """
        status_value = True
        # Check 1
        actual_value = virsh.memtune_get(domname, limit_name)
        if actual_value == -1:
            raise error.TestFail("the key %s not found in the "
                                 "virsh memtune output" % limit_name)
        if actual_value != int(expected_value):
            status_value = False
            logging.error("%s virsh output:\n\tExpected value:%d"
                          "\n\tActual value: "
                          "%d", limit_name,
                          int(expected_value), int(actual_value))

        # Check 2
        if limit_name == 'hard_limit':
            cg_file_name = '%s/memory.limit_in_bytes' % path
        elif limit_name == 'soft_limit':
            cg_file_name = '%s/memory.soft_limit_in_bytes' % path
        elif limit_name == 'swap_hard_limit':
            cg_file_name = '%s/memory.memsw.limit_in_bytes' % path

        cg_file = None
        try:
            try:
                cg_file = open(cg_file_name)
                output = cg_file.read()
                value = int(output) / 1024
                if int(expected_value) != int(value):
                    status_value = False
                    logging.error("%s cgroup fs:\n\tExpected Value: %d"
                                  "\n\tActual Value: "
                                  "%d", limit_name,
                                  int(expected_value), int(value))
            except IOError:
                status_value = False
                logging.error("Error while reading:\n%s", cg_file_name)
        finally:
            if cg_file is not None:
                cg_file.close()

        return status_value

    # Get the vm name, pid of vm and check for alive
    domname = params.get("main_vm")
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    pid = vm.get_pid()
    logging.info("Verify valid cgroup path for VM pid: %s", pid)

    # Resolve the memory cgroup path for a domain
    path = utils_cgroup.resolve_task_cgroup_path(int(pid), "memory")

    # Set the initial memory starting value for test case
    # By default set 1GB less than the total memory
    # In case of total memory is less than 1GB set to 256MB
    # visit subtests.cfg to change these default values
    Memtotal = utils_memory.read_from_meminfo('MemTotal')
    base_mem = params.get("memtune_base_mem")

    if int(Memtotal) < int(base_mem):
        Mem = int(params.get("memtune_min_mem"))
    else:
        Mem = int(Memtotal) - int(base_mem)

    # Initialize error counter
    error_counter = 0

    # Check for memtune command is available in the libvirt version under test
    if not virsh.has_help_command("memtune"):
        raise error.TestNAError(
            "Memtune not available in this libvirt version")

    # Run test case with 100kB increase in memory value for each iteration
    while (Mem < Memtotal):
        if virsh.has_command_help_match("memtune", "hard-limit"):
            hard_mem = Mem - int(params.get("memtune_hard_base_mem"))
            options = " --hard-limit %d --live" % hard_mem
            virsh.memtune_set(domname, options)
            if not check_limit(path, hard_mem, "hard_limit"):
                error_counter += 1
        else:
            raise error.TestNAError("harlimit option not available in memtune "
                                    "cmd in this libvirt version")

        if virsh.has_command_help_match("memtune", "soft-limit"):
            soft_mem = Mem - int(params.get("memtune_soft_base_mem"))
            options = " --soft-limit %d --live" % soft_mem
            virsh.memtune_set(domname, options)
            if not check_limit(path, soft_mem, "soft_limit"):
                error_counter += 1
        else:
            raise error.TestNAError("softlimit option not available in memtune "
                                    "cmd in this libvirt version")

        if virsh.has_command_help_match("memtune", "swap-hard-limit"):
            swaphard = Mem
            options = " --swap-hard-limit %d --live" % swaphard
            virsh.memtune_set(domname, options)
            if not check_limit(path, swaphard, "swap_hard_limit"):
                error_counter += 1
        else:
            raise error.TestNAError("swaplimit option not available in memtune "
                                    "cmd in this libvirt version")
        Mem += int(params.get("memtune_hard_base_mem"))

    # Raise error based on error_counter
    if error_counter > 0:
        raise error.TestFail(
            "Test failed, consult the previous error messages")
コード例 #23
0
def run(test, params, env):
    """
    Run stress as a memory stress in guest for THP testing

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """

    nr_ah = []

    debugfs_flag = 1
    debugfs_path = os.path.join(test.tmpdir, 'debugfs')
    mem = int(params.get("mem"))
    qemu_mem = int(params.get("qemu_mem", "64"))
    hugetlbfs_path = params.get("hugetlbfs_path", "/proc/sys/vm/nr_hugepages")

    error_context.context("smoke test setup")
    if not os.path.ismount(debugfs_path):
        if not os.path.isdir(debugfs_path):
            os.makedirs(debugfs_path)
        try:
            process.system("mount -t debugfs none %s" % debugfs_path,
                           shell=True)
        except Exception:
            debugfs_flag = 0

    try:
        # Allocated free memory to hugetlbfs
        mem_free = int(utils_memory.read_from_meminfo('MemFree')) / 1024
        mem_swap = int(utils_memory.read_from_meminfo('SwapFree')) / 1024
        hugepage_size = (int(utils_memory.read_from_meminfo('Hugepagesize')) /
                         1024)
        nr_hugetlbfs = (mem_free + mem_swap - mem - qemu_mem) / hugepage_size
        fd = open(hugetlbfs_path, "w")
        fd.write(str(nr_hugetlbfs))
        fd.close()

        error_context.context("Memory stress test")

        nr_ah.append(int(utils_memory.read_from_meminfo('AnonHugePages')))
        if nr_ah[0] <= 0:
            test.fail("VM is not using transparent hugepage")

        # Run stress memory heavy in guest
        memory_stress_test = params['thp_memory_stress']
        utils_test.run_virt_sub_test(test,
                                     params,
                                     env,
                                     sub_type=memory_stress_test)

        nr_ah.append(int(utils_memory.read_from_meminfo('AnonHugePages')))
        logging.debug("The huge page using for guest is: %s" % nr_ah)

        if nr_ah[1] <= nr_ah[0]:
            logging.warning(
                "VM didn't use transparent hugepages under memory stress")

        if debugfs_flag == 1:
            if int(open(hugetlbfs_path, 'r').read()) <= 0:
                test.fail("KVM doesn't use transparenthugepage")

        logging.info("memory stress test finished")
    finally:
        error_context.context("all tests cleanup")
        fd = open(hugetlbfs_path, "w")
        fd.write("0")
        fd.close()
        if os.path.ismount(debugfs_path):
            process.run("umount %s" % debugfs_path, shell=True)
        if os.path.isdir(debugfs_path):
            os.removedirs(debugfs_path)