def check_ksm(mem, stress=False):
    """
    :param mem: Boot guest with given memory, in KB
    :param stress: Load stress or not
    """
    params['mem'] = mem // 1024
    params['start_vm'] = 'yes'
    vm_name = params['main_vm']
    env_process.preprocess_vm(test, params, env, vm_name)
    vm = env.get_vm(vm_name)
    vm.wait_for_login()
    if stress:
        params['stress_args'] = ('--cpu 4 --io 4 --vm 2 --vm-bytes %sM' %
                                 (int(params['mem']) // 2))
        stress_test = VMStress(vm, "stress", params)
        stress_test.load_stress_tool()
        time.sleep(30)
    qemu_pid = vm.get_pid()
    qemu_used_page = utils_misc.normalize_data_size(process.getoutput(
        params['cmd_get_qemu_used_mem'] % qemu_pid, shell=True) + 'K', 'B')
    pagesize = utils_memory.getpagesize()
    qemu_used_mem = int(float(qemu_used_page)) * pagesize
    free_mem_host = utils_memory.freememtotal()
    ksm_status = process.getoutput(params['cmd_check_ksm_status'])
    vm.destroy()
    logging.info('The ksm threshold is %s, the memory allocated by qemu is'
                 ' %s, and the total free memory on host is %s.' %
                 (ksm_thres, qemu_used_mem, free_mem_host))
    if free_mem_host >= ksm_thres:
        if ksm_status != '0':
            test.fail('Ksm should not start.')
        if stress:
            test.error('The host resource is not consumed as expected.')
    elif ksm_status == '0':
        test.fail('Ksm should start but it does not.')
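
# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original test): check_ksm() above and the
# run() below are parameterised by shell commands supplied through the test
# .cfg file, which is not included in this excerpt.  The dictionary below is
# purely illustrative and only shows the *shape* each command needs to have:
# cmd_check_ksm_status output is compared against the string '0', and
# cmd_get_qemu_used_mem must print a KB figure for the given QEMU pid.
example_params = {
    # RSS of the QEMU process in KB (illustrative value, not the real cfg)
    "cmd_get_qemu_used_mem": "ps -o rss= -p %s",
    # '0' while KSM has merged nothing (illustrative value, not the real cfg)
    "cmd_check_ksm_status": "cat /sys/kernel/mm/ksm/pages_shared",
    # a line containing the percentage, e.g. 'KSM_THRES_COEF=20' (illustrative)
    "cmd_get_thres": "grep KSM_THRES_COEF /etc/ksmtuned.conf",
}
# ---------------------------------------------------------------------------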
def run(test, params, env):
    """
    Check that KSM is started automatically when the ksmtuned threshold
    is reached.

    1. Get the total memory of the host and the KSM_THRES_COEF
    2. Boot a guest with memory below the KSM_THRES_COEF threshold
    3. Get the host memory used by the qemu-kvm process
    4. Get the free memory on the host
    5. If the free memory is not smaller than the threshold and the guest
       memory plus the threshold does not exceed the host total memory,
       check the KSM status on the host: KSM should not be running
    6. Repeat steps 2~5 with a guest sized so that the rule in step 5 is
       broken; KSM should then be running

    :param test: kvm test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    def check_ksm(mem, threshold_reached=False):
        """
        :param mem: Boot guest with given memory, in KB
        :param threshold_reached: whether the ksmtuned threshold is expected
                                  to be reached
        """
        def heavyload_install():
            if session.cmd_status(test_install_cmd) != 0:
                logging.warning("Could not find installed heavyload in guest, "
                                "will install it via winutils.iso ")
                winutil_drive = utils_misc.get_winutils_vol(session)
                if not winutil_drive:
                    test.cancel("WIN_UTILS CDROM not found.")
                install_cmd = params["install_cmd"] % winutil_drive
                session.cmd(install_cmd)

        def check_qemu_used_mem(qemu_pid, mem):
            qemu_used_page = process.getoutput(get_qemu_used_mem % qemu_pid,
                                               shell=True)
            qemu_used_mem = float(qemu_used_page) * pagesize
            if qemu_used_mem < mem * mem_thres:
                return False
            return True

        params['mem'] = mem // 1024
        params['start_vm'] = 'yes'
        vm_name = params['main_vm']
        env_process.preprocess_vm(test, params, env, vm_name)
        vm = env.get_vm(vm_name)
        session = vm.wait_for_login()
        qemu_pid = vm.get_pid()
        if params["os_type"] == "linux":
            params['stress_args'] = ('--cpu 4 --io 4 --vm 2 --vm-bytes %sM' %
                                     (int(params['mem']) // 2))
            stress_test = VMStress(vm, "stress", params)
            stress_test.load_stress_tool()
        else:
            install_path = params["install_path"]
            test_install_cmd = 'dir "%s" | findstr /I heavyload' % install_path
            heavyload_install()
            heavyload_bin = r'"%s\heavyload.exe" ' % install_path
            heavyload_options = ["/MEMORY 100", "/START"]
            start_cmd = heavyload_bin + " ".join(heavyload_options)
            stress_tool = BackgroundTest(session.cmd,
                                         (start_cmd, stress_timeout,
                                          stress_timeout))
            stress_tool.start()
            if not utils_misc.wait_for(stress_tool.is_alive, stress_timeout):
                test.error("Failed to start heavyload process")
        if not utils_misc.wait_for(lambda: check_qemu_used_mem(qemu_pid, mem),
                                   stress_timeout, 10, 10):
            test.error("QEMU used memory doesn't reach %s of guest mem %sM in "
                       "%ss" % (mem_thres, mem // 1024, stress_timeout))
        time.sleep(30)
        free_mem_host = utils_memory.freememtotal()
        ksm_status = process.getoutput(params['cmd_check_ksm_status'])
        vm.destroy()
        logging.info("The ksm threshold is %sM, QEMU used memory is %sM, "
                     "and the total free memory on host is %sM",
                     ksm_thres // 1024, mem // 1024, free_mem_host // 1024)
        if threshold_reached:
            if free_mem_host > ksm_thres:
                test.error("Host memory is not consumed as much as expected")
            if ksm_status == '0':
                test.fail("KSM should be running")
        else:
            if free_mem_host < ksm_thres:
                test.error("Host memory is consumed too much more than "
                           "expected")
            if ksm_status != '0':
                test.fail("KSM should not be running")

    total_mem_host = utils_memory.memtotal()
    utils_memory.drop_caches()
    free_mem_host = utils_memory.freememtotal()
    ksm_thres = process.getoutput(params['cmd_get_thres'], shell=True)
    ksm_thres = int(total_mem_host *
                    (int(re.findall('\\d+', ksm_thres)[0]) / 100))
    guest_mem = (free_mem_host - ksm_thres) // 2
    if arch.ARCH in ('ppc64', 'ppc64le'):
        guest_mem = guest_mem - guest_mem % (256 * 1024)
    status_ksm_service = process.system(
        params['cmd_status_ksmtuned'], ignore_status=True)
    if status_ksm_service != 0:
        process.run(params['cmd_start_ksmtuned'])
    stress_timeout = params.get("stress_timeout", 1800)
    mem_thres = float(params.get("mem_thres", 0.95))
    get_qemu_used_mem = params['cmd_get_qemu_used_mem']
    pagesize = utils_memory.getpagesize()
    check_ksm(guest_mem)

    ksm_config_file = params['ksm_config_file']
    backup_file = ksm_config_file + '.backup'
    copyfile(ksm_config_file, backup_file)
    threshold = params.get_numeric('ksm_threshold')
    with open(ksm_config_file, "a+") as f:
        f.write('%s=%s' % (params['ksm_thres_conf'], threshold))
    process.run(params['cmd_restart_ksmtuned'])
    ksm_thres = total_mem_host * (threshold / 100)
    guest_mem = total_mem_host - ksm_thres // 2
    if arch.ARCH in ('ppc64', 'ppc64le'):
        guest_mem = guest_mem - guest_mem % (256 * 1024)
    try:
        check_ksm(guest_mem, threshold_reached=True)
    finally:
        copyfile(backup_file, ksm_config_file)
        os.remove(backup_file)
        if status_ksm_service != 0:
            process.run(params['cmd_stop_ksmtuned'])
        else:
            process.run(params['cmd_restart_ksmtuned'])
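
# ---------------------------------------------------------------------------
# Editor's sketch (not part of the test): the guest-sizing arithmetic used by
# run() above, replayed with made-up numbers so the two passes are easy to
# follow.  All values are in KB, matching utils_memory.memtotal() and
# utils_memory.freememtotal(); check_ksm() converts to MB with mem // 1024.
total_mem_host = 16 * 1024 * 1024      # assume a 16 GiB host
free_mem_host = 15 * 1024 * 1024       # assume ~15 GiB currently free
threshold = 20                         # assume KSM_THRES_COEF=20 (percent)

ksm_thres = int(total_mem_host * (threshold / 100))    # ~3.2 GiB

# Pass 1: guest sized so host free memory stays above the threshold,
# hence ksmtuned should leave KSM off.
guest_mem_small = (free_mem_host - ksm_thres) // 2     # ~5.9 GiB

# Pass 2: guest sized so that filling it drags host free memory below the
# threshold, hence ksmtuned should switch KSM on.
guest_mem_big = total_mem_host - ksm_thres // 2        # ~14.4 GiB

print(ksm_thres // 1024, guest_mem_small // 1024, guest_mem_big // 1024)  # MB
# ---------------------------------------------------------------------------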
def run(test, params, env):
    """
    Test the command virsh memtune

    1) To get the current memtune parameters
    2) Change the parameter values
    3) Check the memtune query updated with the values
    4) Check whether the mounted cgroup path gets the updated value
    5) Check the output of virsh dumpxml
    6) Check vm is alive
    """
    # Check that the memtune command is available in the libvirt version
    # under test
    if not virsh.has_help_command("memtune"):
        test.cancel("Memtune not available in this libvirt version")

    # Check if memtune options are supported
    for option in memtune_types:
        option = re.sub('_', '-', option)
        if not virsh.has_command_help_match("memtune", option):
            test.cancel("%s option not available in memtune "
                        "cmd in this libvirt version" % option)

    # Get common parameters
    acceptable_minus = int(utils_memory.getpagesize() - 1)
    step_mem = params.get("mt_step_mem", "no") == "yes"
    expect_error = params.get("expect_error", "no") == "yes"
    restart_libvirtd = params.get("restart_libvirtd", "no") == "yes"
    set_one_line = params.get("set_in_one_command", "no") == "yes"
    mt_hard_limit = params.get("mt_hard_limit", None)
    mt_soft_limit = params.get("mt_soft_limit", None)
    mt_swap_hard_limit = params.get("mt_swap_hard_limit", None)
    # if restart_libvirtd is True, force set_one_line to True
    set_one_line = True if restart_libvirtd else set_one_line

    # Get the vm name, pid of vm and check it is alive
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    pid = vm.get_pid()

    # Resolve the memory cgroup path for a domain
    cgtest = libvirt_cgroup.CgroupTest(pid)
    path = cgtest.get_cgroup_path("memory")
    logging.debug("cgroup path is %s", path)

    global mem_cgroup_info
    mem_cgroup_info = cgtest.get_cgroup_file_mapping(virsh_cmd='memtune')
    logging.debug("memtune cgroup info is %s", mem_cgroup_info)

    # step_mem is used to do step increment limit testing
    if step_mem:
        mem_step(params, path, vm, test, acceptable_minus)
        return

    if not set_one_line:
        # Set one type of memtune limit per command
        if mt_hard_limit:
            index = 0
            mt_limit = mt_hard_limit
        elif mt_soft_limit:
            index = 1
            mt_limit = mt_soft_limit
        elif mt_swap_hard_limit:
            index = 2
            mt_limit = mt_swap_hard_limit
        mt_type = memtune_types[index]
        mt_cgname = mem_cgroup_info[mt_type]
        options = " --%s %s --live" % (re.sub('_', '-', mt_type), mt_limit)
        result = virsh.memtune_set(vm.name, options, debug=True)

        if expect_error:
            fail_patts = [params.get("error_info")]
            libvirt.check_result(result, fail_patts, [])
        else:
            # If the limit value is negative, it means no memtune limit
            mt_expected = mt_limit if int(mt_limit) > 0 else -1
            check_limit(path, mt_expected, mt_type, mt_cgname, vm, test,
                        acceptable_minus)
    else:
        # Set 3 limits in one command line
        mt_limits = [mt_hard_limit, mt_soft_limit, mt_swap_hard_limit]
        options = " %s --live" % ' '.join(mt_limits)
        result = virsh.memtune_set(vm.name, options, debug=True)

        if expect_error:
            fail_patts = [params.get("error_info")]
            libvirt.check_result(result, fail_patts, [])
        else:
            check_limits(path, mt_limits, vm, test, acceptable_minus)

        if restart_libvirtd:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()

            if not expect_error:
                # After libvirtd restarted, check the memtune values again
                check_limits(path, mt_limits, vm, test, acceptable_minus)
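
# ---------------------------------------------------------------------------
# Editor's sketch (not part of the test): a by-hand version of steps 2-4
# above for a single limit, assuming a cgroup-v1 host where the hard limit
# maps to memory.limit_in_bytes.  virsh memtune takes KiB while the cgroup v1
# file holds bytes, which is why check_limit() tolerates a difference of up
# to one page (acceptable_minus).  The helper name and the 1 GiB value are
# illustrative only.
from avocado.utils import process


def set_and_read_hard_limit(domain, cgroup_path, limit_kib=1048576):
    """Set a hard limit via virsh and return (virsh view, cgroup view in KiB)."""
    process.run("virsh memtune %s --hard-limit %d --live"
                % (domain, limit_kib), shell=True)
    virsh_view = process.run("virsh memtune %s" % domain,
                             shell=True).stdout_text
    with open("%s/memory.limit_in_bytes" % cgroup_path) as cg_file:
        cgroup_kib = int(cg_file.read().strip()) // 1024
    return virsh_view, cgroup_kib
# ---------------------------------------------------------------------------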
def run(test, params, env):
    """
    Tests KSM (Kernel Shared Memory) capability by allocating and filling
    KVM guests memory using various values. KVM sets the memory as
    MADV_MERGEABLE so all VM's memory can be merged. The workers in the
    guest write to a tmpfs filesystem, thus allocations are not limited by
    process max memory, only by the VM's memory. Two test modes are
    supported - serial and parallel.

    Serial mode - uses multiple VMs, allocates memory per guest and always
                  verifies the correct number of shared memory.
                  0) Prints out the setup and initialize guest(s)
                  1) Fills guest with the same number (S1)
                  2) Random fill on the first guest
                  3) Random fill of the remaining VMs one by one until the
                     memory is completely filled (KVM stops machines which
                     ask for additional memory until there is available
                     memory) (S2, shouldn't finish)
                  4) Destroy all VMs but the last one
                  5) Checks the last VM's memory for corruption
    Parallel mode - uses one VM with multiple allocator workers. Executes
                   scenarios in parallel to put more stress on the KVM.
                   0) Prints out the setup and initialize guest(s)
                   1) Fills memory with the same number (S1)
                   2) Fills memory with random numbers (S2)
                   3) Verifies all pages
                   4) Fills memory with the same number (S1)
                   5) Changes the last 96B (S3)

    Scenarios:
    S1) Fill all vms with the same value (all pages should be merged into 1)
    S2) Random fill (all pages should be split)
    S3) Fill last 96B (change only last 96B of each page; some pages will be
        merged; there was a bug with data corruption)
    Every worker has a unique random key so we are able to verify the filled
    values.

    :param test: kvm test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    :param cfg: ksm_swap - use swap?
    :param cfg: ksm_overcommit_ratio - memory overcommit (serial mode only)
    :param cfg: ksm_parallel_ratio - number of workers (parallel mode only)
    :param cfg: ksm_host_reserve - override memory reserve on host in MB
    :param cfg: ksm_guest_reserve - override memory reserve on guests in MB
    :param cfg: ksm_mode - test mode {serial, parallel}
    :param cfg: ksm_perf_ratio - performance ratio, increase it when your
                                 machine is too slow
    """
    def _start_allocator(vm, session, timeout):
        """
        Execute ksm_overcommit_guest.py on guest, wait until it's initialized.

        :param vm: VM object.
        :param session: Remote session to a VM object.
        :param timeout: Timeout that will be used to verify if
                        ksm_overcommit_guest.py started properly.
        """
        logging.debug("Starting ksm_overcommit_guest.py on guest %s", vm.name)
        session.sendline("$(command -v python python3 | head -1) "
                         "/tmp/ksm_overcommit_guest.py")
        try:
            session.read_until_last_line_matches(["PASS:", "FAIL:"], timeout)
        except aexpect.ExpectProcessTerminatedError as details:
            e_msg = ("Command ksm_overcommit_guest.py on vm '%s' failed: %s" %
                     (vm.name, str(details)))
            test.fail(e_msg)

    def _execute_allocator(command, vm, session, timeout):
        """
        Execute a given command on ksm_overcommit_guest.py main loop,
        indicating the vm the command was executed on.

        :param command: Command that will be executed.
        :param vm: VM object.
        :param session: Remote session to VM object.
        :param timeout: Timeout used to verify expected output.

        :return: Tuple (match index, data)
        """
        logging.debug("Executing '%s' on ksm_overcommit_guest.py loop, "
                      "vm: %s, timeout: %s", command, vm.name, timeout)
        session.sendline(command)
        try:
            (match, data) = session.read_until_last_line_matches(
                ["PASS:", "FAIL:"], timeout)
        except aexpect.ExpectProcessTerminatedError as details:
            e_msg = ("Failed to execute command '%s' on "
                     "ksm_overcommit_guest.py, vm '%s': %s" %
                     (command, vm.name, str(details)))
            test.fail(e_msg)
        return (match, data)

    def get_ksmstat():
        """
        Return sharing memory by ksm in MB

        :return: memory in MB
        """
        fpages = open('/sys/kernel/mm/ksm/pages_sharing')
        ksm_pages = int(fpages.read())
        fpages.close()
        sharing_mem = ksm_pages * pagesize
        return int(float(utils_misc.normalize_data_size("%sK" % sharing_mem)))

    def initialize_guests():
        """
        Initialize guests (fill their memories with specified patterns).
        """
        logging.info("Phase 1: filling guest memory pages")
        for session in lsessions:
            vm = lvms[lsessions.index(session)]

            logging.debug("Turning off swap on vm %s", vm.name)
            session.cmd("swapoff -a", timeout=300)

            # Start the allocator
            _start_allocator(vm, session, 60 * perf_ratio)

        # Execute allocator on guests
        for i in range(0, vmsc):
            vm = lvms[i]

            cmd = "mem = MemFill(%d, %s, %s)" % (ksm_size, skeys[i], dkeys[i])
            _execute_allocator(cmd, vm, lsessions[i], 60 * perf_ratio)

            cmd = "mem.value_fill(%d)" % skeys[0]
            _execute_allocator(cmd, vm, lsessions[i],
                               fill_base_timeout * 2 * perf_ratio)

            # Let ksm_overcommit_guest.py do its job
            # (until shared mem reaches expected value)
            shm = 0
            j = 0
            logging.debug("Target shared meminfo for guest %s: %s", vm.name,
                          ksm_size)
            while ((new_ksm and (shm < (ksm_size * (i + 1)))) or
                   (not new_ksm and (shm < (ksm_size)))):
                if j > 256:
                    logging.debug(utils_test.get_memory_info(lvms))
                    test.error("SHM didn't merge the memory until "
                               "the DL on guest: %s" % vm.name)
                pause = ksm_size / 200 * perf_ratio
                logging.debug("Waiting %ds before proceeding...", pause)
                time.sleep(pause)
                if (new_ksm):
                    shm = get_ksmstat()
                else:
                    shm = vm.get_shared_meminfo()
                logging.debug("Shared meminfo for guest %s after "
                              "iteration %s: %s", vm.name, j, shm)
                j += 1

        # Keep some reserve
        pause = ksm_size / 200 * perf_ratio
        logging.debug("Waiting %ds before proceeding...", pause)
        time.sleep(pause)

        logging.debug(utils_test.get_memory_info(lvms))
        logging.info("Phase 1: PASS")

    def separate_first_guest():
        """
        Separate memory of the first guest by generating special random series
        """
        logging.info("Phase 2: Split the pages on the first guest")

        cmd = "mem.static_random_fill()"
        data = _execute_allocator(cmd, lvms[0], lsessions[0],
                                  fill_base_timeout * 2 * perf_ratio)[1]

        r_msg = data.splitlines()[-1]
        logging.debug("Return message of static_random_fill: %s", r_msg)
        out = int(r_msg.split()[4])
        logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s", ksm_size,
                      out, (ksm_size * 1000 / out))
        logging.debug(utils_test.get_memory_info(lvms))
        logging.debug("Phase 2: PASS")

    def split_guest():
        """
        Sequential split of pages on guests up to memory limit
        """
        logging.info("Phase 3a: Sequential split of pages on guests up to "
                     "memory limit")
        last_vm = 0
        session = None
        vm = None
        for i in range(1, vmsc):
            # Check VMs
            for j in range(0, vmsc):
                if not lvms[j].is_alive():
                    e_msg = ("VM %d died while executing static_random_fill on"
                             " VM %d in allocator loop" % (j, i))
                    test.fail(e_msg)
            vm = lvms[i]
            session = lsessions[i]
            cmd = "mem.static_random_fill()"
            logging.debug("Executing %s on ksm_overcommit_guest.py loop, "
                          "vm: %s", cmd, vm.name)
            session.sendline(cmd)

            out = ""
            try:
                logging.debug("Watching host mem while filling vm %s memory",
                              vm.name)
                while (not out.startswith("PASS") and
                       not out.startswith("FAIL")):
                    if not vm.is_alive():
                        e_msg = ("VM %d died while executing "
                                 "static_random_fill on allocator loop" % i)
                        test.fail(e_msg)
                    free_mem = int(utils_memory.read_from_meminfo("MemFree"))
                    if (ksm_swap):
                        free_mem = (free_mem + int(
                            utils_memory.read_from_meminfo("SwapFree")))
                    logging.debug("Free memory on host: %d", free_mem)

                    # We need to keep some memory for python to run.
                    if (free_mem < 64000) or (ksm_swap and
                                              free_mem < (450000 * perf_ratio)):
                        vm.pause()
                        for j in range(0, i):
                            lvms[j].destroy(gracefully=False)
                        time.sleep(20)
                        vm.resume()
                        logging.debug("Only %s free memory, killing %d guests",
                                      free_mem, (i - 1))
                        last_vm = i
                    out = session.read_nonblocking(0.1, 1)
                    time.sleep(2)
            except OSError:
                logging.debug("Only %s host free memory, killing %d guests",
                              free_mem, (i - 1))
                logging.debug("Stopping %s", vm.name)
                vm.pause()
                for j in range(0, i):
                    logging.debug("Destroying %s", lvms[j].name)
                    lvms[j].destroy(gracefully=False)
                time.sleep(20)
                vm.resume()
                last_vm = i

            if last_vm != 0:
                break
            logging.debug("Memory filled for guest %s", vm.name)

        logging.info("Phase 3a: PASS")

        logging.info("Phase 3b: Verify memory of the max stressed VM")
        for i in range(last_vm + 1, vmsc):
            lsessions[i].close()
            if i == (vmsc - 1):
                logging.debug(utils_test.get_memory_info([lvms[i]]))
            logging.debug("Destroying guest %s", lvms[i].name)
            lvms[i].destroy(gracefully=False)

        # Verify last machine with randomly generated memory
        cmd = "mem.static_random_verify()"
        _execute_allocator(cmd, lvms[last_vm], lsessions[last_vm],
                           (mem / 200 * 50 * perf_ratio))
        logging.debug(utils_test.get_memory_info([lvms[last_vm]]))

        lsessions[last_vm].cmd_output("die()", 20)
        lvms[last_vm].destroy(gracefully=False)
        logging.info("Phase 3b: PASS")

    def split_parallel():
        """
        Parallel page splitting
        """
        logging.info("Phase 1: parallel page splitting")
        # We have to wait until allocator is finished (it waits 5 seconds to
        # clean the socket)
        session = lsessions[0]
        vm = lvms[0]
        for i in range(1, max_alloc):
            lsessions.append(vm.wait_for_login(timeout=360))

        session.cmd("swapoff -a", timeout=300)

        for i in range(0, max_alloc):
            # Start the allocator
            _start_allocator(vm, lsessions[i], 60 * perf_ratio)

        logging.info("Phase 1: PASS")

        logging.info("Phase 2a: Simultaneous merging")
        logging.debug("Memory used by allocator on guests = %dMB",
                      (ksm_size / max_alloc))

        for i in range(0, max_alloc):
            cmd = "mem = MemFill(%d, %s, %s)" % ((ksm_size / max_alloc),
                                                 skeys[i], dkeys[i])
            _execute_allocator(cmd, vm, lsessions[i], 60 * perf_ratio)

            cmd = "mem.value_fill(%d)" % (skeys[0])
            _execute_allocator(cmd, vm, lsessions[i],
                               fill_base_timeout * perf_ratio)

        # Wait until ksm_overcommit_guest.py merges pages (3 * ksm_size / 3)
        shm = 0
        i = 0
        logging.debug("Target shared memory size: %s", ksm_size)
        while (shm < ksm_size):
            if i > 64:
                logging.debug(utils_test.get_memory_info(lvms))
                test.error("SHM didn't merge the memory until DL")
            pause = ksm_size / 200 * perf_ratio
            logging.debug("Waiting %ds before proceeding...", pause)
            time.sleep(pause)
            if (new_ksm):
                shm = get_ksmstat()
            else:
                shm = vm.get_shared_meminfo()
            logging.debug("Shared meminfo after attempt %s: %s", i, shm)
            i += 1

        logging.debug(utils_test.get_memory_info([vm]))
        logging.info("Phase 2a: PASS")

        logging.info("Phase 2b: Simultaneous splitting")
        # Actual splitting
        for i in range(0, max_alloc):
            cmd = "mem.static_random_fill()"
            data = _execute_allocator(cmd, vm, lsessions[i],
                                      fill_base_timeout * perf_ratio)[1]

            data = data.splitlines()[-1]
            logging.debug(data)
            out = int(data.split()[4])
            logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s",
                          (ksm_size / max_alloc), out,
                          (ksm_size * 1000 / out / max_alloc))
        logging.debug(utils_test.get_memory_info([vm]))
        logging.info("Phase 2b: PASS")

        logging.info("Phase 2c: Simultaneous verification")
        for i in range(0, max_alloc):
            cmd = "mem.static_random_verify()"
            data = _execute_allocator(cmd, vm, lsessions[i],
                                      (mem / 200 * 50 * perf_ratio))[1]
        logging.info("Phase 2c: PASS")

        logging.info("Phase 2d: Simultaneous merging")
        # Actual merging
        for i in range(0, max_alloc):
            cmd = "mem.value_fill(%d)" % skeys[0]
            data = _execute_allocator(cmd, vm, lsessions[i],
                                      fill_base_timeout * 2 * perf_ratio)[1]
        logging.debug(utils_test.get_memory_info([vm]))
        logging.info("Phase 2d: PASS")

        logging.info("Phase 2e: Simultaneous verification")
        for i in range(0, max_alloc):
            cmd = "mem.value_check(%d)" % skeys[0]
            data = _execute_allocator(cmd, vm, lsessions[i],
                                      (mem / 200 * 50 * perf_ratio))[1]
        logging.info("Phase 2e: PASS")

        logging.info("Phase 2f: Simultaneous splitting of the last 96B")
        for i in range(0, max_alloc):
            cmd = "mem.static_random_fill(96)"
            data = _execute_allocator(cmd, vm, lsessions[i],
                                      fill_base_timeout * perf_ratio)[1]

            data = data.splitlines()[-1]
            out = int(data.split()[4])
            logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s",
                          ksm_size / max_alloc, out,
                          (ksm_size * 1000 / out / max_alloc))
        logging.debug(utils_test.get_memory_info([vm]))
        logging.info("Phase 2f: PASS")

        logging.info("Phase 2g: Simultaneous verification of the last 96B")
        for i in range(0, max_alloc):
            cmd = "mem.static_random_verify(96)"
            _, data = _execute_allocator(cmd, vm, lsessions[i],
                                         (mem / 200 * 50 * perf_ratio))
        logging.debug(utils_test.get_memory_info([vm]))
        logging.info("Phase 2g: PASS")

        logging.debug("Cleaning up...")
        for i in range(0, max_alloc):
            lsessions[i].cmd_output("die()", 20)
        session.close()
        vm.destroy(gracefully=False)

    # Main test code
    logging.info("Starting phase 0: Initialization")
    if process.run("ps -C ksmtuned", ignore_status=True).exit_status == 0:
        logging.info("Killing ksmtuned...")
        process.run("killall ksmtuned")
    new_ksm = False
    if (os.path.exists("/sys/kernel/mm/ksm/run")):
        process.run("echo 50 > /sys/kernel/mm/ksm/sleep_millisecs", shell=True)
        process.run("echo 5000 > /sys/kernel/mm/ksm/pages_to_scan", shell=True)
        process.run("echo 1 > /sys/kernel/mm/ksm/run", shell=True)

        e_up = "/sys/kernel/mm/transparent_hugepage/enabled"
        e_rh = "/sys/kernel/mm/redhat_transparent_hugepage/enabled"
        if os.path.exists(e_up):
            process.run("echo 'never' > %s" % e_up, shell=True)
        if os.path.exists(e_rh):
            process.run("echo 'never' > %s" % e_rh, shell=True)
        new_ksm = True
    else:
        try:
            process.run("modprobe ksm")
            process.run("ksmctl start 5000 100")
        except process.CmdError as details:
            test.fail("Failed to load KSM: %s" % details)

    # host_reserve: mem reserve kept for the host system to run
    host_reserve = int(params.get("ksm_host_reserve", -1))
    if (host_reserve == -1):
        try:
            available = utils_memory.read_from_meminfo("MemAvailable")
        except process.CmdError:  # ancient kernels
            utils_memory.drop_caches()
            available = utils_memory.read_from_meminfo("MemFree")
        # default host_reserve = UsedMem + one_minimal_guest(128MB)
        # later we add 64MB per additional guest
        host_reserve = ((utils_memory.memtotal() - available) / 1024 + 128)
        # using default reserve
        _host_reserve = True
    else:
        _host_reserve = False

    # guest_reserve: mem reserve kept to avoid the guest OS killing processes
    guest_reserve = int(params.get("ksm_guest_reserve", -1))
    if (guest_reserve == -1):
        # In case of OOM, set guest_reserve to 1536M
        guest_reserve = 1536
        # using default reserve
        _guest_reserve = True
    else:
        _guest_reserve = False

    max_vms = int(params.get("max_vms", 2))
    overcommit = float(params.get("ksm_overcommit_ratio", 2.0))
    max_alloc = int(params.get("ksm_parallel_ratio", 1))

    # vmsc: count of all used VMs
    vmsc = int(overcommit) + 1
    vmsc = max(vmsc, max_vms)

    if (params['ksm_mode'] == "serial"):
        max_alloc = vmsc
        if _host_reserve:
            # First round of additional guest reserves
            host_reserve += vmsc * 64
            _host_reserve = vmsc

    host_mem = (int(utils_memory.memtotal()) / 1024 - host_reserve)
    pagesize = utils_memory.getpagesize()

    ksm_swap = False
    if params.get("ksm_swap") == "yes":
        ksm_swap = True

    # Performance ratio
    perf_ratio = params.get("ksm_perf_ratio")
    if perf_ratio:
        perf_ratio = float(perf_ratio)
    else:
        perf_ratio = 1

    if (params['ksm_mode'] == "parallel"):
        vmsc = 1
        overcommit = 1
        mem = host_mem
        # 32bit system adjustment
        if "64" not in params.get("vm_arch_name"):
            logging.debug("Probably i386 guest architecture, "
                          "max allocator mem = 2G")
            # Guest can have more than 2G but
            # kvm mem + 1MB (allocator itself) can't
            if (host_mem > 3100):
                mem = 3100

        if os.popen("uname -i").readline().startswith("i386"):
            logging.debug("Host is i386 architecture, max guest mem is 2G")
            # Guest system with qemu overhead (64M) can't have more than 2G
            if mem > 3100 - 64:
                mem = 3100 - 64

    else:
        # mem: Memory of the guest systems. Maximum must be less than
        # host's physical ram
        mem = int(overcommit * host_mem / vmsc)
        # If guest memory is too large, it will take a very long time for
        # ksm to scan and merge identical pages, and sometimes hit a timeout
        if mem > 8192:
            mem = 8192

        # 32bit system adjustment
        if params["vm_arch_name"] == 'i686':
            logging.debug("Probably i386 guest architecture, "
                          "max allocator mem = 2G")
            # Guest can have more than 2G but
            # kvm mem + 1MB (allocator itself) can't
            if mem - guest_reserve - 1 > 3100:
                vmsc = int(math.ceil((host_mem * overcommit) /
                                     (3100 + guest_reserve)))
                if _host_reserve:
                    host_reserve += (vmsc - _host_reserve) * 64
                    host_mem -= (vmsc - _host_reserve) * 64
                    _host_reserve = vmsc
                mem = int(math.floor(host_mem * overcommit / vmsc))

        if os.popen("uname -i").readline().startswith("i386"):
            logging.debug("Host is i386 architecture, max guest mem is 2G")
            # Guest system with qemu overhead (64M) can't have more than 2G
            if mem > 3100 - 64:
                vmsc = int(math.ceil((host_mem * overcommit) /
                                     (3100 - 64.0)))
                if _host_reserve:
                    host_reserve += (vmsc - _host_reserve) * 64
                    host_mem -= (vmsc - _host_reserve) * 64
                    _host_reserve = vmsc
                mem = int(math.floor(host_mem * overcommit / vmsc))

    # 0.055 represents OS + TMPFS additional reserve per guest ram MB
    if _guest_reserve:
        guest_reserve += math.ceil(mem * 0.055)

    swap = int(utils_memory.read_from_meminfo("SwapTotal")) / 1024

    logging.debug("Overcommit = %f", overcommit)
    logging.debug("True overcommit = %f ", (float(vmsc * mem) /
                                            float(host_mem)))
    logging.debug("Host memory = %dM", host_mem)
    logging.debug("Guest memory = %dM", mem)
    logging.debug("Using swap = %s", ksm_swap)
    logging.debug("Swap = %dM", swap)
    logging.debug("max_vms = %d", max_vms)
    logging.debug("Count of all used VMs = %d", vmsc)
    logging.debug("Performance_ratio = %f", perf_ratio)

    # Generate unique keys for random series
    skeys = []
    dkeys = []
    for i in range(0, max(vmsc, max_alloc)):
        key = random.randrange(0, 255)
        while key in skeys:
            key = random.randrange(0, 255)
        skeys.append(key)

        key = random.randrange(0, 999)
        while key in dkeys:
            key = random.randrange(0, 999)
        dkeys.append(key)

    logging.debug("skeys: %s", skeys)
    logging.debug("dkeys: %s", dkeys)

    lvms = []
    lsessions = []

    # As we don't know the number and memory amount of VMs in advance,
    # we need to specify and create them here
    vm_name = params["main_vm"]
    params['mem'] = mem
    params['vms'] = vm_name
    # ksm_size: amount of memory used by allocator
    ksm_size = mem - guest_reserve
    logging.debug("Memory used by allocator on guests = %dM", ksm_size)
    fill_base_timeout = ksm_size / 10

    # Creating the first guest
    env_process.preprocess_vm(test, params, env, vm_name)
    lvms.append(env.get_vm(vm_name))
    if not lvms[0]:
        test.error("VM object not found in environment")
    if not lvms[0].is_alive():
        test.error("VM seems to be dead; Test requires a living VM")

    logging.debug("Booting first guest %s", lvms[0].name)

    lsessions.append(lvms[0].wait_for_login(timeout=360))

    # Creating other guest systems
    for i in range(1, vmsc):
        vm_name = "vm" + str(i + 1)
        # Last VM is later used to run more allocators simultaneously
        lvms.append(lvms[0].clone(vm_name, params))
        env.register_vm(vm_name, lvms[i])
        params['vms'] += " " + vm_name

        logging.debug("Booting guest %s", lvms[i].name)
        lvms[i].create()
        if not lvms[i].is_alive():
            test.error("VM %s seems to be dead; Test requires a "
                       "living VM" % lvms[i].name)

        lsessions.append(lvms[i].wait_for_login(timeout=360))

    # Let guests rest a little bit :-)
    pause = vmsc * 2 * perf_ratio
    logging.debug("Waiting %ds before proceeding", pause)
    time.sleep(vmsc * 2 * perf_ratio)
    logging.debug(utils_test.get_memory_info(lvms))

    # Copy ksm_overcommit_guest.py into guests
    vksmd_src = os.path.join(data_dir.get_shared_dir(), "scripts",
                             "ksm_overcommit_guest.py")
    dst_dir = "/tmp"
    for vm in lvms:
        vm.copy_files_to(vksmd_src, dst_dir)
    logging.info("Phase 0: PASS")

    if params['ksm_mode'] == "parallel":
        logging.info("Starting KSM test parallel mode")
        split_parallel()
        logging.info("KSM test parallel mode: PASS")
    elif params['ksm_mode'] == "serial":
        logging.info("Starting KSM test serial mode")
        initialize_guests()
        separate_first_guest()
        split_guest()
        logging.info("KSM test serial mode: PASS")
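
# ---------------------------------------------------------------------------
# Editor's sketch (not part of the test): the serial-mode sizing formulas
# used in run() above, replayed with made-up numbers.  Values are in MB,
# matching host_mem; all host/cfg figures below are assumptions.
import math

host_mem = 8192                 # assume 8 GiB left after host_reserve
overcommit = 2.0                # assumed ksm_overcommit_ratio
max_vms = 2
guest_reserve = 1536            # default guest reserve before the 5.5% add-on

vmsc = max(int(overcommit) + 1, max_vms)        # 3 guests
mem = int(overcommit * host_mem / vmsc)         # 5461 MB per guest
guest_reserve += math.ceil(mem * 0.055)         # +301 MB OS/tmpfs reserve -> 1837
ksm_size = mem - guest_reserve                  # 3624 MB written by each allocator

# "True overcommit" reported in the debug log: vmsc * mem / host_mem ~= 2.0
print(vmsc, mem, guest_reserve, ksm_size, vmsc * mem / host_mem)
# ---------------------------------------------------------------------------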