def initialize_guests():
    """
    Initialize guests (fill their memories with specified patterns).
    """
    logging.info("Phase 1: filling guest memory pages")
    for session in lsessions:
        vm = lvms[lsessions.index(session)]

        logging.debug("Turning off swap on vm %s", vm.name)
        session.cmd("swapoff -a", timeout=300)

        # Start the allocator
        _start_allocator(vm, session, 60 * perf_ratio)

    # Execute allocator on guests
    for i in range(0, vmsc):
        vm = lvms[i]

        a_cmd = "mem = MemFill(%d, %s, %s)" % (ksm_size, skeys[i], dkeys[i])
        _execute_allocator(a_cmd, vm, lsessions[i], 60 * perf_ratio)

        a_cmd = "mem.value_fill(%d)" % skeys[0]
        _execute_allocator(a_cmd, vm, lsessions[i], 120 * perf_ratio)

        # Let ksm_overcommit_guest.py do its job
        # (until shared mem reaches the expected value)
        shm = 0
        j = 0
        logging.debug("Target shared meminfo for guest %s: %s", vm.name,
                      ksm_size)
        while ((new_ksm and (shm < (ksm_size * (i + 1)))) or
               (not new_ksm and (shm < ksm_size))):
            if j > 64:
                logging.debug(kvm_test_utils.get_memory_info(lvms))
                raise error.TestError("SHM didn't merge the memory before "
                                      "the deadline on guest: %s" % vm.name)
            st = ksm_size / 200 * perf_ratio
            logging.debug("Waiting %ds before proceeding...", st)
            time.sleep(st)
            if new_ksm:
                shm = get_ksmstat()
            else:
                shm = vm.get_shared_meminfo()
            logging.debug("Shared meminfo for guest %s after "
                          "iteration %s: %s", vm.name, j, shm)
            j += 1

    # Keep some reserve
    rt = ksm_size / 200 * perf_ratio
    logging.debug("Waiting %ds before proceeding...", rt)
    time.sleep(rt)

    logging.debug(kvm_test_utils.get_memory_info(lvms))
    logging.info("Phase 1: PASS")
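
# Illustrative sketch (not part of the original test): get_ksmstat() is
# defined elsewhere in this test.  On kernels exposing the sysfs KSM
# interface (the "new_ksm" case above), the shared-memory figure can be
# derived from the pages_sharing counter; the sysfs path and the 4096-byte
# page size below are assumptions, not taken from this section.
def _get_ksmstat_sketch():
    # pages_sharing counts guest pages currently backed by a shared KSM page
    pages = int(open("/sys/kernel/mm/ksm/pages_sharing").read())
    return (pages * 4096) / (1024 * 1024)   # pages -> MB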
def separate_first_guest():
    """
    Separate memory of the first guest by generating special random series.
    """
    logging.info("Phase 2: Split the pages on the first guest")

    a_cmd = "mem.static_random_fill()"
    data = _execute_allocator(a_cmd, lvms[0], lsessions[0],
                              120 * perf_ratio)[1]

    r_msg = data.splitlines()[-1]
    logging.debug("Return message of static_random_fill: %s", r_msg)
    out = int(r_msg.split()[4])
    logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s", ksm_size, out,
                  (ksm_size * 1000 / out))
    logging.debug(kvm_test_utils.get_memory_info(lvms))
    logging.info("Phase 2: PASS")
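
# Illustrative sketch (not part of the original test): _execute_allocator()
# is defined elsewhere.  Judging by its call sites, it feeds one python
# statement to the ksm_overcommit_guest.py interpreter running in the guest
# and blocks until the guest prints a verdict line, returning a
# (match, data) tuple.  The "PASS:"/"FAIL:" markers are assumptions.
def _execute_allocator_sketch(command, vm, session, timeout):
    logging.debug("Executing %s on %s (timeout %ss)", command, vm.name,
                  timeout)
    session.sendline(command)
    (match, data) = session.read_until_last_line_matches(
        ["PASS:", "FAIL:"], timeout=timeout)
    if match == 1:      # index 1 matches the "FAIL:" pattern
        raise error.TestFail("Command '%s' failed on guest %s"
                             % (command, vm.name))
    return (match, data)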
def split_parallel():
    """
    Parallel page splitting.
    """
    logging.info("Phase 1: parallel page splitting")
    # We have to wait until the allocator is finished (it waits 5 seconds
    # to clean the socket).
    session = lsessions[0]
    vm = lvms[0]
    for i in range(1, max_alloc):
        lsessions.append(vm.wait_for_login(timeout=360))

    session.cmd("swapoff -a", timeout=300)

    for i in range(0, max_alloc):
        # Start the allocator
        _start_allocator(vm, lsessions[i], 60 * perf_ratio)

    logging.info("Phase 1: PASS")

    logging.info("Phase 2a: Simultaneous merging")
    logging.debug("Memory used by allocator on guests = %dMB",
                  (ksm_size / max_alloc))

    for i in range(0, max_alloc):
        a_cmd = ("mem = MemFill(%d, %s, %s)"
                 % ((ksm_size / max_alloc), skeys[i], dkeys[i]))
        _execute_allocator(a_cmd, vm, lsessions[i], 60 * perf_ratio)

        a_cmd = "mem.value_fill(%d)" % (skeys[0])
        _execute_allocator(a_cmd, vm, lsessions[i], 90 * perf_ratio)

    # Wait until ksm_overcommit_guest.py merges the pages
    shm = 0
    i = 0
    logging.debug("Target shared memory size: %s", ksm_size)
    while (shm < ksm_size):
        if i > 64:
            logging.debug(kvm_test_utils.get_memory_info(lvms))
            raise error.TestError("SHM didn't merge the memory before "
                                  "the deadline")
        wt = ksm_size / 200 * perf_ratio
        logging.debug("Waiting %ds before proceeding...", wt)
        time.sleep(wt)
        if new_ksm:
            shm = get_ksmstat()
        else:
            shm = vm.get_shared_meminfo()
        logging.debug("Shared meminfo after attempt %s: %s", i, shm)
        i += 1

    logging.debug(kvm_test_utils.get_memory_info([vm]))
    logging.info("Phase 2a: PASS")

    logging.info("Phase 2b: Simultaneous splitting")
    # Actual splitting
    for i in range(0, max_alloc):
        a_cmd = "mem.static_random_fill()"
        (match, data) = _execute_allocator(a_cmd, vm, lsessions[i],
                                           90 * perf_ratio)
        data = data.splitlines()[-1]
        logging.debug(data)
        out = int(data.split()[4])
        logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s",
                      (ksm_size / max_alloc), out,
                      (ksm_size * 1000 / out / max_alloc))
    logging.debug(kvm_test_utils.get_memory_info([vm]))
    logging.info("Phase 2b: PASS")

    logging.info("Phase 2c: Simultaneous verification")
    for i in range(0, max_alloc):
        a_cmd = "mem.static_random_verify()"
        (match, data) = _execute_allocator(a_cmd, vm, lsessions[i],
                                           (mem / 200 * 50 * perf_ratio))
    logging.info("Phase 2c: PASS")

    logging.info("Phase 2d: Simultaneous merging")
    # value_fill() rewrites the same value everywhere, letting KSM merge
    # the pages back together
    for i in range(0, max_alloc):
        a_cmd = "mem.value_fill(%d)" % skeys[0]
        (match, data) = _execute_allocator(a_cmd, vm, lsessions[i],
                                           120 * perf_ratio)
    logging.debug(kvm_test_utils.get_memory_info([vm]))
    logging.info("Phase 2d: PASS")

    logging.info("Phase 2e: Simultaneous verification")
    for i in range(0, max_alloc):
        a_cmd = "mem.value_check(%d)" % skeys[0]
        (match, data) = _execute_allocator(a_cmd, vm, lsessions[i],
                                           (mem / 200 * 50 * perf_ratio))
    logging.info("Phase 2e: PASS")

    logging.info("Phase 2f: Simultaneous splitting of the last 96B")
    for i in range(0, max_alloc):
        a_cmd = "mem.static_random_fill(96)"
        (match, data) = _execute_allocator(a_cmd, vm, lsessions[i],
                                           60 * perf_ratio)
        data = data.splitlines()[-1]
        out = int(data.split()[4])
        logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s",
                      (ksm_size / max_alloc), out,
                      (ksm_size * 1000 / out / max_alloc))
    logging.debug(kvm_test_utils.get_memory_info([vm]))
    logging.info("Phase 2f: PASS")

    logging.info("Phase 2g: Simultaneous verification of the last 96B")
    for i in range(0, max_alloc):
        a_cmd = "mem.static_random_verify(96)"
        (match, data) = _execute_allocator(a_cmd, vm, lsessions[i],
                                           (mem / 200 * 50 * perf_ratio))
    logging.debug(kvm_test_utils.get_memory_info([vm]))
    logging.info("Phase 2g: PASS")

    logging.debug("Cleaning up...")
    for i in range(0, max_alloc):
        lsessions[i].cmd_output("die()", 20)
    session.close()
    vm.destroy(gracefully=False)
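
# Illustrative sketch (not part of the original test): _start_allocator()
# launches the guest-side allocator that the a_cmd strings above talk to.
# Phase 0 copies ksm_overcommit_guest.py to /tmp in every guest, so a
# plausible sketch is simply starting it there and waiting for its ready
# line (the "PASS:" marker is an assumption):
def _start_allocator_sketch(vm, session, timeout):
    logging.debug("Starting ksm_overcommit_guest.py on guest %s", vm.name)
    session.sendline("python /tmp/ksm_overcommit_guest.py")
    session.read_until_last_line_matches(["PASS:", "FAIL:"], timeout=timeout)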
def split_guest():
    """
    Sequential split of pages on guests up to memory limit.
    """
    logging.info("Phase 3a: Sequential split of pages on guests up to "
                 "memory limit")
    last_vm = 0
    session = None
    vm = None
    for i in range(1, vmsc):
        # Check VMs
        for j in range(0, vmsc):
            if not lvms[j].is_alive():
                e_msg = ("VM %d died while executing static_random_fill in "
                         "VM %d on allocator loop" % (j, i))
                raise error.TestFail(e_msg)
        vm = lvms[i]
        session = lsessions[i]
        a_cmd = "mem.static_random_fill()"
        logging.debug("Executing %s on ksm_overcommit_guest.py loop, "
                      "vm: %s", a_cmd, vm.name)
        session.sendline(a_cmd)

        out = ""
        # Initialized here because the OSError handler below logs it
        free_mem = 0
        try:
            logging.debug("Watching host memory while filling vm %s memory",
                          vm.name)
            while not out.startswith("PASS") and not out.startswith("FAIL"):
                if not vm.is_alive():
                    e_msg = ("VM %d died while executing static_random_fill"
                             " on allocator loop" % i)
                    raise error.TestFail(e_msg)
                free_mem = int(utils.read_from_meminfo("MemFree"))
                if ksm_swap:
                    free_mem = (free_mem +
                                int(utils.read_from_meminfo("SwapFree")))
                logging.debug("Free memory on host: %d", free_mem)

                # We need to keep some memory for python to run.
                if (free_mem < 64000) or (ksm_swap and
                                          free_mem < (450000 * perf_ratio)):
                    vm.monitor.cmd("stop")
                    for j in range(0, i):
                        lvms[j].destroy(gracefully=False)
                    time.sleep(20)
                    vm.monitor.cmd("c")
                    logging.debug("Only %s free memory, killing %d guests",
                                  free_mem, (i - 1))
                    last_vm = i
                    break
                out = session.read_nonblocking(0.1)
                time.sleep(2)
        except OSError:
            logging.debug("Only %s host free memory, killing %d guests",
                          free_mem, (i - 1))
            logging.debug("Stopping %s", vm.name)
            vm.monitor.cmd("stop")
            for j in range(0, i):
                logging.debug("Destroying %s", lvms[j].name)
                lvms[j].destroy(gracefully=False)
            time.sleep(20)
            vm.monitor.cmd("c")
            last_vm = i

        if last_vm != 0:
            break
        logging.debug("Memory filled for guest %s", vm.name)

    logging.info("Phase 3a: PASS")

    logging.info("Phase 3b: Check that the memory in the most loaded guest "
                 "is right")
    for i in range(last_vm + 1, vmsc):
        lsessions[i].close()
        if i == (vmsc - 1):
            logging.debug(kvm_test_utils.get_memory_info([lvms[i]]))
        logging.debug("Destroying guest %s", lvms[i].name)
        lvms[i].destroy(gracefully=False)

    # Verify last machine with randomly generated memory
    a_cmd = "mem.static_random_verify()"
    _execute_allocator(a_cmd, lvms[last_vm], lsessions[last_vm],
                       (mem / 200 * 50 * perf_ratio))
    logging.debug(kvm_test_utils.get_memory_info([lvms[last_vm]]))

    lsessions[last_vm].cmd_output("die()", 20)
    lvms[last_vm].destroy(gracefully=False)
    logging.info("Phase 3b: PASS")
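
# Illustrative sketch (not part of the original test): the guard in
# split_guest() treats utils.read_from_meminfo() values as kB, so the 64000
# floor keeps roughly 64 MB free for the host-side python.  Equivalent
# /proc/meminfo parsing, as a hedged sketch:
def _read_from_meminfo_sketch(key):
    for line in open("/proc/meminfo"):
        if line.startswith(key + ":"):
            return int(line.split()[1])     # /proc/meminfo values are in kB
    return 0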
lvms[i].create()
if not lvms[i].is_alive():
    raise error.TestError("VM %s seems to be dead; test requires a "
                          "living VM" % lvms[i].name)

lsessions.append(lvms[i].wait_for_login(timeout=360))
try:
    tmp = open(params.get("pid_" + vm_name), "r")
    params["pid_" + vm_name] = int(tmp.readline())
    tmp.close()
except Exception:
    raise error.TestFail("Could not get PID of %s" % vm_name)

# Let guests rest a little bit :-)
st = vmsc * 2 * perf_ratio
logging.debug("Waiting %ds before proceeding", st)
time.sleep(st)
logging.debug(kvm_test_utils.get_memory_info(lvms))

# Copy ksm_overcommit_guest.py into guests
pwd = os.path.join(os.environ["AUTODIR"], "tests/kvm")
vksmd_src = os.path.join(pwd, "scripts/ksm_overcommit_guest.py")
dst_dir = "/tmp"
for vm in lvms:
    vm.copy_files_to(vksmd_src, dst_dir)
logging.info("Phase 0: PASS")

if params["ksm_mode"] == "parallel":
    logging.info("Starting KSM test parallel mode")
    split_parallel()
    logging.info("KSM test parallel mode: PASS")
elif params["ksm_mode"] == "serial":
    logging.info("Starting KSM test serial mode")
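    # The section ends mid-branch here.  By symmetry with the parallel
    # branch above, serial mode presumably runs the serial-phase helpers
    # defined earlier (an assumption; the original cuts off at this point):
    initialize_guests()
    separate_first_guest()
    split_guest()
    logging.info("KSM test serial mode: PASS")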