def remove_machine_cgroup():
    """
    Drop the machine/machine.slice cgroup by restarting the cgconfig
    service and then the libvirtd service.
    """
    # Restart cgconfig first so that libvirtd comes back up against the
    # freshly rebuilt cgroup configuration.
    utils_cgroup.CgconfigService().cgconfig_restart()
    service.Factory.create_specific_service("libvirtd").restart()
def run(test, params, env):
    """
    Test emulatorpin tuning

    1) Positive testing
       1.1) get the current emulatorpin parameters for a running/shutoff guest
       1.2) set the current emulatorpin parameters for a running/shutoff guest
    2) Negative testing
       2.1) get emulatorpin parameters for a running/shutoff guest
       2.2) set emulatorpin parameters running/shutoff guest

    :param test: avocado test object (used for fail/cancel)
    :param params: test parameter dictionary
    :param env: test environment object holding the VM
    """
    # Run test case
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cgconfig = params.get("cgconfig", "on")
    cpulist = params.get("emulatorpin_cpulist")
    status_error = params.get("status_error", "no")
    change_parameters = params.get("change_parameters", "no")

    # Backup original vm XML so it can be restored in the finally block
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    emulatorpin_placement = params.get("emulatorpin_placement", "")
    if emulatorpin_placement:
        vm.destroy()
        vmxml.placement = emulatorpin_placement
        vmxml.sync()
        try:
            vm.start()
        except VMStartError as detail:
            # Recover the VM and failout early
            vmxml_backup.sync()
            logging.debug("Used VM XML:\n %s", vmxml)
            # BUGFIX: avocado's test.fail() takes a single message argument;
            # the old two-argument call raised TypeError instead of failing
            # with the intended message.
            test.fail("VM Fails to start: %s" % detail)

    test_dicts = dict(params)
    test_dicts['vm'] = vm

    host_cpus = cpu.online_cpus_count()
    test_dicts['host_cpus'] = host_cpus
    cpu_max = int(host_cpus) - 1

    cpu_list = None

    # Assemble cpu list for positive test
    if status_error == "no":
        if cpulist is None:
            pass
        elif cpulist == "x":
            cpu_online_map = list(map(str, cpu.cpu_online_list()))
            cpulist = random.choice(cpu_online_map)
        elif cpulist == "x-y":
            # By default, emulator is pined to all cpus, and element
            # 'cputune/emulatorpin' may not exist in VM's XML.
            # And libvirt will do nothing if pin emulator to the same
            # cpus, that means VM's XML still have that element.
            # So for testing, we should avoid that value(0-$cpu_max).
            if cpu_max < 2:
                cpulist = "0-0"
            else:
                cpulist = "0-%s" % (cpu_max - 1)
        elif cpulist == "x,y":
            cpu_online_map = list(map(str, cpu.cpu_online_list()))
            cpulist = ','.join(random.sample(cpu_online_map, 2))
        elif cpulist == "x-y,^z":
            cpulist = "0-%s,^%s" % (cpu_max, cpu_max)
        elif cpulist == "-1":
            cpulist = "-1"
        elif cpulist == "out_of_max":
            cpulist = str(cpu_max + 1)
        else:
            test.cancel("CPU-list=%s is not recognized." % cpulist)
    test_dicts['emulatorpin_cpulist'] = cpulist

    if cpulist:
        cpu_list = cpus_parser(cpulist)
        test_dicts['cpu_list'] = cpu_list
        logging.debug("CPU list is %s", cpu_list)

    cg = utils_cgroup.CgconfigService()

    if cgconfig == "off":
        if cg.cgconfig_is_running():
            cg.cgconfig_stop()

    # positive and negative testing #########
    try:
        if status_error == "no":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts, test)
            else:
                set_emulatorpin_parameter(test_dicts, test)

        if status_error == "yes":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts, test)
            else:
                set_emulatorpin_parameter(test_dicts, test)
    finally:
        # Recover cgconfig and libvirtd service
        if not cg.cgconfig_is_running():
            cg.cgconfig_start()
            utils_libvirtd.libvirtd_restart()
        # Recover vm.
        vmxml_backup.sync()
def run(test, params, env):
    """
    Test emulatorpin tuning

    1) Positive testing
       1.1) get the current emulatorpin parameters for a running/shutoff guest
       1.2) set the current emulatorpin parameters for a running/shutoff guest
    2) Negative testing
       2.1) get emulatorpin parameters for a running/shutoff guest
       2.2) set emulatorpin parameters running/shutoff guest

    :param test: avocado test object
    :param params: test parameter dictionary
    :param env: test environment object holding the VM
    """
    # Run test case
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cgconfig = params.get("cgconfig", "on")
    cpulist = params.get("emulatorpin_cpulist")
    status_error = params.get("status_error", "no")
    change_parameters = params.get("change_parameters", "no")

    # Backup original vm XML so it can be restored in the finally block
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    test_dicts = dict(params)
    test_dicts['vm'] = vm

    # BUGFIX: the old code called open('/proc/cpuinfo') without closing it,
    # leaking the file handle; a context manager closes it deterministically.
    # str.count() already returns an int, so the int() wrapper was redundant.
    with open('/proc/cpuinfo') as cpuinfo:
        host_cpus = cpuinfo.read().count('processor')
    test_dicts['host_cpus'] = host_cpus

    cpu_list = None
    if cpulist:
        cpu_list = cpus_parser(cpulist)
        test_dicts['cpu_list'] = cpu_list
        logging.debug("CPU list is %s", cpu_list)

    # If the physical CPU N doesn't exist, it's an expected error
    if cpu_list and max(cpu_list) > host_cpus - 1:
        test_dicts["status_error"] = "yes"

    cg = utils_cgroup.CgconfigService()

    if cgconfig == "off":
        if cg.cgconfig_is_running():
            cg.cgconfig_stop()

    # positive and negative testing #########
    try:
        if status_error == "no":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts)
            else:
                set_emulatorpin_parameter(test_dicts)

        if status_error == "yes":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts)
            else:
                set_emulatorpin_parameter(test_dicts)
    finally:
        # Recover cgconfig and libvirtd service
        if not cg.cgconfig_is_running():
            cg.cgconfig_start()
            utils_libvirtd.libvirtd_restart()
        # Recover vm.
        vmxml_backup.sync()
def run(test, params, env):
    """
    Test numa tuning

    1) Positive testing
       1.1) get the current numa parameters for a running/shutoff guest
       1.2) set the current numa parameters for a running/shutoff guest
           1.2.1) set valid 'mode' parameters
           1.2.2) set valid 'nodeset' parameters
    2) Negative testing
       2.1) get numa parameters
           2.1.1) invalid options
           2.1.2) stop cgroup service
       2.2) set numa parameters
           2.2.1) invalid 'mode' parameters
           2.2.2) invalid 'nodeset' parameters
           2.2.3) change 'mode' for a running guest and 'mode' is not 'strict'
           2.2.4) change 'nodeset' for running guest with mode of 'interleave'
                  'interleave' or 'preferred' numa mode
           2.2.5) stop cgroup service

    :param test: avocado test object
    :param params: test parameter dictionary
    :param env: test environment object holding the VM
    """
    # Skip the whole test when numactl is not installed on the host.
    try:
        utils_misc.find_command("numactl")
    except ValueError:
        raise error.TestNAError("Command 'numactl' is missing. You must "
                                "install it.")

    # Run test case
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    original_vm_xml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    cg = utils_cgroup.CgconfigService()

    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")
    cgconfig = params.get("cgconfig", "on")
    start_vm = params.get("start_vm", "no")
    change_parameters = params.get("change_parameters", "no")

    # Make sure vm is down if start not requested
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    # positive and negative testing #########
    # cgstop tracks whether we stopped the cgconfig service ourselves;
    # it is forwarded to the get/set helpers and drives recovery below.
    # NOTE(review): the first (status_error == "no") pass always runs with
    # cgstop False, since cgconfig is only stopped afterwards — confirm this
    # ordering is intentional for the positive cases.
    cgstop = False
    try:
        if status_error == "no":
            if change_parameters == "no":
                get_numa_parameter(params, cgstop)
            else:
                set_numa_parameter(params, cgstop)
        if cgconfig == "off":
            # If running, then need to shutdown a running guest before
            # stopping cgconfig service and will start the guest after
            # restarting libvirtd service
            if cg.cgconfig_is_running():
                if vm.is_alive():
                    vm.destroy()
                cg.cgconfig_stop()
                cgstop = True

        # If we stopped cg, then refresh libvirtd service
        # to get latest cgconfig service change; otherwise,
        # if no cg change restart of libvirtd is pointless
        if cgstop and libvirtd == "restart":
            try:
                utils_libvirtd.libvirtd_restart()
            finally:
                # Not running is not a good thing, but it does happen
                # and it will affect other tests
                if not utils_libvirtd.libvirtd_is_running():
                    raise error.TestNAError("libvirt service is not running!")

        # Recover previous running guest
        if (cgconfig == "off" and libvirtd == "restart"
                and not vm.is_alive() and start_vm == "yes"):
            vm.start()
        if status_error == "yes":
            if change_parameters == "no":
                get_numa_parameter(params, cgstop)
            else:
                set_numa_parameter(params, cgstop)
    finally:
        # Restore guest
        original_vm_xml.sync()

        # If we stopped cg, then recover and refresh libvirtd to recognize
        if cgstop:
            cg.cgconfig_start()
            utils_libvirtd.libvirtd_restart()
def run_virsh_numatune(test, params, env):
    """
    Test numa tuning

    1) Positive testing
       1.1) get the current numa parameters for a running/shutoff guest
       1.2) set the current numa parameters for a running/shutoff guest
           1.2.1) set valid 'mode' parameters
           1.2.2) set valid 'nodeset' parameters
    2) Negative testing
       2.1) get numa parameters
           2.1.1) invalid options
           2.1.2) stop cgroup service
       2.2) set numa parameters
           2.2.1) invalid 'mode' parameters
           2.2.2) invalid 'nodeset' parameters
           2.2.3) change 'mode' for a running guest and 'mode' is not 'strict'
           2.2.4) change 'nodeset' for running guest with mode of 'interleave'
                  'interleave' or 'preferred' numa mode
           2.2.5) stop cgroup service

    :param test: avocado test object
    :param params: test parameter dictionary
    :param env: test environment object holding the VM
    """
    # Run test case
    vm_name = params.get("vms")
    vm = env.get_vm(vm_name)
    original_vm_xml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    cgconfig_service = utils_cgroup.CgconfigService()

    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")
    cgconfig = params.get("cgconfig", "on")
    start_vm = params.get("start_vm", "no")
    change_parameters = params.get("change_parameters", "no")

    # Make sure vm is down if start not requested
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    # positive and negative testing #########
    try:
        if status_error == "no":
            if change_parameters == "no":
                get_numa_parameter(params)
            else:
                set_numa_parameter(params)
        if cgconfig == "off":
            # Need to shutdown a running guest before stopping cgconfig
            # service and will start the guest after restarting libvirtd
            # service
            if vm.is_alive():
                vm.destroy()
            if cgconfig_service.cgconfig_is_running():
                cgconfig_service.cgconfig_stop()

        # Refresh libvirtd service to get latest cgconfig service change
        if libvirtd == "restart":
            utils_libvirtd.libvirtd_restart()

        # Recover previous running guest
        if (cgconfig == "off" and libvirtd == "restart"
                and not vm.is_alive() and start_vm == "yes"):
            vm.start()

        if status_error == "yes":
            if change_parameters == "no":
                get_numa_parameter(params)
            else:
                set_numa_parameter(params)
    finally:
        # BUGFIX: cgconfig/libvirtd recovery used to live at the end of the
        # try body, so any exception raised by the get/set helpers skipped
        # it and left the host with cgconfig stopped, breaking later tests.
        # Recover the services first (guest XML sync needs libvirtd up),
        # then restore the guest.
        if not cgconfig_service.cgconfig_is_running():
            cgconfig_service.cgconfig_start()
            utils_libvirtd.libvirtd_restart()
        # Restore guest
        original_vm_xml.sync()
elif cpulist == "x-y,^z": cpulist = "0-%s,^%s" % (cpu_max, cpu_max) elif cpulist == "-1": cpulist = "-1" elif cpulist == "out_of_max": cpulist = str(cpu_max + 1) else: raise error.TestNAError("CPU-list=%s is not recognized." % cpulist) test_dicts['emulatorpin_cpulist'] = cpulist if cpulist: cpu_list = cpus_parser(cpulist) test_dicts['cpu_list'] = cpu_list logging.debug("CPU list is %s", cpu_list) cg = utils_cgroup.CgconfigService() if cgconfig == "off": if cg.cgconfig_is_running(): cg.cgconfig_stop() # positive and negative testing ######### try: if status_error == "no": if change_parameters == "no": get_emulatorpin_parameter(test_dicts) else: set_emulatorpin_parameter(test_dicts) if status_error == "yes": if change_parameters == "no":
def run(test, params, env):
    """
    Test blkio tuning

    1) Positive testing
       1.1) get the current blkio parameters for a running/shutoff guest
       1.2) set the current blkio parameters for a running/shutoff guest
    2) Negative testing
       2.1) get blkio parameters for a running/shutoff guest
       2.2) set blkio parameters running/shutoff guest

    :param test: avocado test object
    :param params: test parameter dictionary
    :param env: test environment object holding the VM
    """
    # Run test case
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cg = utils_cgroup.CgconfigService()
    cgconfig = params.get("cgconfig", "on")
    libvirtd = params.get("libvirtd", "on")
    start_vm = params.get("start_vm", "yes")
    status_error = params.get("status_error", "no")
    change_parameters = params.get("change_parameters", "no")
    original_vm_xml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Make sure vm is down if start not requested
    if start_vm == "no" and vm and vm.is_alive():
        vm.destroy()

    # NOTE(review): prepare_scheduler() presumably switches the host I/O
    # scheduler and records the previous one in test_dict['oldmode'] —
    # confirm against its definition elsewhere in this file.
    test_dict = prepare_scheduler(params, test, vm)

    # positive and negative testing
    # cgstop records whether we stopped cgconfig ourselves and drives the
    # recovery logic below and in the finally block.
    cgstop = False
    try:
        if start_vm == "yes" and not vm.is_alive():
            vm.start()
            vm.wait_for_login()
        if status_error == "no":
            if change_parameters == "no":
                get_blkio_parameter(test, test_dict, cgstop)
            else:
                set_blkio_parameter(test, test_dict, cgstop)
        if cgconfig == "off":
            # If running, then need to shutdown a running guest before
            # stopping cgconfig service and will start the guest after
            # restarting libvirtd service
            if cg.cgconfig_is_running():
                if vm.is_alive():
                    vm.destroy()
                cg.cgconfig_stop()
                cgstop = True

        # If we stopped cg, then refresh libvirtd service
        # to get latest cgconfig service change; otherwise,
        # if no cg change restart of libvirtd is pointless
        if cgstop and libvirtd == "restart":
            try:
                utils_libvirtd.libvirtd_restart()
            finally:
                # Not running is not a good thing, but it does happen
                # and it will affect other tests
                if not utils_libvirtd.libvirtd_is_running():
                    test.fail("libvirt service is not running!")

        # Recover previous running guest
        if (cgconfig == "off" and libvirtd == "restart"
                and not vm.is_alive() and start_vm == "yes"):
            vm.start()
        if status_error == "yes":
            if change_parameters == "no":
                get_blkio_parameter(test, test_dict, cgstop)
            else:
                set_blkio_parameter(test, test_dict, cgstop)
    finally:
        # Restore guest
        original_vm_xml.sync()
        # NOTE(review): schedulerfd is not defined in this function; it looks
        # like a module-level scheduler sysfs path set up next to
        # prepare_scheduler() — verify it exists at module scope.
        with open(schedulerfd, 'w') as scf:
            scf.write(test_dict['oldmode'])

        # If we stopped cg, then recover and refresh libvirtd to recognize
        if cgstop:
            cg.cgconfig_start()
            utils_libvirtd.libvirtd_restart()
def run(test, params, env):
    """
    Test blkio tuning

    1) Positive testing
       1.1) get the current blkio parameters for a running/shutoff guest
       1.2) set the current blkio parameters for a running/shutoff guest
    2) Negative testing
       2.1) get blkio parameters for a running/shutoff guest
       2.2) set blkio parameters running/shutoff guest

    :param test: avocado test object
    :param params: test parameter dictionary
    :param env: test environment object holding the VM
    """
    # Run test case
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cg = utils_cgroup.CgconfigService()
    cgconfig = params.get("cgconfig", "on")
    libvirtd = params.get("libvirtd", "on")
    start_vm = params.get("start_vm", "yes")
    status_error = params.get("status_error", "no")
    change_parameters = params.get("change_parameters", "no")
    original_vm_xml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Make sure vm is down if start not requested
    if start_vm == "no" and vm and vm.is_alive():
        vm.destroy()

    # Read the host I/O scheduler; the active one is shown in brackets,
    # e.g. "noop [cfq] deadline".
    cmd = "cat /sys/block/sda/queue/scheduler"
    iosche = results_stdout_52lts(process.run(cmd, shell=True))
    logging.debug("iosche value is:%s", iosche)
    # BUGFIX: the pattern was a plain string "\[(.*?)\]"; "\[" and "\]" are
    # invalid string escapes (DeprecationWarning, SyntaxWarning on newer
    # Pythons). A raw string has the identical value without the warning.
    oldmode = re.findall(r"\[(.*?)\]", iosche)[0]
    # Force a weight-capable scheduler (cfq or bfq) for the blkio test.
    with open('/sys/block/sda/queue/scheduler', 'w') as scf:
        if 'cfq' in iosche:
            scf.write('cfq')
        elif 'bfq' in iosche:
            scf.write('bfq')
        else:
            test.fail('Unknown scheduler in "/sys/block/sda/queue/scheduler"')

    test_dict = dict(params)
    test_dict['vm'] = vm

    # positive and negative testing
    # cgstop records whether we stopped cgconfig ourselves and drives the
    # recovery logic below and in the finally block.
    cgstop = False
    try:
        if start_vm == "yes" and not vm.is_alive():
            vm.start()
            vm.wait_for_login()
        if status_error == "no":
            if change_parameters == "no":
                get_blkio_parameter(test, test_dict, cgstop)
            else:
                set_blkio_parameter(test, test_dict, cgstop)
        if cgconfig == "off":
            # If running, then need to shutdown a running guest before
            # stopping cgconfig service and will start the guest after
            # restarting libvirtd service
            if cg.cgconfig_is_running():
                if vm.is_alive():
                    vm.destroy()
                cg.cgconfig_stop()
                cgstop = True

        # If we stopped cg, then refresh libvirtd service
        # to get latest cgconfig service change; otherwise,
        # if no cg change restart of libvirtd is pointless
        if cgstop and libvirtd == "restart":
            try:
                utils_libvirtd.libvirtd_restart()
            finally:
                # Not running is not a good thing, but it does happen
                # and it will affect other tests
                if not utils_libvirtd.libvirtd_is_running():
                    test.fail("libvirt service is not running!")

        # Recover previous running guest
        if (cgconfig == "off" and libvirtd == "restart"
                and not vm.is_alive() and start_vm == "yes"):
            vm.start()
        if status_error == "yes":
            if change_parameters == "no":
                get_blkio_parameter(test, test_dict, cgstop)
            else:
                set_blkio_parameter(test, test_dict, cgstop)
    finally:
        # Restore guest
        original_vm_xml.sync()
        # Restore the original host I/O scheduler.
        with open('/sys/block/sda/queue/scheduler', 'w') as scf:
            scf.write(oldmode)

        # If we stopped cg, then recover and refresh libvirtd to recognize
        if cgstop:
            cg.cgconfig_start()
            utils_libvirtd.libvirtd_restart()
def run(test, params, env):
    """
    Test emulatorpin tuning

    1) Positive testing
       1.1) get the current emulatorpin parameters for a running/shutoff guest
       1.2) set the current emulatorpin parameters for a running/shutoff guest
    2) Negative testing
       2.1) get emulatorpin parameters for a running/shutoff guest
       2.2) set emulatorpin parameters running/shutoff guest

    :param test: avocado test object
    :param params: test parameter dictionary
    :param env: test environment object holding the VM
    """
    # Run test case
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cgconfig = params.get("cgconfig", "on")
    cpulist = params.get("emulatorpin_cpulist")
    status_error = params.get("status_error", "no")
    change_parameters = params.get("change_parameters", "no")

    # Backup original vm
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    emulatorpin_placement = params.get("emulatorpin_placement", "")
    if emulatorpin_placement:
        vm.destroy()
        vmxml.placement = emulatorpin_placement
        vmxml.sync()
        # NOTE(review): vm.start() is not wrapped in try/except here; if the
        # modified XML fails to boot, vmxml_backup.sync() never runs and the
        # guest is left with the changed placement — consider guarding this.
        vm.start()

    test_dicts = dict(params)
    test_dicts['vm'] = vm

    host_cpus = utils.count_cpus()
    test_dicts['host_cpus'] = host_cpus
    cpu_max = int(host_cpus) - 1

    cpu_list = None

    # Assemble cpu list for positive test
    if status_error == "no":
        if cpulist is None:
            pass
        elif cpulist == "x":
            cpulist = random.choice(utils.cpu_online_map())
        elif cpulist == "x-y":
            # NOTE(review): "0-%s" % cpu_max pins the emulator to all CPUs,
            # which is libvirt's default; libvirt may treat that as a no-op,
            # leaving nothing observable to verify — confirm this case is
            # still meaningful.
            cpulist = "0-%s" % cpu_max
        elif cpulist == "x,y":
            cpulist = ','.join(random.sample(utils.cpu_online_map(), 2))
        elif cpulist == "x-y,^z":
            cpulist = "0-%s,^%s" % (cpu_max, cpu_max)
        elif cpulist == "-1":
            cpulist = "-1"
        elif cpulist == "out_of_max":
            cpulist = str(cpu_max + 1)
        else:
            raise error.TestNAError("CPU-list=%s is not recognized."
                                    % cpulist)
    test_dicts['emulatorpin_cpulist'] = cpulist

    if cpulist:
        cpu_list = cpus_parser(cpulist)
        test_dicts['cpu_list'] = cpu_list
        logging.debug("CPU list is %s", cpu_list)

    cg = utils_cgroup.CgconfigService()

    if cgconfig == "off":
        if cg.cgconfig_is_running():
            cg.cgconfig_stop()

    # positive and negative testing #########
    try:
        if status_error == "no":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts)
            else:
                set_emulatorpin_parameter(test_dicts)

        if status_error == "yes":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts)
            else:
                set_emulatorpin_parameter(test_dicts)
    finally:
        # Recover cgconfig and libvirtd service
        if not cg.cgconfig_is_running():
            cg.cgconfig_start()
            utils_libvirtd.libvirtd_restart()
        # Recover vm.
        vmxml_backup.sync()