def get_parameter_in_cgroup(vm, cgroup_type, parameter):
    """
    Get vm's cgroup value.

    :param vm: the vm object
    :param cgroup_type: type of cgroup we want, vcpu or emulator.
    :param parameter: the cgroup parameter of vm which we need to get.
    :return: the value of parameter in cgroup.
    """
    vm_pid = vm.get_pid()
    cgtest = libvirt_cgroup.CgroupTest(vm_pid)
    cgroup_info = cgtest.get_standardized_cgroup_info("schedinfo")
    logging.debug("cgroup_info is %s", cgroup_info)
    # period/quota files live under a per-thread sub-directory, so qualify
    # the file name with the cgroup type before the reverse lookup.
    if parameter in ["cpu.cfs_period_us", "cpu.cfs_quota_us"]:
        if cgroup_type == "emulator":
            parameter = "%s/%s" % (cgroup_type, parameter)
        elif cgroup_type in ["vcpu", "iothread"]:
            parameter = "<%sX>/%s" % (cgroup_type, parameter)
    # Reverse-map the cgroup file name to its schedinfo key.  Use an explicit
    # sentinel instead of probing locals(), which is fragile and would raise
    # NameError on the final lookup if the key were never bound.
    cgroup_ref_key = next(
        (key for key, value in
         libvirt_cgroup.CGROUP_V1_SCHEDINFO_FILE_MAPPING.items()
         if value == parameter),
        None)
    if cgroup_ref_key is None:
        test.error("{} is not found in CGROUP_V1_SCHEDINFO_FILE_MAPPING."
                   .format(parameter))
    return cgroup_info[cgroup_ref_key]
def get_blkio_params_from_cgroup(params):
    """
    Get a list of domain-specific per block stats from cgroup blkio controller.

    :param params: the parameter dictionary
    """
    domain = params.get("vm")
    cgroup_test = libvirt_cgroup.CgroupTest(domain.get_pid())
    blkio_info = cgroup_test.get_standardized_cgroup_info(virsh_cmd='blkiotune')
    logging.debug("The blkio values from cgroup is :'%s'", blkio_info)
    return blkio_info
def analyse_schedinfo_output(result, set_ref):
    """
    Get the value of set_ref.

    :param result: CmdResult struct
    :param set_ref: the parameter has been set
    :return: the value of the parameter.
    """
    helper = libvirt_cgroup.CgroupTest(None)
    virsh_dict = helper.convert_virsh_output_to_dict(result)
    info = helper.get_standardized_virsh_info("schedinfo", virsh_dict)
    # Collect only parameters that are present (truthy) in the virsh output.
    return [info.get(name) for name in set_ref.split(",") if info.get(name)]
def get_cpuacct_info(suffix):
    """
    Get the CPU accounting info within the vm

    :param suffix: str, suffix of the CPU accounting.(stat/usage/usage_percpu)
    :return: list, the list of CPU accounting info
    """
    domain = env.get_vm(vm_ref)
    cgroup = libvirt_cgroup.CgroupTest(domain.get_pid())
    controller_path = cgroup.get_cgroup_path("cpuacct")
    # We only need the info in file which "emulator" is not in path
    if os.path.basename(controller_path) == "emulator":
        controller_path = os.path.dirname(controller_path)
    usage_file = os.path.join(controller_path, 'cpuacct.%s' % suffix)
    with open(usage_file, 'r') as handle:
        cpuacct_info = handle.read().strip().split()
    logging.debug("cpuacct info %s", cpuacct_info)
    return cpuacct_info
def get_emulatorpin_from_cgroup(params, test):
    """
    Get the emulator's pinned CPU set from the cgroup cpuset controller.

    :param params: the parameter dictionary
    :param test: the test object
    :raises: test.error if an error happens
    :return: str, the first line of the cpuset file (the pinned CPU list)
    """
    vm = params.get("vm")
    cg_obj = libvirt_cgroup.CgroupTest(vm.get_pid())
    cpuset_path = cg_obj.get_cgroup_path("cpuset")
    if cg_obj.is_cgroup_v2_enabled():
        cpuset_file = os.path.join(cpuset_path,
                                   "emulator/cpuset.cpus.effective")
    else:
        cpuset_file = os.path.join(cpuset_path, "cpuset.cpus")
    try:
        # NOTE: mode "rU" was removed in Python 3.11; plain "r" already uses
        # universal newline handling on Python 3.
        with open(cpuset_file, "r") as f_emulatorpin_params:
            emulatorpin_params_from_cgroup = f_emulatorpin_params.readline()
            return emulatorpin_params_from_cgroup
    except IOError:
        test.error("Failed to get emulatorpin "
                   "params from %s" % cpuset_file)
def run(test, params, env):
    """
    Test the command virsh memtune

    1) To get the current memtune parameters
    2) Change the parameter values
    3) Check the memtune query updated with the values
    4) Check whether the mounted cgroup path gets the updated value
    5) Check the output of virsh dumpxml
    6) Check vm is alive
    """
    # Check for memtune command is available in the libvirt version under test
    if not virsh.has_help_command("memtune"):
        test.cancel("Memtune not available in this libvirt version")

    # Check if memtune options are supported
    for option in memtune_types:
        option = re.sub('_', '-', option)
        if not virsh.has_command_help_match("memtune", option):
            test.cancel("%s option not available in memtune "
                        "cmd in this libvirt version" % option)

    # Get common parameters
    acceptable_minus = int(utils_memory.getpagesize() - 1)
    step_mem = params.get("mt_step_mem", "no") == "yes"
    expect_error = params.get("expect_error", "no") == "yes"
    restart_libvirtd = params.get("restart_libvirtd", "no") == "yes"
    set_one_line = params.get("set_in_one_command", "no") == "yes"
    mt_hard_limit = params.get("mt_hard_limit", None)
    mt_soft_limit = params.get("mt_soft_limit", None)
    mt_swap_hard_limit = params.get("mt_swap_hard_limit", None)
    # if restart_libvirtd is True, set set_one_line is True so all three
    # limits are applied in one command before the daemon restart
    set_one_line = True if restart_libvirtd else set_one_line

    # Get the vm name, pid of vm and check for alive
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    pid = vm.get_pid()

    # Resolve the memory cgroup path for a domain
    cgtest = libvirt_cgroup.CgroupTest(pid)
    path = cgtest.get_cgroup_path("memory")
    logging.debug("cgroup path is %s", path)

    # Shared with module-level helpers (e.g. check_limit/check_limits),
    # hence the global.
    global mem_cgroup_info
    mem_cgroup_info = cgtest.get_cgroup_file_mapping(virsh_cmd='memtune')
    logging.debug("memtune cgroup info is %s", mem_cgroup_info)

    # step_mem is used to do step increment limit testing
    if step_mem:
        mem_step(params, path, vm, test, acceptable_minus)
        return

    if not set_one_line:
        # Set one type memtune limit in one command; exactly one of the
        # three limits is expected to be provided by the test params
        if mt_hard_limit:
            index = 0
            mt_limit = mt_hard_limit
        elif mt_soft_limit:
            index = 1
            mt_limit = mt_soft_limit
        elif mt_swap_hard_limit:
            index = 2
            mt_limit = mt_swap_hard_limit
        mt_type = memtune_types[index]
        mt_cgname = mem_cgroup_info[mt_type]
        options = " --%s %s --live" % (re.sub('_', '-', mt_type), mt_limit)
        result = virsh.memtune_set(vm.name, options, debug=True)

        if expect_error:
            fail_patts = [params.get("error_info")]
            libvirt.check_result(result, fail_patts, [])
        else:
            # If limit value is negative, means no memtune limit
            mt_expected = mt_limit if int(mt_limit) > 0 else -1
            check_limit(path, mt_expected, mt_type, mt_cgname, vm, test,
                        acceptable_minus)
    else:
        # Set 3 limits in one command line
        mt_limits = [mt_hard_limit, mt_soft_limit, mt_swap_hard_limit]
        options = " %s --live" % ' '.join(mt_limits)
        result = virsh.memtune_set(vm.name, options, debug=True)

        if expect_error:
            fail_patts = [params.get("error_info")]
            libvirt.check_result(result, fail_patts, [])
        else:
            check_limits(path, mt_limits, vm, test, acceptable_minus)

    # restart_libvirtd forces set_one_line above, so mt_limits is defined here
    if restart_libvirtd:
        libvirtd = utils_libvirtd.Libvirtd()
        libvirtd.restart()

        if not expect_error:
            # After libvirtd restarted, check memtune values again
            check_limits(path, mt_limits, vm, test, acceptable_minus)
def run(test, params, env):
    """
    Test vcpu hotpluggable item in xml

    1. Set the libvirtd log filter/level/file
    2. Restart libvirtd
    3. Start vm by xml with vcpu hotpluggable
    4. Check the qemu command line
    5. Check the libvirtd log
    6. Restart libvirtd
    7. Check the vm xml

    :param test: the test object
    :param params: the test parameter dictionary
    :param env: the test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vcpus_placement = params.get("vcpus_placement", "static")
    vcpus_crt = int(params.get("vcpus_current", "4"))
    vcpus_max = int(params.get("vcpus_max", "8"))
    vcpus_enabled = params.get("vcpus_enabled", "")
    vcpus_hotplug = params.get("vcpus_hotpluggable", "")
    vcpus_order = params.get("vcpus_order")
    err_msg = params.get("err_msg", "")
    config_libvirtd = params.get("config_libvirtd", "yes") == "yes"
    log_file = params.get("log_file", "libvirtd.log")
    live_vcpus = params.get("set_live_vcpus", "")
    config_vcpus = params.get("set_config_vcpus", "")
    enable_vcpu = params.get("set_enable_vcpu", "")
    disable_vcpu = params.get("set_disable_vcpu", "")
    start_vm_after_config = params.get('start_vm_after_config', 'yes') == 'yes'

    # Backup domain XML so it can be restored in the finally block
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        # Configure libvirtd log
        if config_libvirtd:
            config_path = os.path.join(data_dir.get_tmp_dir(), log_file)
            # Touch the log file so libvirtd can append to it
            with open(config_path, 'a') as f:
                pass
            daemon_conf_dict = {
                "log_level": "1",
                "log_filters": "\"1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event\"",
                "log_outputs": "\"1:file:{}\"".format(config_path)
            }
            daemon_conf = libvirt.customize_libvirt_config(daemon_conf_dict)

            # Restart libvirtd to make the changes take effect in libvirt
            libvirtd.restart()

        # Set vcpu: placement,current,max vcpu
        vmxml.placement = vcpus_placement
        vmxml.vcpu = vcpus_max
        vmxml.current_vcpu = vcpus_crt
        del vmxml.cpuset

        # Create vcpu xml with vcpu hotpluggable and order
        vcpu_list = []
        vcpu = {}
        en_list = vcpus_enabled.split(",")
        hotplug_list = vcpus_hotplug.split(",")
        order_dict = ast.literal_eval(vcpus_order)

        for vcpu_id in range(vcpus_max):
            vcpu['id'] = str(vcpu_id)
            if str(vcpu_id) in en_list:
                vcpu['enabled'] = 'yes'
                # 'order' is only meaningful for enabled vcpus
                if str(vcpu_id) in order_dict:
                    vcpu['order'] = order_dict[str(vcpu_id)]
            else:
                vcpu['enabled'] = 'no'
            if str(vcpu_id) in hotplug_list:
                vcpu['hotpluggable'] = 'yes'
            else:
                vcpu['hotpluggable'] = 'no'
            vcpu_list.append(copy.copy(vcpu))
            vcpu = {}

        vcpus_xml = vm_xml.VMVCPUSXML()
        vcpus_xml.vcpu = vcpu_list
        vmxml.vcpus = vcpus_xml

        # Remove influence from topology setting (best-effort: topology may
        # not be present, in which case we just log and continue)
        try:
            logging.info('Remove influence from topology setting')
            cpuxml = vmxml.cpu
            del cpuxml.topology
            vmxml.cpu = cpuxml
        except Exception as e:
            logging.debug("Ignoring topology cleanup failure: %s", e)

        vmxml.sync()
        logging.debug("Before starting, VM xml:"
                      "\n%s", vm_xml.VMXML.new_from_inactive_dumpxml(vm_name))

        # Start VM
        if start_vm_after_config:
            logging.info("Start VM with vcpu hotpluggable and order...")
            ret = virsh.start(vm_name, ignore_status=True)

        if err_msg:
            libvirt.check_result(ret, err_msg)
        else:
            if start_vm_after_config:
                # Wait for domain
                vm.wait_for_login()

            if enable_vcpu:
                ret = virsh.setvcpu(vm_name, enable_vcpu, "--enable",
                                    ignore_status=False, debug=True)
                vcpus_crt += 1
            if disable_vcpu:
                # BUGFIX: keyword was misspelled "ingnore_status", which
                # virsh.setvcpu silently swallowed, so failures never raised
                ret = virsh.setvcpu(vm_name, disable_vcpu, "--disable",
                                    ignore_status=False, debug=True)
                vcpus_crt -= 1
            if live_vcpus:
                ret = virsh.setvcpus(vm_name, live_vcpus,
                                     ignore_status=False, debug=True)
                vcpus_crt = int(live_vcpus)
            if config_vcpus:
                ret = virsh.setvcpus(vm_name, config_vcpus, "--config",
                                     ignore_status=False, debug=True)

            # Check QEMU command line
            if start_vm_after_config:
                cmd = ("ps -ef| grep %s| grep 'maxcpus=%s'"
                       % (vm_name, vcpus_max))
                ret = process.run(cmd, ignore_status=False, shell=True)
                if ret.exit_status != 0:
                    logging.error("Maxcpus in QEMU command line is wrong!")

            # Check libvirtd log for the device_add of each hotpluggable
            # enabled vcpu
            if config_libvirtd and start_vm_after_config:
                for vcpu in vcpu_list:
                    if vcpu['enabled'] == 'yes' and vcpu[
                            'hotpluggable'] == 'yes':
                        cmd = ("cat %s| grep device_add| grep qemuMonitorIOWrite"
                               "| grep 'vcpu%s'" % (config_path, vcpu['id']))
                        ret = process.run(cmd, ignore_status=False, shell=True)
                        if ret.exit_status != 0:
                            logging.error(
                                "Failed to find lines about enabled vcpu%s "
                                "in libvirtd log.", vcpu['id'])

        # Dumpxml
        dump_xml = virsh.dumpxml(vm_name).stdout.strip()
        vcpu_items = re.findall(r"vcpu.*", dump_xml)

        # Check guest vcpu count
        ret = virsh.vcpucount(vm_name, ignore_status=True, debug=True)
        output = ret.stdout.strip()
        # BUGFIX: "[config|live]" was a character class, not the intended
        # alternation between the config and live rows
        max_list = re.findall(r"maximum.*(?:config|live).*%s\n" % vcpus_max,
                              output)
        expect_num = 2 if start_vm_after_config else 1
        if len(max_list) != expect_num:
            test.fail("vcpucount maximum info is not correct.")

        if live_vcpus:
            crt_live_list = re.findall(r"current.*live.*%s" % live_vcpus,
                                       output)
            logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
            if len(crt_live_list) != 1:
                test.fail("vcpucount: current live info is not correct.")
        elif config_vcpus:
            crt_cfg_list = re.findall(r"current.*config.*%s" % config_vcpus,
                                      output)
            logging.info("vcpucount crt_cfg_list: \n %s", crt_cfg_list)
            if len(crt_cfg_list) != 1:
                test.fail("vcpucount: current config info is not correct.")
        else:
            crt_live_list = re.findall(r"current.*live.*%s" % vcpus_crt,
                                       output)
            logging.info("vcpucount crt_live_list: \n %s", crt_live_list)
            if len(crt_live_list) != 1:
                test.fail("vcpucount: current info is not correct.")

        # Check guest vcpu info
        ret = virsh.vcpuinfo(vm_name, ignore_status=True, debug=True)
        output = ret.stdout.strip()
        vcpu_lines = re.findall(r"VCPU:.*\n", output)
        logging.info("vcpuinfo vcpu_lines: \n %s", vcpu_lines)
        # NOTE(review): assumes config_vcpus is set whenever the VM was not
        # started; int("") would raise ValueError otherwise
        expect_num = vcpus_crt if start_vm_after_config else int(config_vcpus)
        if len(vcpu_lines) != expect_num:
            test.fail("vcpuinfo is not correct.")

        # Check cpu in guest
        if start_vm_after_config and not cpu.check_if_vm_vcpu_match(
                vcpus_crt, vm):
            test.fail("cpu number in VM is not correct, it should be %s cpus"
                      % vcpus_crt)

        # Check VM xml change for cold-plug/cold-unplug
        if config_vcpus:
            check_vcpu_after_plug_unplug(test, vm_name, config_vcpus)

        # Restart libvirtd
        libvirtd.restart()
        if config_vcpus and not start_vm_after_config:
            check_vm_exist(test, vm_name, 'shut off')

        # Recheck VM xml: it must survive a daemon restart unchanged
        re_dump_xml = virsh.dumpxml(vm_name).stdout.strip()
        re_vcpu_items = re.findall(r"vcpu.*", re_dump_xml)
        if vcpu_items != re_vcpu_items:
            test.fail("After restarting libvirtd, "
                      "VM xml changed unexpectedly.")

        # Check cgroup info: every enabled vcpu should have a cpuset dir
        if start_vm_after_config:
            en_vcpu_list = re.findall(r"vcpu.*enabled=.yes.*", re_dump_xml)
            for vcpu_sn in range(len(en_vcpu_list)):
                vcpu_id = en_vcpu_list[vcpu_sn].split(
                    "=")[1].split()[0].strip('\'')
                cg_obj = libvirt_cgroup.CgroupTest(vm.get_pid())
                cg_path = cg_obj.get_cgroup_path("cpuset")
                if cg_obj.is_cgroup_v2_enabled():
                    vcpu_path = os.path.join(cg_path, "vcpu%s" % vcpu_id)
                else:
                    vcpu_path = os.path.join(cg_path, "../vcpu%s" % vcpu_id)
                if not os.path.exists(vcpu_path):
                    test.fail(
                        "Failed to find the enabled vcpu{} in {}.".format(
                            vcpu_id, cg_path))
    finally:
        # Recover libvirtd configuration
        if config_libvirtd and 'daemon_conf' in locals():
            libvirt.customize_libvirt_config(None, remote_host=False,
                                             is_recover=True,
                                             config_object=daemon_conf)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
def run(test, params, env):
    """
    Test virsh cpu-stats command.

    The command can display domain per-CPU and total statistics.
    1. Call virsh cpu-stats [domain]
    2. Call virsh cpu-stats [domain] with valid options
    3. Call virsh cpu-stats [domain] with invalid options

    :param test: the test object
    :param params: the test parameter dictionary
    :param env: the test environment
    """
    def get_cpuacct_info(suffix):
        """
        Get the CPU accounting info within the vm

        :param suffix: str, suffix of the CPU accounting.(stat/usage/usage_percpu)
        :return: list, the list of CPU accounting info, or None when cg_obj
                 was never created (VM without a pid)
        """
        # cg_obj is a free variable from the enclosing scope; when the outer
        # run() never bound it, its unbound closure cell is simply absent
        # from locals() here, so this guards against a NameError below.
        if 'cg_obj' not in locals():
            return
        # On cgroup v2 use cpu.stat as a substitute
        if cg_obj.is_cgroup_v2_enabled():
            cg_path = cg_obj.get_cgroup_path("cpu")
            para = ('cpu.%s' % suffix)
        else:
            cg_path = cg_obj.get_cgroup_path("cpuacct")
            para = ('cpuacct.%s' % suffix)
        # We only need the info in file which "emulator" is not in path
        if os.path.basename(cg_path) == "emulator":
            cg_path = os.path.dirname(cg_path)
        usage_file = os.path.join(cg_path, para)
        with open(usage_file, 'r') as f:
            cpuacct_info = f.read().strip().split()
        logging.debug("cpuacct info %s", cpuacct_info)
        return cpuacct_info

    def check_user_and_system_time(total_list):
        """
        Check that virsh-reported user/system time falls between the two
        cgroup samples taken before and after the virsh call.

        :param total_list: list of tokens from the "Total" section of the
                           virsh cpu-stats output
        """
        user_time = float(total_list[4])
        system_time = float(total_list[7])

        # Check libvirt user and system time between pre and next cgroup time
        # Unit conversion (Unit: second)
        # Default time unit is microseconds on cgroup v2 while 1/100 second on v1
        if cg_obj.is_cgroup_v2_enabled():
            pre_user_time = float(cpuacct_res_pre[3]) / 1000000
            pre_sys_time = float(cpuacct_res_pre[5]) / 1000000
            next_user_time = float(cpuacct_res_next[3]) / 1000000
            next_sys_time = float(cpuacct_res_next[5]) / 1000000
        else:
            pre_user_time = float(cpuacct_res_pre[1]) / 100
            pre_sys_time = float(cpuacct_res_pre[3]) / 100
            next_user_time = float(cpuacct_res_next[1]) / 100
            next_sys_time = float(cpuacct_res_next[3]) / 100

        # check user_time
        if next_user_time >= user_time >= pre_user_time:
            logging.debug("Got the expected user_time: %s", user_time)
        else:
            test.fail("Got unexpected user_time: %s, " % user_time +
                      "should between pre_user_time:%s " % pre_user_time +
                      "and next_user_time:%s" % next_user_time)

        # check system_time
        if next_sys_time >= system_time >= pre_sys_time:
            logging.debug("Got the expected system_time: %s", system_time)
        else:
            test.fail("Got unexpected system_time: %s, " % system_time +
                      "should between pre_sys_time:%s " % pre_sys_time +
                      "and next_sys_time:%s" % next_sys_time)

    if not virsh.has_help_command('cpu-stats'):
        test.cancel("This version of libvirt does not support "
                    "the cpu-stats test")

    vm_name = params.get("main_vm", "vm1")
    vm_ref = params.get("cpu_stats_vm_ref")
    status_error = params.get("status_error", "no")
    options = params.get("cpu_stats_options")
    error_msg = params.get("error_msg", "")
    logging.debug("options are %s", options)

    if vm_ref == "name":
        vm_ref = vm_name
    vm = env.get_vm(vm_ref)
    # Only bind cg_obj when a live VM with a pid exists; the nested helper
    # checks for its presence before using it.
    if vm and vm.get_pid():
        cg_obj = libvirt_cgroup.CgroupTest(vm.get_pid())

    # get host cpus num
    cpus = cpu.online_cpus_count()
    logging.debug("host online cpu num is %s", cpus)

    # get options and put into a dict
    get_total = re.search('total', options)
    get_start = re.search('start', options)
    get_count = re.search('count', options)

    # command without options
    get_noopt = 0
    if not get_total and not get_start and not get_count:
        get_noopt = 1

    # command with only --total option
    get_totalonly = 0
    if not get_start and not get_count and get_total:
        get_totalonly = 1

    option_dict = {}
    if options.strip():
        option_list = options.split('--')
        logging.debug("option_list is %s", option_list)
        for match in option_list[1:]:
            if get_start or get_count:
                option_dict[match.split(' ')[0]] = match.split(' ')[1]

    # check if cpu is enough,if not cancel the test
    if (status_error == "no"):
        cpu_start = int(option_dict.get("start", "0"))
        if cpu_start == 32:
            cpus = cpu.total_cpus_count()
            logging.debug("Host total cpu num: %s", cpus)
        if (cpu_start >= cpus):
            test.cancel("Host cpus are not enough")

    # get CPU accounting info twice to compare with user_time and system_time
    cpuacct_res_pre = get_cpuacct_info('stat')

    # Run virsh command
    cmd_result = virsh.cpu_stats(vm_ref, options,
                                 ignore_status=True, debug=True)
    output = cmd_result.stdout.strip()
    status = cmd_result.exit_status

    cpuacct_res_next = get_cpuacct_info('stat')

    # check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command! Output: {}".format(
                output))
        else:
            # Check error message is expected
            if not re.search(error_msg, cmd_result.stderr.strip()):
                test.fail("Error message is not expected! "
                          "Expected: {} Actual: {}".format(
                              error_msg, cmd_result.stderr.strip()))
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command! Error: {}".format(
                cmd_result.stderr.strip()))
        else:
            # Get cgroup cpu_time
            if not get_totalonly:
                cgtime = get_cpuacct_info('usage_percpu')

            # Cut CPUs from output and format to list
            if get_total:
                mt_start = re.search('Total', output).start()
            else:
                mt_start = len(output)
            output_cpus = " ".join(output[:mt_start].split())
            cpus_list = re.compile(r'CPU\d+:').split(output_cpus)

            # conditions that list total time info
            if get_noopt or get_total:
                mt_end = re.search('Total', output).end()
                total_list = output[mt_end + 1:].split()
                total_time = float(total_list[1])
                check_user_and_system_time(total_list)

            start_num = 0
            if get_start:
                start_num = int(option_dict["start"])
            end_num = int(cpus)
            if get_count:
                count_num = int(option_dict["count"])
                if end_num > start_num + count_num:
                    end_num = start_num + count_num

            # for only give --total option it only shows "Total" cpu info
            if get_totalonly:
                end_num = -1

            # find CPU[N] in output and sum the cpu_time and cgroup cpu_time
            sum_cputime = 0
            sum_cgtime = 0
            logging.debug("start_num %d, end_num %d", start_num, end_num)
            for i in range(start_num, end_num):
                logging.debug("Check CPU" + "%i" % i + " exist")
                sum_cputime += float(cpus_list[i - start_num + 1].split()[1])
                sum_cgtime += float(cgtime[i])
                if not re.search('CPU' + "%i" % i, output):
                    test.fail("Fail to find CPU" + "%i" % i + "in "
                              "result")

            # check cgroup cpu_time > sum of cpu_time
            if end_num >= 0:
                logging.debug("Check sum of cgroup cpu_time %d >= cpu_time %d",
                              sum_cgtime, sum_cputime)
                if sum_cputime > sum_cgtime:
                    test.fail("Check sum of cgroup cpu_time < sum "
                              "of output cpu_time")

            # check Total cpu_time >= sum of cpu_time when no options
            if get_noopt:
                logging.debug(
                    "Check total time %d >= sum of output cpu_time"
                    " %d", total_time, sum_cputime)
                if total_time < sum_cputime:
                    test.fail("total time < sum of output cpu_time")
def run(test, params, env):
    """
    Test command: virsh schedinfo.

    This version provide base test of virsh schedinfo command:
    virsh schedinfo <vm> [--set<set_ref>]
    TODO: to support more parameters.

    1) Get parameters and prepare vm's state
    2) Prepare test options.
    3) Run schedinfo command to set or get parameters.
    4) Get schedinfo in cgroup
    5) Recover environment like vm's state
    6) Check result.

    :param test: the test object
    :param params: the test parameter dictionary
    :param env: the test environment
    """
    def get_parameter_in_cgroup(vm, cgroup_type, parameter):
        """
        Get vm's cgroup value.

        :Param vm: the vm object
        :Param cgroup_type: type of cgroup we want, vcpu or emulator.
        :Param parameter: the cgroup parameter of vm which we need to get.
        :return: the value of parameter in cgroup.
        """
        vm_pid = vm.get_pid()
        cgtest = libvirt_cgroup.CgroupTest(vm_pid)
        cgroup_info = cgtest.get_standardized_cgroup_info("schedinfo")
        logging.debug("cgroup_info is %s" % cgroup_info)
        # period/quota files live under a per-thread sub-directory, so
        # qualify the file name with the cgroup type before the lookup
        if parameter in ["cpu.cfs_period_us", "cpu.cfs_quota_us"]:
            if cgroup_type == "emulator":
                parameter = "%s/%s" % (cgroup_type, parameter)
            elif cgroup_type in ["vcpu", "iothread"]:
                parameter = "<%sX>/%s" % (cgroup_type, parameter)
        # Reverse-map the cgroup file name to its schedinfo key
        for key, value in libvirt_cgroup.CGROUP_V1_SCHEDINFO_FILE_MAPPING.items():
            if value == parameter:
                cgroup_ref_key = key
                break
        # locals() probe detects whether the loop above ever bound the key
        if 'cgroup_ref_key' not in locals():
            test.error("{} is not found in CGROUP_V1_SCHEDINFO_FILE_MAPPING."
                       .format(parameter))
        return cgroup_info[cgroup_ref_key]

    def analyse_schedinfo_output(result, set_ref):
        """
        Get the value of set_ref.

        :param result: CmdResult struct
        :param set_ref: the parameter has been set
        :return: the value of the parameter.
        """
        cg_obj = libvirt_cgroup.CgroupTest(None)
        output_dict = cg_obj.convert_virsh_output_to_dict(result)
        result_info = cg_obj.get_standardized_virsh_info("schedinfo",
                                                         output_dict)
        set_value_list = []
        for set_ref_node in set_ref.split(","):
            if result_info.get(set_ref_node):
                set_value_list.append(result_info.get(set_ref_node))
        return set_value_list

    def get_current_value():
        """
        Get the current schedinfo value and return
        """
        current_result = virsh.schedinfo(vm_ref, " --current",
                                         ignore_status=True, debug=True)
        current_value = analyse_schedinfo_output(current_result, set_ref)
        return current_value

    # Prepare test options
    vm_ref = params.get("schedinfo_vm_ref", "domname")
    options_ref = params.get("schedinfo_options_ref", "")
    options_suffix = params.get("schedinfo_options_suffix", "")
    schedinfo_param = params.get("schedinfo_param", "vcpu")
    set_ref = params.get("schedinfo_set_ref", "")
    cgroup_ref = params.get("schedinfo_cgroup_ref", "cpu.shares")
    set_value = params.get("schedinfo_set_value", "")
    set_method = params.get("schedinfo_set_method", "cmd")
    set_value_expected = params.get("schedinfo_set_value_expected", "")
    # Libvirt version where function begins to change
    libvirt_ver_function_changed = eval(params.get(
        "libvirt_ver_function_changed", '[]'))
    # The default scheduler on qemu/kvm is posix
    scheduler_value = "posix"
    status_error = params.get("status_error", "no")
    start_vm = ("yes" == params.get("start_vm"))
    readonly = ("yes" == params.get("schedinfo_readonly", "no"))
    expect_msg = params.get("schedinfo_err_msg", "")

    # cgroup v2 hosts may need different set/expected values, or may not
    # support the scenario at all
    if libvirt_cgroup.CgroupTest(None).is_cgroup_v2_enabled():
        if params.get("schedinfo_set_value_cgroupv2"):
            set_value = params.get("schedinfo_set_value_cgroupv2")
        if params.get("schedinfo_set_value_expected_cgroupv2"):
            set_value_expected = params.get(
                "schedinfo_set_value_expected_cgroupv2")
        if params.get("cgroup_v2_unsupported_reason"):
            test.cancel(params.get('cgroup_v2_unsupported_reason'))

    if libvirt_ver_function_changed:
        if not libvirt_version.version_compare(*libvirt_ver_function_changed):
            set_value = params.get("schedinfo_set_value_bk")
            set_value_expected = params.get("schedinfo_set_value_expected_bk")

    # Prepare vm test environment
    vm_name = params.get("main_vm")

    # For safety reasons, we'd better back up xmlfile.
    orig_config_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    if not orig_config_xml:
        test.error("Backing up xmlfile failed.")

    if set_ref == "none":
        # Negative case: pass --set with no value at all
        options_ref = "--set"
        set_ref = None
    elif set_ref:
        # Prepare vm xml for iothread test
        if schedinfo_param == 'iothread':
            virsh.iothreadadd(vm_name, '1', ignore_status=False, debug=True)
        if set_method == 'cmd':
            if set_value:
                set_ref_list = set_ref.split(",")
                set_value_list = set_value.split(",")
                # Build "--set a=1 b=2 ..." from the paired lists
                for i in range(0, len(set_ref_list)):
                    if "--set" in options_ref:
                        options_ref += " %s=%s" % (set_ref_list[i],
                                                   set_value_list[i])
                    else:
                        options_ref = "--set %s=%s" % (set_ref_list[i],
                                                       set_value_list[i])
            else:
                options_ref = "--set %s" % set_ref
        elif set_method == 'xml':
            # Set the value via the domain's <cputune> XML element instead
            # of the virsh command line
            xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            try:
                cputune = xml.cputune
            except xcepts.LibvirtXMLNotFoundError:
                cputune = vm_xml.VMCPUTuneXML()
            # schedinfo parameter name -> VMCPUTuneXML attribute name
            name_map = {
                'cpu_shares': 'shares',
                'vcpu_period': 'period',
                'vcpu_quota': 'quota',
                'emulator_period': 'emulator_period',
                'emulator_quota': 'emulator_quota',
                'global_period': 'global_period',
                'global_quota': 'global_quota',
                'iothread_period': 'iothread_period',
                'iothread_quota': 'iothread_quota'
            }
            cputune[name_map[set_ref]] = int(set_value)
            xml.cputune = cputune
            xml.sync()
            logging.debug("After setting xml, VM XML:\n%s",
                          vm_xml.VMXML.new_from_dumpxml(vm_name))

    vm = env.get_vm(vm_name)
    if vm.is_dead() and start_vm:
        try:
            vm.start()
        except Exception as detail:
            orig_config_xml.sync()
            test.error(detail)
    domid = vm.get_id()
    domuuid = vm.get_uuid()

    # Resolve the requested reference style for the domain
    if vm_ref == "domid":
        vm_ref = domid
    elif vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domuuid":
        vm_ref = domuuid
    elif vm_ref == "hex_id":
        if domid == '-':
            vm_ref = domid
        else:
            vm_ref = hex(int(domid))

    options_ref += " %s " % options_suffix

    # Get schedinfo with --current parameter
    if set_ref and options_ref.count("config") and start_vm:
        bef_current_value = get_current_value()

    try:
        # Run command
        result = virsh.schedinfo(vm_ref, options_ref,
                                 ignore_status=True, debug=True,
                                 readonly=readonly)
        status = result.exit_status

        # VM must be running to get cgroup parameters.
        if not vm.is_alive():
            vm.start()

        if options_ref.count("config") and start_vm:
            # Get schedinfo with --current parameter; --config must not
            # alter the live (current) values
            aft_current_value = get_current_value()
            if bef_current_value != aft_current_value:
                test.fail("--config change the current %s" % set_ref)
            # Reboot so the --config values become the live values
            vm.destroy()
            vm.start()
            vm_ref = vm.get_id()

        if set_ref:
            start_current_value = get_current_value()

        set_value_of_cgroup = get_parameter_in_cgroup(
            vm, cgroup_type=schedinfo_param, parameter=cgroup_ref)
        vm.destroy(gracefully=False)

        if set_ref:
            set_value_of_output = analyse_schedinfo_output(result, set_ref)

        # Check result
        if status_error == "no":
            if status:
                test.fail("Run failed with right command. Error: {}"
                          .format(result.stderr.strip()))
            else:
                if set_ref and set_value_expected:
                    logging.info("value will be set:%s\n"
                                 "set value in output:%s\n"
                                 "set value in cgroup:%s\n"
                                 "expected value:%s" % (
                                     set_value, set_value_of_output,
                                     set_value_of_cgroup,
                                     set_value_expected))
                    if set_value_of_output is None:
                        test.fail("Get parameter %s failed." % set_ref)
                    # Value in output of virsh schedinfo is not guaranteed 'correct'
                    # when we use --config.
                    # This is my attempt to fix it
                    # http://www.redhat.com/archives/libvir-list/2014-May/msg00466.html.
                    # But this patch did not go into upstream of libvirt.
                    # Libvirt just guarantee that the value is correct in next boot
                    # when we use --config. So skip checking of output in this case.
                    expected_value_list = sorted(set_value_expected.split(','))
                    if (not (expected_value_list == sorted(set_value_of_output))
                            and not (options_ref.count("config"))):
                        test.fail("Run successful but value "
                                  "in output is not expected.")
                    if len(set_value_expected.split(',')) == 1:
                        if not (set_value_expected == set_value_of_cgroup):
                            test.fail("Run successful but value "
                                      "in cgroup is not expected.")
                    if not (expected_value_list == sorted(start_current_value)):
                        test.fail("Run successful but current "
                                  "value is not expected.")
        else:
            if not status:
                test.fail("Run successfully with wrong command. Output: {}"
                          .format(result.stdout_text.strip()))
            if not re.search(expect_msg, result.stderr_text.strip()):
                test.fail("Fail to get expect err msg! "
                          "Expected: {} Actual: {}"
                          .format(expect_msg, result.stderr_text.strip()))
    finally:
        orig_config_xml.sync()