def get_blkio_params_from_cgroup(test, params):
    """
    Get a list of domain-specific per block stats from cgroup blkio
    controller.

    :param test: the test handle (used to fail when a cgroup file is
                 unreadable)
    :param params: the parameter dictionary; must provide "main_vm",
                   "qemu_path" and "vm"
    :return: dict mapping the last dotted component of each blkio file
             name (e.g. "weight") to the value read from it; device
             weights are joined as "major:minor,value"
    """
    vm_name = params.get("main_vm")
    qemu_path = params.get("qemu_path")
    vm = params.get("vm")
    domid = vm.get_id()
    blkio_path = os.path.join(
        utils_cgroup.get_cgroup_mountpoint("blkio"), qemu_path, vm_name)
    if not os.path.isdir(blkio_path):
        # to convert "-" to "\x2d" for vm name on >=F19 and RHEL7.y
        name = vm_name.replace("-", "\\x2d")
        # qemu_path defaults as follows for >= F19 or RHEL7.y
        # qemu_path = "machine.slice/machine-qemu\\x2d%s.scope" % name
        # qemu_path defaults as follows for >= RHEL7.4
        qemu_path = ("machine.slice/machine-qemu\\x2d%s\\x2d%s.scope"
                     % (domid, name))
        blkio_path = os.path.join(
            utils_cgroup.get_cgroup_mountpoint("blkio"), qemu_path)

    bfq_scheduler = False
    # NOTE(review): schedulerfd is not defined in this function -- it is
    # presumably a module-level path to a block device's sysfs scheduler
    # file (e.g. /sys/block/sda/queue/scheduler); confirm it exists at
    # module scope.
    with open(schedulerfd, 'r') as iosche:
        if 'bfq' in iosche.readline():
            bfq_scheduler = True
    if bfq_scheduler:
        # bfq exposes a single weight file and no per-device weight file
        blkio_weight_file = os.path.join(blkio_path, "blkio.bfq.weight")
        blkio_device_weights_file = None
    else:
        blkio_weight_file = os.path.join(blkio_path, "blkio.weight")
        blkio_device_weights_file = os.path.join(blkio_path,
                                                 "blkio.weight_device")

    blkio_params_from_cgroup = {}
    for f in blkio_weight_file, blkio_device_weights_file:
        if f:
            try:
                # 'rU' was removed in Python 3.11; plain 'r' already
                # performs universal newline translation on Python 3
                with open(f, 'r') as f_blkio_params:
                    val = f_blkio_params.readline().split()
                    if len(val) > 1:
                        blkio_params_from_cgroup[f.split('.')[-1]] = \
                            val[0] + "," + val[1]
                    elif len(val) == 1:
                        blkio_params_from_cgroup[f.split('.')[-1]] = val[0]
            except IOError:
                test.fail("Failed to get blkio params from %s" % f)
    logging.debug(blkio_params_from_cgroup)
    return blkio_params_from_cgroup
def get_emulatorpin_from_cgroup(params):
    """
    Get the emulator thread's cpuset from the cgroup cpuset controller.

    :param params: the parameter dictionary; must provide "main_vm" and
                   may provide "qemu_path"
    :return: the first line of the domain's emulator cpuset.cpus file
    :raise error.TestError: if the cpuset file cannot be read
    """
    vm_name = params.get("main_vm")
    qemu_path = params.get("qemu_path")
    if not qemu_path:
        # qemu_path defaults "/libvirt/qemu/" on RHEL6.y
        qemu_path = "/libvirt/qemu/"
    cpuset_path = utils_cgroup.get_cgroup_mountpoint("cpuset") + \
        qemu_path + vm_name
    cpuset_file = os.path.join(cpuset_path, "emulator/cpuset.cpus")
    try:
        # use a context manager so the file is closed even if readline()
        # raises; 'rU' mode was removed in Python 3.11 and plain 'r' is
        # equivalent on Python 3
        with open(cpuset_file, "r") as f_emulatorpin_params:
            return f_emulatorpin_params.readline()
    except IOError:
        raise error.TestError("Failed to get emulatorpin "
                              "params from %s" % cpuset_file)
def test_get_cgroup_mountpoint(self):
    """
    get_cgroup_mountpoint() must return the recorded mount point for
    every known controller in each case and raise TestError for an
    unknown controller.
    """
    for case in mount_cases:
        # delete=False (available since py2.6) keeps the file on disk
        # after close, replacing the old py2.4 close-then-reopen
        # workaround; the written content is identical
        with tempfile.NamedTemporaryFile(mode='w',
                                         delete=False) as mount_file:
            mount_file_path = mount_file.name
            mount_file.write(case["mount_txt"])
        try:
            for idx, controller in enumerate(case["controllers"]):
                res = utils_cgroup.get_cgroup_mountpoint(
                    controller, mount_file_path)
                self.assertEqual(case["mount_points"][idx], res)
            # an unknown controller must raise
            self.assertRaises(
                exceptions.TestError,
                utils_cgroup.get_cgroup_mountpoint,
                "non_exit_ctlr", mount_file_path)
        finally:
            os.remove(mount_file_path)
def get_parameter_in_cgroup(domname, controller="cpu", parameter="cpu.shares",
                            libvirt_cgroup_path="/libvirt/qemu/"):
    """
    Get vm's cgroup value.

    @Param domname: vm's name
    @Param controller: the controller which parameter is in.
    @Param parameter: the cgroup parameter of vm which we need to get.
    @Param libvirt_cgroup_path: the path of libvirt in cgroup
    :return: None if the controller is not mounted or cannot be resolved,
             otherwise the stripped file content.
    """
    try:
        mount_point = utils_cgroup.get_cgroup_mountpoint(controller)
    except IndexError:
        return None
    # get_cgroup_mountpoint() may report an unmounted controller as False
    if mount_point is False:
        return None

    # first try the classic libvirt layout, then the "machine" layout
    param_path = os.path.join(mount_point, libvirt_cgroup_path,
                              domname, parameter)
    if not os.path.exists(param_path):
        param_path = os.path.join(mount_point, "machine",
                                  domname + ".libvirt-qemu", parameter)
    if not os.path.exists(param_path):
        raise error.TestNAError("Unknown path to cgroups")

    result = utils.run("cat %s" % param_path, ignore_status=True)
    return result.stdout.strip()
def get_parameter_in_cgroup(domname, controller="cpu", parameter="cpu.shares",
                            libvirt_cgroup_path="libvirt/qemu/"):
    """
    Get vm's cgroup value.

    @Param domname: vm's name
    @Param controller: the controller which parameter is in.
    @Param parameter: the cgroup parameter of vm which we need to get.
    @Param libvirt_cgroup_path: the path of libvirt in cgroup
    :return: None if the controller is not mounted or cannot be resolved,
             otherwise the stripped file content.
    """
    try:
        mount_point = utils_cgroup.get_cgroup_mountpoint(controller)
    except IndexError:
        return None
    if mount_point is False:
        # controller not mounted
        return None

    # candidate locations, in lookup order: classic libvirt layout
    # first, then the "machine" layout
    candidates = [
        os.path.join(mount_point, libvirt_cgroup_path, domname, parameter),
        os.path.join(mount_point, "machine",
                     domname + ".libvirt-qemu", parameter),
    ]
    param_path = None
    for candidate in candidates:
        if os.path.exists(candidate):
            param_path = candidate
            break
    if param_path is None:
        raise error.TestNAError("Unknown path to cgroups")

    result = utils.run("cat %s" % param_path, ignore_status=True)
    return result.stdout.strip()
def get_blkio_params_from_cgroup(test, params):
    """
    Get a list of domain-specific per block stats from cgroup blkio
    controller.

    :param test: the test handle (used to fail when a cgroup file is
                 unreadable)
    :param params: the parameter dictionary; must provide "main_vm",
                   "qemu_path" and "vm"
    :return: dict mapping "weight"/"weight_device" to the values read;
             device weights are joined as "major:minor,value"
    """
    vm_name = params.get("main_vm")
    qemu_path = params.get("qemu_path")
    vm = params.get("vm")
    domid = vm.get_id()
    blkio_path = os.path.join(
        utils_cgroup.get_cgroup_mountpoint("blkio"), qemu_path, vm_name)
    if not os.path.isdir(blkio_path):
        # to convert "-" to "\x2d" for vm name on >=F19 and RHEL7.y
        name = vm_name.replace("-", "\\x2d")
        # qemu_path defaults as follows for >= F19 or RHEL7.y
        # qemu_path = "machine.slice/machine-qemu\\x2d%s.scope" % name
        # qemu_path defaults as follows for >= RHEL7.4
        qemu_path = ("machine.slice/machine-qemu\\x2d%s\\x2d%s.scope"
                     % (domid, name))
        blkio_path = os.path.join(
            utils_cgroup.get_cgroup_mountpoint("blkio"), qemu_path)
    blkio_weight_file = os.path.join(blkio_path, "blkio.weight")
    blkio_device_weights_file = os.path.join(blkio_path,
                                             "blkio.weight_device")
    blkio_params_from_cgroup = {}
    for f in blkio_weight_file, blkio_device_weights_file:
        try:
            # 'rU' was removed in Python 3.11; plain 'r' already performs
            # universal newline translation on Python 3
            with open(f, 'r') as f_blkio_params:
                val = f_blkio_params.readline().split()
                if len(val) > 1:
                    blkio_params_from_cgroup[f.split('.')[-1]] = \
                        val[0] + "," + val[1]
                elif len(val) == 1:
                    blkio_params_from_cgroup[f.split('.')[-1]] = val[0]
        except IOError:
            test.fail("Failed to get blkio params from %s" % f)
    logging.debug(blkio_params_from_cgroup)
    return blkio_params_from_cgroup
def test_get_cgroup_mountpoint(self):
    """
    get_cgroup_mountpoint() must return the recorded mount point for
    every known controller in each case and raise TestError for an
    unknown controller.
    """
    for case in mount_cases:
        # Let's work around the fact that NamedTemporaryFile
        # on py 2.4 doesn't have the delete param
        mount_file = tempfile.NamedTemporaryFile()
        mount_file_path = mount_file.name
        mount_file.close()
        # Now let's do our own management of the file
        mount_file = open(mount_file_path, 'w')
        mount_file.write(case["mount_txt"])
        mount_file.close()
        try:
            for idx, controller in enumerate(case["controllers"]):
                res = utils_cgroup.get_cgroup_mountpoint(
                    controller, mount_file_path)
                self.assertEqual(case["mount_points"][idx], res)
            self.assertRaises(exceptions.TestError,
                              utils_cgroup.get_cgroup_mountpoint,
                              "non_exit_ctlr", mount_file_path)
        finally:
            # the finally body was missing (truncated source); remove the
            # temp file so repeated runs do not leak files on disk
            os.remove(mount_file_path)
def set_condition(vm_name, condn, reset=False, guestbt=None):
    """
    Set domain to given state or reset it.

    :param vm_name: name of the domain to operate on
    :param condn: condition to apply or undo: "avocado_test", "stress",
                  "save", "managedsave", "suspend", "hotplug" or
                  "host_smt"; anything else only logs a debug message
    :param reset: False to apply the condition, True to undo it
    :param guestbt: background test handle to join when resetting
                    "avocado_test"
    :return: background test handle for "avocado_test", otherwise None

    NOTE(review): this function reads several names from an enclosing
    scope (params, vm, test, condn_sleep_sec, max_vcpu, current_vcpu) --
    it is presumably a closure nested inside a test entry point; confirm.
    """
    bt = None
    if not reset:
        if condn == "avocado_test":
            # start the avocado test list as a background guest workload
            testlist = utils_test.get_avocadotestlist(params)
            bt = utils_test.run_avocado_bg(vm, params, test, testlist)
            if not bt:
                test.cancel("guest stress failed to start")
            # Allow stress to start
            time.sleep(condn_sleep_sec)
            return bt
        elif condn == "stress":
            utils_test.load_stress("stress_in_vms", params=params, vms=[vm])
        elif condn in ["save", "managedsave"]:
            # No action
            pass
        elif condn == "suspend":
            result = virsh.suspend(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif condn == "hotplug":
            # hotplug vcpus up to the maximum and verify every vcpu count
            result = virsh.setvcpus(vm_name, max_vcpu, "--live",
                                    ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
            exp_vcpu = {
                'max_config': max_vcpu,
                'max_live': max_vcpu,
                'cur_config': current_vcpu,
                'cur_live': max_vcpu,
                'guest_live': max_vcpu
            }
            result = cpu.check_vcpu_value(vm, exp_vcpu, option="--live")
        elif condn == "host_smt":
            if cpuutil.get_cpu_vendor_name() == 'power9':
                result = process.run("ppc64_cpu --smt=4", shell=True)
            else:
                # SMT change is only supported on power9 hosts here
                test.cancel(
                    "Host SMT changes not allowed during guest live")
        else:
            logging.debug("No operation for the domain")
    else:
        if condn == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(),
                                     vm_name + ".save")
            result = virsh.save(vm_name, save_file, ignore_status=True,
                                debug=True)
            libvirt.check_exit_status(result)
            time.sleep(condn_sleep_sec)
            if os.path.exists(save_file):
                result = virsh.restore(save_file, ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                os.remove(save_file)
            else:
                test.error("No save file for domain restore")
        elif condn == "managedsave":
            result = virsh.managedsave(vm_name, ignore_status=True,
                                       debug=True)
            libvirt.check_exit_status(result)
            time.sleep(condn_sleep_sec)
            result = virsh.start(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif condn == "suspend":
            result = virsh.resume(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif condn == "avocado_test":
            # wait for the background guest workload to finish
            guestbt.join()
        elif condn == "stress":
            utils_test.unload_stress("stress_in_vms", params=params,
                                     vms=[vm])
        elif condn == "hotplug":
            # hot-unplug back to the original vcpu count and verify
            result = virsh.setvcpus(vm_name, current_vcpu, "--live",
                                    ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
            exp_vcpu = {
                'max_config': max_vcpu,
                'max_live': current_vcpu,
                'cur_config': current_vcpu,
                'cur_live': current_vcpu,
                'guest_live': current_vcpu
            }
            result = cpu.check_vcpu_value(vm, exp_vcpu, option="--live")
        elif condn == "host_smt":
            result = process.run("ppc64_cpu --smt=2", shell=True)
            # Change back the host smt
            result = process.run("ppc64_cpu --smt=4", shell=True)
            # Work around due to known cgroup issue after cpu hot(un)plug
            # sequence
            root_cpuset_path = utils_cgroup.get_cgroup_mountpoint("cpuset")
            machine_cpuset_paths = []
            if os.path.isdir(
                    os.path.join(root_cpuset_path, "machine.slice")):
                machine_cpuset_paths.append(
                    os.path.join(root_cpuset_path, "machine.slice"))
            if os.path.isdir(os.path.join(root_cpuset_path, "machine")):
                machine_cpuset_paths.append(
                    os.path.join(root_cpuset_path, "machine"))
            if not machine_cpuset_paths:
                logging.warning("cgroup cpuset might not recover properly "
                                "for guests after host smt changes, "
                                "restore it manually")
            root_cpuset_cpus = os.path.join(root_cpuset_path, "cpuset.cpus")
            for path in machine_cpuset_paths:
                machine_cpuset_cpus = os.path.join(path, "cpuset.cpus")
                # check if file content differs
                cmd = "diff %s %s" % (root_cpuset_cpus,
                                      machine_cpuset_cpus)
                if process.system(cmd, verbose=True, ignore_status=True):
                    # re-sync the machine cpuset from the root cpuset
                    cmd = "cp %s %s" % (root_cpuset_cpus,
                                        machine_cpuset_cpus)
                    process.system(cmd, verbose=True)
        else:
            logging.debug("No need recover the domain")
    return bt
def set_condition(vm_name, condn, reset=False, guestbt=None):
    """
    Set domain to given state or reset it.

    :param vm_name: name of the domain to operate on
    :param condn: condition to apply or undo: "avocadotest", "stress",
                  "save", "managedsave", "suspend", "hotplug" or
                  "host_smt"; anything else only logs a debug message
    :param reset: False to apply the condition, True to undo it
    :param guestbt: background test handle to join when resetting
                    "avocadotest"
    :return: background test handle for "avocadotest", otherwise None

    NOTE(review): this function reads several names from an enclosing
    scope (params, vm, test, condn_sleep_sec, max_vcpu, current_vcpu) --
    it is presumably a closure nested inside a test entry point; confirm.
    """
    bt = None
    if not reset:
        if condn == "avocadotest":
            # start an avocado test as a background guest workload
            bt = utils_test.run_avocado_bg(vm, params, test)
            if not bt:
                test.cancel("guest stress failed to start")
            # Allow stress to start
            time.sleep(condn_sleep_sec)
            return bt
        elif condn == "stress":
            utils_test.load_stress("stress_in_vms", params=params, vms=[vm])
        elif condn in ["save", "managedsave"]:
            # No action
            pass
        elif condn == "suspend":
            result = virsh.suspend(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif condn == "hotplug":
            # hotplug vcpus up to the maximum and verify every vcpu count
            result = virsh.setvcpus(vm_name, max_vcpu, "--live",
                                    ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
            exp_vcpu = {'max_config': max_vcpu,
                        'max_live': max_vcpu,
                        'cur_config': current_vcpu,
                        'cur_live': max_vcpu,
                        'guest_live': max_vcpu}
            result = utils_hotplug.check_vcpu_value(vm, exp_vcpu,
                                                    option="--live")
        elif condn == "host_smt":
            if cpu.get_cpu_arch() == 'power9':
                result = process.run("ppc64_cpu --smt=4", shell=True)
            else:
                # SMT change is only supported on power9 hosts here
                test.cancel("Host SMT changes not allowed during guest live")
        else:
            logging.debug("No operation for the domain")
    else:
        if condn == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(),
                                     vm_name + ".save")
            result = virsh.save(vm_name, save_file, ignore_status=True,
                                debug=True)
            libvirt.check_exit_status(result)
            time.sleep(condn_sleep_sec)
            if os.path.exists(save_file):
                result = virsh.restore(save_file, ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                os.remove(save_file)
            else:
                test.error("No save file for domain restore")
        elif condn == "managedsave":
            result = virsh.managedsave(vm_name, ignore_status=True,
                                       debug=True)
            libvirt.check_exit_status(result)
            time.sleep(condn_sleep_sec)
            result = virsh.start(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif condn == "suspend":
            result = virsh.resume(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif condn == "avocadotest":
            # wait for the background guest workload to finish
            guestbt.join(ignore_status=True)
        elif condn == "stress":
            utils_test.unload_stress("stress_in_vms", params=params,
                                     vms=[vm])
        elif condn == "hotplug":
            # hot-unplug back to the original vcpu count and verify
            result = virsh.setvcpus(vm_name, current_vcpu, "--live",
                                    ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
            exp_vcpu = {'max_config': max_vcpu,
                        'max_live': current_vcpu,
                        'cur_config': current_vcpu,
                        'cur_live': current_vcpu,
                        'guest_live': current_vcpu}
            result = utils_hotplug.check_vcpu_value(vm, exp_vcpu,
                                                    option="--live")
        elif condn == "host_smt":
            result = process.run("ppc64_cpu --smt=2", shell=True)
            # Change back the host smt
            result = process.run("ppc64_cpu --smt=4", shell=True)
            # Work around due to known cgroup issue after cpu hot(un)plug
            # sequence
            root_cpuset_path = utils_cgroup.get_cgroup_mountpoint("cpuset")
            machine_cpuset_paths = []
            if os.path.isdir(os.path.join(root_cpuset_path,
                                          "machine.slice")):
                machine_cpuset_paths.append(os.path.join(root_cpuset_path,
                                                         "machine.slice"))
            if os.path.isdir(os.path.join(root_cpuset_path, "machine")):
                machine_cpuset_paths.append(os.path.join(root_cpuset_path,
                                                         "machine"))
            if not machine_cpuset_paths:
                logging.warning("cgroup cpuset might not recover properly "
                                "for guests after host smt changes, "
                                "restore it manually")
            root_cpuset_cpus = os.path.join(root_cpuset_path, "cpuset.cpus")
            for path in machine_cpuset_paths:
                machine_cpuset_cpus = os.path.join(path, "cpuset.cpus")
                # check if file content differs
                cmd = "diff %s %s" % (root_cpuset_cpus,
                                      machine_cpuset_cpus)
                if process.system(cmd, verbose=True, ignore_status=True):
                    # re-sync the machine cpuset from the root cpuset
                    cmd = "cp %s %s" % (root_cpuset_cpus,
                                        machine_cpuset_cpus)
                    process.system(cmd, verbose=True)
        else:
            logging.debug("No need recover the domain")
    return bt