def operations():
    """
    Do save | domstats | blkdeviotune operations

    Dispatches on ``self.operation`` (closure over the enclosing test
    object) and runs the matching virsh command against ``self.vm_name``.
    """
    if self.operation == "save":
        virsh.save(self.vm_name, self.save_file, debug=True,
                   timeout=self.operation_timeout)
    if self.operation == "domstats":
        # Pass the option string positionally: virsh.domstats() takes the
        # extra options as its second positional argument. The previous
        # misspelled keyword 'opertions=' fell into **dargs and was silently
        # dropped, so the option never reached the command line.
        virsh.domstats(self.vm_name, self.operation_option, debug=True,
                       timeout=self.operation_timeout)
    if self.operation == "blkdeviotune":
        virsh.blkdeviotune(self.vm_name, self.disk_tgt, debug=True,
                           timeout=self.operation_timeout)
def check_event_value(vm_name, perf_option, event):
    """
    Check domstats output and if the event has a value as expect
    1. if perf_option == --disable, there isn't a value/line
    2. if perf_option == --enable, there is a value/line

    :param vm_name: Domain name,id
    :param perf_option: --enable or --disable
    :param event: perf event name
    :return: True if the output matches the expectation, False otherwise
    """
    logging.debug("check_event_value: vm_name= %s, perf_option=%s, event=%s",
                  vm_name, perf_option, event)
    result = virsh.domstats(vm_name, "--perf", ignore_status=True, debug=True)
    libvirt.check_exit_status(result)
    output = result.stdout.strip()
    logging.debug("domstats output is %s", output)
    # A matching stat line looks like "  perf.<event>=<value>"; the event
    # name is the text between the first '.' and the '='.
    found = any('.' in line
                and event == line.split('.')[1].split('=')[0]
                for line in output.split('\n'))
    # --enable expects the event to be present, anything else (--disable)
    # expects it to be absent -- same decision table as before, without
    # scanning the output twice.
    return found if perf_option == '--enable' else not found
def check_vm_state_after_abort(vm_name, vm_state_after_abort, src_uri, dest_uri, test):
    """
    Check the VM state after domjobabort the migration

    :param vm_name: str, vm name
    :param vm_state_after_abort: str, like "{'source': 'running', 'target': 'nonexist'}"
                                 source: local host, target: remote host
    :param src_uri: uri for source host
    :param dest_uri: uri for target host
    :param test: test object
    """
    import ast
    # literal_eval only parses Python literals; the previous eval() would
    # execute arbitrary code embedded in the test parameter string.
    state_dict = ast.literal_eval(vm_state_after_abort)
    logging.debug("Check guest state should be {} on source host".format(
        state_dict['source']))
    libvirt.check_vm_state(vm_name, state=state_dict['source'], uri=src_uri)
    logging.debug("Check guest persistent on source host")
    cmd_res = virsh.domstats(vm_name, '--list-persistent',
                             debug=True, ignore_status=False)
    if not cmd_res.stdout_text.count(vm_name):
        test.fail(
            "The guest is expected to be persistent on source host, but it isn't"
        )
    logging.debug("Check guest state should be {} on target host".format(
        state_dict['target']))
    if state_dict['target'] == 'nonexist':
        # The aborted migration must not leave a domain behind on the target.
        if virsh.domain_exists(vm_name, uri=dest_uri):
            test.fail("The domain on target host is found, but expected not")
    else:
        libvirt.check_vm_state(vm_name, state=state_dict['target'],
                               uri=dest_uri)
def get_domstats(vm, key):
    """
    Get VM's domstats output value for given keyword

    :param vm: VM object
    :param key: keyword for which value is needed
    :return: value string, or None when no stat entry contains the keyword
    """
    result = virsh.domstats(vm.name)
    # Each whitespace-separated token is a "name=value" stat entry; yield
    # the value of every entry whose name contains the keyword.
    values = (entry.split("=")[1]
              for entry in result.stdout_text.strip().split()
              if key in entry)
    # First match wins, mirroring the early return of a plain loop.
    return next(values, None)
def get_domstats(vm, key):
    """
    Get VM's domstats output value for given keyword

    :param vm: VM object
    :param key: keyword for which value is needed
    :return: value string, or None when no stat entry contains the keyword
    """
    cmd_result = virsh.domstats(vm.name)
    # results_stdout_52lts() extracts stdout from the command result in a
    # way compatible with older avocado releases.
    for entry in results_stdout_52lts(cmd_result).strip().split():
        if key not in entry:
            continue
        # Entry has the form "name=value"; hand back the value part.
        return entry.split("=")[1]
def execute_statistics_command(params):
    """
    Execute statistics command

    :param params: dict, used to setup the connection
    """
    vm_name = params.get("migrate_main_vm")
    disk_type = params.get("disk_type")
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    disks = vmxml.get_disk_all_by_expr('type==%s' % disk_type,
                                       'device==disk')
    logging.debug("disks: %s", disks)
    run_kwargs = {'ignore_status': False, 'debug': True}
    # Query every block/stat command for each disk of the requested type.
    for disk_elem in list(disks.values()):
        src_dev = disk_elem.find('source').get('dev')
        tgt_dev = disk_elem.find('target').get('dev')
        logging.debug("disk_source: %s", src_dev)
        logging.debug("disk_target: %s", tgt_dev)
        virsh.domblkstat(vm_name, tgt_dev, "", **run_kwargs)
        virsh.domblkinfo(vm_name, src_dev, **run_kwargs)
        virsh.domstats(vm_name, **run_kwargs)
        virsh.dommemstat(vm_name, **run_kwargs)
def test_iothread(vm_name, iothread_add_ids, iothread_del_ids, domstats_output, test):
    """
    Check domstats by operating iothread

    :param vm_name: vm name
    :param iothread_add_ids: list, iothread id to be added
    :param iothread_del_ids: list, iothread id to be deleted
    :param domstats_output: output of virsh domstats --iothread
    :param test: test object
    """
    # Before deleting anything, all added iothreads must show up.
    check_domstats_for_iothread(iothread_add_ids, None, domstats_output, test)
    del_iothread(vm_name, iothread_del_ids)
    # Re-query domstats and verify the deleted ids are gone while the
    # remaining added ids are still present.
    refreshed_output = virsh.domstats(
        vm_name, '--iothread', ignore_status=False,
        debug=True).stdout_text.strip()
    check_domstats_for_iothread(iothread_add_ids, iothread_del_ids,
                                refreshed_output, test)
def get_iothread_pool(vm_name, thread_id):
    """
    Get iothread pool values for the specified iothread id

    :param vm_name: name of vm
    :param thread_id: thread id
    :return: dict, iothread pool time values keyed by full stat name
    """
    iothread_pool = {}
    domstats_output = virsh.domstats(vm_name, "--iothread", debug=True)
    # Escape the literal dots and the thread id: the previous pattern
    # "iothread.<id>.poll.*" used '.' as a regex wildcard, so it could
    # match unintended stat names.
    pattern = r"iothread\.%s\.poll.*" % re.escape(str(thread_id))
    for item in re.findall(pattern, domstats_output.stdout):
        # Each matched line has the form "iothread.<id>.poll-<x>=<value>";
        # split on the first '=' so values are never truncated.
        key, value = item.split("=", 1)
        iothread_pool[key] = value
    logging.debug("iothread pool values for thread id {} are {}.".format(
        thread_id, iothread_pool))
    return iothread_pool
def check_output():
    """
    Check the dirty rate reported by domstats
    """
    res = virsh.domstats(vm_name, "--dirtyrate", debug=True)
    # First line of domstats output is the domain header; the remaining
    # lines are "key=value" pairs.
    stat_lines = res.stdout_text.strip().splitlines()[1:]
    stats = dict(entry.strip().split("=") for entry in stat_lines)
    if stats["dirtyrate.calc_status"] != calc_status:
        test.fail("Calculating dirty rate should be completed "
                  "after %s seconds" % period)
    if stats["dirtyrate.calc_period"] != period:
        test.fail("Calculating period is not the same with "
                  "the setting period %s" % period)
    dirty_rate = stats["dirtyrate.megabytes_per_second"]
    # The measured rate should be within 75% of the memory load (ram_size
    # megabytes) being dirtied in the guest.
    if abs(int(dirty_rate) / int(ram_size) - 1) > 0.75:
        test.fail("Dirty rate calculated %s has a big difference "
                  "with the ram size %s loaded in guest "
                  % (dirty_rate, ram_size))
def check_slice_hot_operate(vm, params, test):
    """
    Check hot operation on disk with slice attribute

    :param vm: one object representing VM
    :param params: wrapped parameters in dictionary format
    :param test: test assert object
    """
    device_target = params.get("target_dev")
    session = None
    try:
        session = vm.wait_for_login()
        _, output = session.cmd_status_output(
            "dd if=/dev/zero of=/dev/%s bs=10M count=1" % device_target)
        LOG.info("Fill contents in VM:\n%s", output)
    except Exception as e:
        # Best effort: a failed guest write should not abort the
        # statistics comparison below.
        LOG.error(str(e))
    finally:
        # Close exactly once, and only when login succeeded. The previous
        # code called session.close() in the except branch too, which raised
        # UnboundLocalError whenever wait_for_login() itself failed.
        if session:
            session.close()
    domstats_raw_output = virsh.domstats(vm.name, "--block",
                                         ignore_status=False,
                                         debug=True).stdout_text.strip()
    domstats_dict = translate_raw_output_into_dict(domstats_raw_output)
    domblkinfo_raw_output = virsh.domblkinfo(vm.name, device_target,
                                             ignore_status=False,
                                             debug=True).stdout_text.strip()
    domblkinfo_dict = translate_raw_output_into_dict(domblkinfo_raw_output,
                                                     default_delimiter=":")
    # Verify allocation, capacity, and physical are the same from domstats
    # and domblkinfo
    for key in ['Allocation', 'Capacity', 'Physical']:
        if domstats_dict["block.1.%s" % key.lower()] != domblkinfo_dict[key]:
            test.fail("The domstats value: %s is not equal to domblkinfo: %s"
                      % (domstats_dict["block.1.%s" % key.lower()],
                         domblkinfo_dict[key]))
def run(test, params, env):
    """
    Test:<memorytune>
    1. Check virsh capabilities report right MBA info
    2 Mount resctrl
    3. Check host MBA info from virsh capabilities output
    4. Add memory bandwidth in domain XML and start vm
    5. check resctrl dir and verify libvirt set right values
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    test_vm = env.get_vm(vm_name)
    schemata_file1 = params.get("schemata_file1", "")
    schemata_file2 = params.get("schemata_file2", "")
    mb_value1 = params.get("mb_value1", "")
    mb_value2 = params.get("mb_value2", "")
    vcpu_max_num = int(params.get("vcpu_max_num"))
    vcpu_current_num = int(params.get("vcpu_current_num"))
    topology_correction = "yes" == params.get("topology_correction", "no")
    cachetune_items = params.get("cachetune_items")
    test_memtune = params.get("test_memtune", 'no') == 'yes'
    memorytune_item_list, node_item_list, mem_monitor_item_list = None, None, None
    vcpupins = params.get("vcpupins")
    placement = params.get('placement')
    iothreads = params.get('iothreads')
    vcpuscheds = params.get('vcpuscheds')
    emulatorsched = params.get('emulatorsched')
    check_policy = params.get('check_policy')
    if test_memtune:
        # Host MBA capability check and memorytune parameter assembly are
        # only needed when the memtune part of the test is enabled.
        check_host(test)
        (memorytune_item_list, node_item_list,
         mem_monitor_item_list) = assemble_memorytune(params)
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Keep a pristine copy to restore the guest XML in the finally block.
    backup_xml = vmxml.copy()
    try:
        # change the vcpu number from 2 to 5
        vmxml.set_vm_vcpus(vm_name, vcpu_max_num, vcpu_current_num,
                           topology_correction=topology_correction)
        # Re-read the XML: set_vm_vcpus() syncs its changes to libvirt.
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        if placement:
            vmxml.placement = placement
        if iothreads:
            vmxml.iothreads = int(iothreads)
        cputunexml = vm_xml.VMCPUTuneXML()
        if memorytune_item_list:
            cputunexml = update_with_memorytune(cputunexml,
                                                memorytune_item_list,
                                                node_item_list,
                                                mem_monitor_item_list)
        if cachetune_items:
            cputunexml = update_with_cachetune(cputunexml, params)
        # NOTE(review): eval() on test parameters executes arbitrary code;
        # ast.literal_eval would be safer for these literal lists.
        if vcpupins:
            cputunexml.vcpupins = eval(vcpupins)
        if vcpuscheds:
            cputunexml.vcpuscheds = eval(vcpuscheds)
        if emulatorsched:
            cputunexml.emulatorsched = emulatorsched
        logging.debug("cputunexml: %s" % cputunexml)
        vmxml.cputune = cputunexml
        logging.debug("vm xml: %s", vmxml)
        vmxml.sync()
        test_vm.start()
        logging.debug("VM xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))
        if test_memtune:
            # 5.Check resctrl dir and verify libvirt set right values
            check_membind_value(test, schemata_file1, mb_value1)
            check_membind_value(test, schemata_file2, mb_value2)
            found_mb = verify_membind_value(schemata_file1, mb_value1)
            if not found_mb:
                test.fail("The first schemata %s for vcpus is not set valid"
                          % schemata_file1)
            found_mb = verify_membind_value(schemata_file2, mb_value2)
            if not found_mb:
                test.fail("The second schemata %s for vcpus is not set valid"
                          % schemata_file2)
            # 6. Check domstats memory
            # --memory flag requires libvirt >= 6.0.0.
            if libvirt_version.version_compare(6, 0, 0):
                result = virsh.domstats(vm_name, "--memory",
                                        ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                output = result.stdout.strip()
                logging.debug("domstats output is %s", output)
            # 7. Destroy the vm and verify the libvirt dir exist
            test_vm.destroy(gracefully=False)
            # resctrl schemata files created for the vm must be cleaned up
            # by libvirt once the domain is gone.
            if os.path.exists(schemata_file1) or os.path.exists(
                    schemata_file2):
                test.fail(
                    "The schemata file should be deleted after vm destroy")
        if check_policy:
            # NOTE(review): eval() of check_policy has the same caveat as
            # vcpupins above.
            check_scheduler_policy(eval(check_policy), test)
    finally:
        if test_vm.is_alive():
            test_vm.destroy(gracefully=False)
        if test_memtune:
            # resctrl was mounted by the memtune setup; undo it.
            process.run("umount /sys/fs/resctrl", verbose=True, shell=True)
        backup_xml.sync()
def run(test, params, env):
    """
    Test command: virsh domstats.

    1.Prepare vm state.
    2.Perform virsh domstats operation.
    3.Confirm the test result.
    4.Recover test environment.
    """
    default_vm_name = params.get("main_vm", "virt-tests-vm1")
    default_vm = env.get_vm(default_vm_name)
    vm_list = params.get("vm_list", "")
    vm_state = params.get("vm_state", "")
    domstats_option = params.get("domstats_option")
    raw_print = "yes" == params.get("raw_print", "no")
    enforce_command = "yes" == params.get("enforce_command", "no")
    status_error = (params.get("status_error", "no") == "yes")
    # Collect all VMs under test; the default VM is always included.
    vms = [default_vm]
    if vm_list:
        for name in vm_list.split():
            if name != default_vm_name:
                vms.append(env.get_vm(name))
    backup_xml_list = []
    try:
        if not status_error:
            for vm in vms:
                # Back up xml file
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
                backup_xml_list.append(vmxml.copy())
                if vm_state == "crash":
                    # A crashed state is produced via a <panic> ISA device;
                    # on_crash=preserve keeps the domain around afterwards.
                    if vm.is_alive():
                        vm.destroy(gracefully=False)
                    vmxml.on_crash = "preserve"
                    # Add <panic> device to domain
                    panic_dev = Panic()
                    panic_dev.addr_type = "isa"
                    panic_dev.addr_iobase = "0x505"
                    vmxml.add_device(panic_dev)
                    vmxml.sync()
                    virsh.start(vm.name, ignore_status=False)
                    vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm.name)
                    # Skip this test if no panic device find
                    if not vmxml_new.xmltreefile.find('devices').findall('panic'):
                        raise error.TestNAError("No 'panic' device in the guest,"
                                                " maybe your libvirt version"
                                                " doesn't support it.")
                prepare_vm_state(vm, vm_state)
        if enforce_command:
            domstats_option += " --enforce"
        if raw_print:
            domstats_option += " --raw"
        # Run virsh command
        result = virsh.domstats(vm_list, domstats_option,
                                ignore_status=True, debug=True)
        status = result.exit_status
        output = result.stdout.strip()
        # check status_error
        if status_error:
            if not status:
                # An "unsupported flags" error means the libvirt build lacks
                # the flag rather than the command being wrong: skip instead.
                if "unsupported flags" in result.stderr:
                    raise error.TestNAError(result.stderr)
                raise error.TestFail("Run successfully with wrong command!")
        else:
            if status:
                raise error.TestFail("Run failed with right command")
            else:
                for vm in vms:
                    if not check_output(output, vm, vm_state, domstats_option):
                        raise error.TestFail("Check command output failed")
    finally:
        try:
            for vm in vms:
                vm.destroy(gracefully=False)
        except AttributeError:
            # env.get_vm() may have returned None for an unknown name.
            pass
        for backup_xml in backup_xml_list:
            backup_xml.sync()
def run(test, params, env):
    """
    Test command: virsh domstats.

    1.Prepare vm state.
    2.Perform virsh domstats operation.
    3.Confirm the test result.
    4.Recover test environment.
    """
    default_vm_name = params.get("main_vm", "avocado-vt-vm1")
    default_vm = env.get_vm(default_vm_name)
    vm_list = params.get("vm_list", "")
    vm_state = params.get("vm_state", "")
    domstats_option = params.get("domstats_option")
    raw_print = "yes" == params.get("raw_print", "no")
    enforce_command = "yes" == params.get("enforce_command", "no")
    # NOTE(review): eval() on test parameters executes arbitrary code;
    # ast.literal_eval would be safer for these literal id lists.
    iothread_add_ids = eval(params.get("iothread_add_ids", '[]'))
    iothread_del_ids = eval(params.get("iothread_del_ids", '[]'))
    status_error = (params.get("status_error", "no") == "yes")
    # --nowait was introduced in libvirt 4.5.0.
    if "--nowait" in domstats_option and not libvirt_version.version_compare(
            4, 5, 0):
        test.cancel(
            "--nowait option is supported until libvirt 4.5.0 version...")
    # Collect all VMs under test; the default VM is always included.
    vms = [default_vm]
    if vm_list:
        for name in vm_list.split():
            if name != default_vm_name:
                if env.get_vm(name):
                    vms.append(env.get_vm(name))
    backup_xml_list = []
    try:
        if not status_error:
            for vm in vms:
                # Back up xml file
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
                backup_xml_list.append(vmxml.copy())
                if vm_state == "crash":
                    # A crashed state is produced via a <panic> ISA device;
                    # on_crash=preserve keeps the domain around afterwards.
                    if vm.is_alive():
                        vm.destroy(gracefully=False)
                    vmxml.on_crash = "preserve"
                    # Add <panic> device to domain
                    panic_dev = Panic()
                    panic_dev.addr_type = "isa"
                    panic_dev.addr_iobase = "0x505"
                    vmxml.add_device(panic_dev)
                    vmxml.sync()
                    virsh.start(vm.name, ignore_status=False)
                    vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm.name)
                    # Skip this test if no panic device find
                    if not vmxml_new.xmltreefile.find('devices').findall(
                            'panic'):
                        test.cancel("No 'panic' device in the guest, maybe "
                                    "your libvirt version doesn't support it.")
                prepare_vm_state(vm, vm_state)
        if enforce_command:
            domstats_option += " --enforce"
        if raw_print:
            domstats_option += " --raw"
        if "--nowait" in domstats_option:
            # Kick off a snapshot in a worker thread so that domstats
            # --nowait runs while another job holds the domain busy.
            pool = ThreadPool(processes=1)
            async_result = pool.apply_async(create_snap,
                                            (vm_list, '--no-metadata'))
            return_val = async_result.get()
        if "--iothread" in domstats_option:
            add_iothread(vm_list, iothread_add_ids)
        # Run virsh command
        result = virsh.domstats(vm_list, domstats_option,
                                ignore_status=True, debug=True)
        status = result.exit_status
        output = result.stdout.strip()
        # check status_error
        if status_error:
            if not status:
                # An "unsupported flags" error means the libvirt build lacks
                # the flag rather than the command being wrong: skip instead.
                if "unsupported flags" in result.stderr:
                    test.cancel(result.stderr)
                test.fail("Run successfully with wrong command!")
        else:
            if status:
                test.fail("Run failed with right command")
            else:
                for vm in vms:
                    if not check_output(output, vm, vm_state,
                                        domstats_option):
                        test.fail("Check command output failed")
                if "--iothread" in domstats_option:
                    test_iothread(vm_list, iothread_add_ids,
                                  iothread_del_ids, output, test)
    finally:
        try:
            for vm in vms:
                vm.destroy(gracefully=False)
        except AttributeError:
            # env.get_vm() may have returned None for an unknown name.
            pass
        for backup_xml in backup_xml_list:
            backup_xml.sync()
def check_source_stats(vm_name):
    """
    Log virsh domstats output for the domain on the source host.

    :param vm_name: str, name of the domain to query
    """
    source_stats = virsh.domstats(vm_name)
    logging.debug("domstats in source: {}".format(source_stats))
def run(test, params, env):
    """
    Test:<memorytune>
    1. Check virsh capabilities report right MBA info
    2 Mount resctrl
    3. Check host MBA info from virsh capabilities output
    4. Add memory bandwidth in domain XML and start vm
    5. check resctrl dir and verify libvirt set right values
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    test_vm = env.get_vm(vm_name)
    schemata_file1 = params.get("schemata_file1", "")
    schemata_file2 = params.get("schemata_file2", "")
    mb_value1 = params.get("mb_value1", "")
    mb_value2 = params.get("mb_value2", "")
    vcpu_max_num = int(params.get("vcpu_max_num"))
    vcpu_current_num = int(params.get("vcpu_current_num"))
    topology_correction = "yes" == params.get("topology_correction", "no")

    # 1.Check virsh capabilities
    # The trailing space in 'mba ' avoids matching flags that merely start
    # with "mba".
    if utils_misc.get_cpu_info()['Flags'].find('mba ') == -1:
        test.cancel("This machine doesn't support cpu 'mba' flag")

    # 2.Mount resctrl
    process.run("mount -t resctrl resctrl /sys/fs/resctrl",
                verbose=True, shell=True)
    process.run("echo 'L3:0=0ff;1=0ff' > /sys/fs/resctrl/schemata",
                verbose=True, shell=True)

    # 3.Check host MBA info from virsh capabilities output
    cmd = "virsh capabilities | awk '/<memory_bandwidth>/,\
/<\/memory_bandwidth>/'"
    out = ""
    out = process.run(cmd, shell=True).stdout_text
    if not re.search('node', out):
        test.fail("There is no memory_bandwidth info in capablities")

    # 4.Add memory bandwidth in domain XML
    # Each params entry is a ';'-separated list of dict literals.
    memorytune_item_list = [ast.literal_eval(x)
                            for x in params.get("memorytune_items",
                                                "").split(';')]
    node_item_list1 = [ast.literal_eval(x)
                       for x in params.get("node_items1", "").split(';')]
    node_item_list2 = [ast.literal_eval(x)
                       for x in params.get("node_items2", "").split(';')]
    node_item_list = []
    node_item_list.append(node_item_list1)
    node_item_list.append(node_item_list2)
    cachetune_items = params.get("cachetune_items")
    mem_monitor_item1 = [ast.literal_eval(x)
                         for x in params.get("mem_monitor_item1",
                                             "").split(';')]
    mem_monitor_item2 = [ast.literal_eval(x)
                         for x in params.get("mem_monitor_item2",
                                             "").split(';')]
    mem_monitor_item_list = []
    mem_monitor_item_list.append(mem_monitor_item1)
    mem_monitor_item_list.append(mem_monitor_item2)
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Keep a pristine copy to restore the guest XML in the finally block.
    backup_xml = vmxml.copy()
    try:
        # change the vcpu number from 2 to 5
        vmxml.set_vm_vcpus(vm_name, vcpu_max_num, vcpu_current_num,
                           topology_correction=topology_correction)
        # Re-read the XML: set_vm_vcpus() syncs its changes to libvirt.
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        cputunexml = vm_xml.VMCPUTuneXML()
        logging.debug("cputunexml: %s" % cputunexml)
        if memorytune_item_list:
            # Build one <memorytune> element per vcpus group, with its
            # bandwidth nodes and monitors.
            for mitem in range(len(memorytune_item_list)):
                logging.debug("node %d " % mitem)
                memorytunexml = vm_xml.MemoryTuneXML()
                memorytunexml.vcpus = memorytune_item_list[mitem]['vcpus']
                for node in node_item_list[mitem]:
                    nodexml = memorytunexml.NodeXML()
                    nodexml.id = node['id']
                    nodexml.bandwidth = node['bandwidth']
                    memorytunexml.set_node(nodexml)
                for monitor in mem_monitor_item_list[mitem]:
                    monitorxml = memorytunexml.MonitorXML()
                    monitorxml.vcpus = monitor['vcpus']
                    memorytunexml.set_monitor(monitorxml)
                logging.debug("memorytunexml.xml %s" % memorytunexml.xml)
                cputunexml.set_memorytune(memorytunexml)
                logging.debug("cputunexml.xml %s" % cputunexml.xml)
        if cachetune_items:
            # Build one <cachetune> element per vcpus group, with its cache
            # allocations and monitors.
            cachetune_item_list = [ast.literal_eval(x)
                                   for x in params.get("cachetune_items",
                                                       "").split(';')]
            cache_item_list = [ast.literal_eval(x)
                               for x in params.get("cache_items",
                                                   "").split(';')]
            monitor_item_list = [ast.literal_eval(x)
                                 for x in params.get("monitor_items",
                                                     "").split(';')]
            for citem in range(len(cachetune_item_list)):
                logging.debug("cache %d " % citem)
                cachetunexml = vm_xml.CacheTuneXML()
                logging.debug("cachetunexml: %s" % cachetunexml)
                cachetunexml.vcpus = cachetune_item_list[citem]['vcpus']
                for cache in cache_item_list:
                    cachexml = cachetunexml.CacheXML()
                    cachexml.id = cache['id']
                    cachexml.level = cache['level']
                    cachexml.type = cache['type']
                    cachexml.size = cache['size']
                    cachexml.unit = cache['unit']
                    cachetunexml.set_cache(cachexml)
                for monitor in monitor_item_list:
                    monitorxml = cachetunexml.MonitorXML()
                    monitorxml.level = monitor['level']
                    monitorxml.vcpus = monitor['vcpus']
                    cachetunexml.set_monitor(monitorxml)
                cputunexml.set_cachetune(cachetunexml)
        vmxml.cputune = cputunexml
        logging.debug("vm xml: %s", vmxml)
        vmxml.sync()
        test_vm.start()
        # 5.Check resctrl dir and verify libvirt set right values
        check_membind_value(test, schemata_file1, mb_value1)
        check_membind_value(test, schemata_file2, mb_value2)
        found_mb = verify_membind_value(schemata_file1, mb_value1)
        if not found_mb:
            test.fail("The first schemata %s for vcpus is not set valid" %
                      schemata_file1)
        found_mb = verify_membind_value(schemata_file2, mb_value2)
        if not found_mb:
            test.fail("The second schemata %s for vcpus is not set valid" %
                      schemata_file2)
        # 6. Check domstats memory
        # --memory flag requires libvirt >= 6.0.0.
        if libvirt_version.version_compare(6, 0, 0):
            result = virsh.domstats(vm_name, "--memory", ignore_status=True,
                                    debug=True)
            libvirt.check_exit_status(result)
            output = result.stdout.strip()
            logging.debug("domstats output is %s", output)
        # 7. Destroy the vm and verify the libvirt dir exist
        test_vm.destroy(gracefully=False)
        # resctrl schemata files created for the vm must be cleaned up by
        # libvirt once the domain is gone.
        if os.path.exists(schemata_file1) or os.path.exists(schemata_file2):
            test.fail("The schemata file should be deleted after vm destroy")
    finally:
        if test_vm.is_alive():
            test_vm.destroy(gracefully=False)
        # Undo the resctrl mount done in step 2.
        process.run("umount /sys/fs/resctrl", verbose=True, shell=True)
        backup_xml.sync()