def is_mounted(self):
    """
    Check whether the NFS source is mounted.

    :return: True if the src is mounted as expected, False otherwise
    :rtype: Boolean
    """
    return utils_misc.is_mounted(self.mount_src, self.mount_dir, "nfs")
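A minimal usage sketch of the underlying utils_misc helper; the export and mount point below are illustrative only, not taken from the tests in this collection.

# Hypothetical example: probe an NFS export and mount it only if needed.
from virttest import utils_misc   # import path as used by avocado-vt tests

nfs_src = "127.0.0.1:/export/images"   # illustrative NFS export
nfs_dir = "/mnt/nfs"                   # illustrative mount point
if not utils_misc.is_mounted(nfs_src, nfs_dir, "nfs"):
    utils_misc.mount(nfs_src, nfs_dir, "nfs")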
def mount_hugepages(page_size):
    """
    Mount hugetlbfs with the given hugepage size.

    :param page_size: page size in kB; it can be 4, 2048, 1048576, etc.
    """
    if page_size == 4:
        perm = ""
    else:
        perm = "pagesize=%dK" % page_size

    tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages",
                                         "hugetlbfs")
    if tlbfs_status:
        utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
    utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs", perm)
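A short, hedged sketch of how this helper might be driven in a test; the page sizes are illustrative, and 1048576 kB (1 GiB) pages require host support.

# Remount /dev/hugepages with 2 MiB pages for a hugepage-backed guest.
mount_hugepages(2048)    # adds pagesize=2048K to the hugetlbfs mount options
# ... start and exercise the guest here ...
mount_hugepages(4)       # 4 kB means no pagesize option, i.e. the host default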
cmd += " | grep gluster+%s.*format=%s" % (transport, disk_format) else: cmd += " | grep gluster.*format=%s" % disk_format if driver_iothread: cmd += " | grep iothread=iothread%s" % driver_iothread if process.run(cmd, ignore_status=True, shell=True).exit_status: test.fail("Can't see gluster option '%s' " "in command line" % cmd) finally: # cleanup swap if pm_enabled: try: if vm.state() == "running": vm.cleanup_swap() except (remote.LoginError, virt_vm.VMError), e: logging.error("Failed to cleanup swap on guest: %s", e) # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) logging.info("Restoring vm...") vmxml_backup.sync() if utils_misc.is_mounted(mnt_src, default_pool, 'glusterfs'): process.run("umount %s" % default_pool, ignore_status=True, shell=True) if gluster_disk: libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path)
def run(test, params, env): """ Test hpt resizing """ vm_name = params.get('main_vm') vm = env.get_vm(vm_name) status_error = 'yes' == params.get('status_error', 'no') error_msg = eval(params.get('error_msg', '[]')) hpt_attrs = eval(params.get('hpt_attrs', '{}')) hpt_order_path = params.get('hpt_order_path', '') cpu_attrs = eval(params.get('cpu_attrs', '{}')) numa_cell = eval(params.get('numa_cell', '{}')) hugepage = 'yes' == params.get('hugepage', 'no') maxpagesize = int(params.get('maxpagesize', 0)) check_hp = 'yes' == params.get('check_hp', 'no') qemu_check = params.get('qemu_check', '') skip_p8 = 'yes' == params.get('skip_p8', 'no') def set_hpt(vmxml, sync, **attrs): """ Set resizing value to vm xml :param vmxml: xml of vm to be manipulated :param sync: whether to sync vmxml after :param attrs: attrs to set to hpt xml """ if vmxml.xmltreefile.find('/features'): features_xml = vmxml.features else: features_xml = vm_xml.VMFeaturesXML() hpt_xml = vm_xml.VMFeaturesHptXML() for attr in attrs: setattr(hpt_xml, attr, attrs[attr]) features_xml.hpt = hpt_xml vmxml.features = features_xml logging.debug(vmxml) if sync: vmxml.sync() def set_cpu(vmxml, **attrs): """ Set cpu attrs for vmxml according to given attrs :param vmxml: xml of vm to be manipulated :param attrs: attrs to set to cpu xml """ if vmxml.xmltreefile.find('cpu'): cpu = vmxml.cpu else: cpu = vm_xml.VMCPUXML() if 'numa_cell' in attrs: cpu.xmltreefile.create_by_xpath('/numa') cpu.numa_cell = attrs['numa_cell'] for key in attrs: setattr(cpu, key, attrs[key]) vmxml.cpu = cpu vmxml.sync() def set_memory(vmxml): """ Set memory attributes in vm xml """ vmxml.max_mem_rt = int(params.get('max_mem_rt', 30670848)) vmxml.max_mem_rt_slots = int(params.get('max_mem_rt_slots', 16)) vmxml.max_mem_rt_unit = params.get('max_mem_rt_unit', 'KiB') logging.debug(numa_cell) if numa_cell: # Remove cpu topology to avoid that it doesn't match vcpu count if vmxml.get_cpu_topology(): new_cpu = vmxml.cpu new_cpu.del_topology() vmxml.cpu = new_cpu vmxml.vcpu = max([int(cell['cpus'][-1]) for cell in numa_cell]) + 1 vmxml.sync() def check_hpt_order(session, resizing=''): """ Return htp order in hpt_order file by default If 'resizing' is disabled, test updating htp_order """ if not hpt_order_path: test.cancel('No hpt order path provided.') hpt_order = session.cmd_output('cat %s' % hpt_order_path).strip() hpt_order = int(hpt_order) logging.info('Current hpt_order is %d', hpt_order) if resizing == 'disabled': cmd_result = session.cmd_status_output( 'echo %d > %s' % (hpt_order + 1, hpt_order_path)) result = process.CmdResult(stderr=cmd_result[1], exit_status=cmd_result[0]) libvirt.check_exit_status(result, True) libvirt.check_result(result, error_msg) return hpt_order def check_hp_in_vm(session, page_size): """ Check if hugepage size is correct inside vm :param session: the session of the running vm :param page_size: the expected pagesize to be checked inside vm """ expect = False if int(page_size) == 65536 else True meminfo = session.cmd_output('cat /proc/meminfo|grep Huge') logging.info('meminfo: \n%s', meminfo) pattern = 'Hugepagesize:\s+%d\s+kB' % int(page_size / 1024) logging.info('"%s" should %s be found in meminfo output', pattern, '' if expect else 'not') result = expect == bool(re.search(pattern, meminfo)) if not result: test.fail('meminfo output not meet expectation') # Check PAGE_SIZE in another way if not expect: conf_page_size = session.cmd_output('getconf PAGE_SIZE') logging.debug('Output of "getconf PAGE_SIZE": %s', conf_page_size) if 
int(conf_page_size) != int(page_size): test.fail( 'PAGE_SIZE not correct, should be %r, actually is %r' % (page_size, conf_page_size)) bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: arch = platform.machine() vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) resizing = hpt_attrs.get('resizing') # Test on ppc64le hosts if arch.lower() == 'ppc64le': cpu_arch = cpu.get_cpu_arch() logging.debug('cpu_arch is: %s', cpu_arch) if skip_p8 and cpu_arch == 'power8': test.cancel('This case is not for POWER8') if maxpagesize and not utils_misc.compare_qemu_version(3, 1, 0): test.cancel('Qemu version is too low, ' 'does not support maxpagesize setting') if maxpagesize == 16384 and cpu_arch == 'power9': test.cancel('Power9 does not support 16M pagesize.') set_hpt(vmxml, True, **hpt_attrs) if cpu_attrs or numa_cell: if numa_cell: cpu_attrs['numa_cell'] = numa_cell set_cpu(vmxml, **cpu_attrs) if hugepage: vm_mem = vmxml.max_mem host_hp_size = utils_memory.get_huge_page_size() # Make 100m extra memory just to be safe hp_count = max((vm_mem + 102400) // host_hp_size, 1200) vm_xml.VMXML.set_memoryBacking_tag(vm_name, hpgs=True) # Set up hugepage env mnt_source, hp_path, fstype = 'hugetlbfs', '/dev/hugepages', 'hugetlbfs' if not os.path.isdir(hp_path): process.run('mkdir %s' % hp_path, verbose=True) utils_memory.set_num_huge_pages(hp_count) if utils_misc.is_mounted(mnt_source, hp_path, fstype, verbose=True): utils_misc.umount(mnt_source, hp_path, fstype, verbose=True) utils_misc.mount(mnt_source, hp_path, fstype, verbose=True) # Restart libvirtd service to make sure mounted hugepage # be recognized utils_libvirtd.libvirtd_restart() if resizing == 'enabled': set_memory(vmxml) logging.debug('vmxml: \n%s', vmxml) # Start vm and check if start succeeds result = virsh.start(vm_name, debug=True) libvirt.check_exit_status(result, expect_error=status_error) # if vm is not suposed to start, terminate test if status_error: libvirt.check_result(result, error_msg) return libvirt.check_qemu_cmd_line(qemu_check) session = vm.wait_for_login() hpt_order = check_hpt_order(session, resizing) # Check hugepage inside vm if check_hp: check_hp_in_vm(session, maxpagesize * 1024) if resizing == 'enabled': mem_xml = utils_hotplug.create_mem_xml( tg_size=int(params.get('mem_size', 2048000)), tg_sizeunit=params.get('size_unit', 'KiB'), tg_node=int(params.get('mem_node', 0)), mem_model=params.get('mem_model', 'dimm')) logging.debug(mem_xml) # Attach memory device to the guest for 12 times # that will reach the maxinum memory limitation for i in range(12): virsh.attach_device(vm_name, mem_xml.xml, debug=True, ignore_status=False) xml_after_attach = vm_xml.VMXML.new_from_dumpxml(vm_name) logging.debug(xml_after_attach) # Check dumpxml of the guest, # check if each device has its alias for i in range(12): pattern = "alias\s+name=[\'\"]dimm%d[\'\"]" % i logging.debug('Searching for %s', pattern) if not re.search(pattern, str( xml_after_attach.xmltreefile)): test.fail('Missing memory alias: %s' % pattern) # Test on non-ppc64le hosts else: set_hpt(vmxml, sync=False, **hpt_attrs) result = virsh.define(vmxml.xml) libvirt.check_exit_status(result, status_error) libvirt.check_result(result, error_msg) finally: bk_xml.sync() if hugepage: utils_misc.umount('hugetlbfs', '/dev/hugepages', 'hugetlbfs') utils_memory.set_num_huge_pages(0)
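For reference, a hedged sketch of the unit handling in check_hp_in_vm above; the maxpagesize value is illustrative (it comes from the test params in KiB).

maxpagesize = 16384                  # KiB from params, i.e. 16 MiB hugepages
page_size = maxpagesize * 1024       # bytes, as passed to check_hp_in_vm()
# 65536 bytes (64 KiB) is the default base page size on ppc64le, so in that
# case no "Hugepagesize" line is expected to match in /proc/meminfo.
pattern = 'Hugepagesize:\s+%d\s+kB' % int(page_size / 1024)   # -> "Hugepagesize: 16384 kB"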
def run(test, params, env): """ Test command: virsh event and virsh qemu-monitor-event 1. Run virsh event/qemu-monitor-event in a new ShellSession 2. Trigger various events 3. Catch the return of virsh event and qemu-monitor-event, and check it. """ vms = [] if params.get("multi_vms") == "yes": vms = env.get_all_vms() else: vm_name = params.get("main_vm") vms.append(env.get_vm(vm_name)) event_name = params.get("event_name") event_all_option = "yes" == params.get("event_all_option", "no") event_list_option = "yes" == params.get("event_list_option", "no") event_loop = "yes" == params.get("event_loop", "no") event_timeout = params.get("event_timeout") event_option = params.get("event_option", "") status_error = "yes" == params.get("status_error", "no") qemu_monitor_test = "yes" == params.get("qemu_monitor_test", "no") signal_name = params.get("signal", None) panic_model = params.get("panic_model") addr_type = params.get("addr_type") addr_iobase = params.get("addr_iobase") disk_format = params.get("disk_format", "") disk_prealloc = "yes" == params.get("disk_prealloc", "yes") event_cmd = "event" dump_path = '/var/lib/libvirt/qemu/dump' part_format = params.get("part_format") strict_order = "yes" == params.get("strict_order", "no") if qemu_monitor_test: event_cmd = "qemu-monitor-event" events_list = params.get("events_list") if events_list: events_list = events_list.split(",") else: events_list = [] virsh_dargs = {'debug': True, 'ignore_status': True} virsh_session = aexpect.ShellSession(virsh.VIRSH_EXEC) for dom in vms: if dom.is_alive(): dom.destroy() vmxml_backup = [] for dom in vms: vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(dom.name) vmxml_backup.append(vmxml.copy()) tmpdir = data_dir.get_tmp_dir() mount_point = tmpdir small_part = os.path.join(tmpdir, params.get("part_name", "io-error_part")) def create_iface_xml(): """ Create interface xml file """ iface = Interface("bridge") iface.source = eval("{'bridge':'virbr0'}") iface.model = "virtio" logging.debug("Create new interface xml: %s", iface) return iface def add_disk(vm_name, init_source, target_device, extra_param, format=''): """ Add disk/cdrom for test vm :param vm_name: guest name :param init_source: source file :param target_device: target of disk device :param extra_param: additional arguments to command :param format: init_source format(qcow2 or raw) """ if not os.path.exists(init_source): disk_size = params.get("disk_size", "1G") if format == "qcow2": create_option = "" if not disk_prealloc else "-o preallocation=full" process.run('qemu-img create -f qcow2 %s %s %s' % (init_source, disk_size, create_option), shell=True, verbose=True) elif format == "raw": process.run('qemu-img create -f raw %s %s' % (init_source, disk_size), shell=True, verbose=True) else: open(init_source, 'a').close() if virsh.is_alive(vm_name) and 'cdrom' in extra_param: virsh.destroy(vm_name) if 'cdrom' in extra_param: init_source = "''" virsh.attach_disk(vm_name, init_source, target_device, extra_param, **virsh_dargs) vmxml_disk = vm_xml.VMXML.new_from_dumpxml(vm_name) logging.debug("Current vmxml after adding disk is %s\n" % vmxml_disk) def wait_for_shutoff(vm): """ Wait for the vm to reach state shutoff :param vm: VM instance """ def is_shutoff(): state = vm.state() logging.debug("Current state: %s", state) return "shut off" in state utils_misc.wait_for(is_shutoff, timeout=90, first=1, step=1, text="Waiting for vm state to be shut off") def prepare_vmxml_mem(vmxml): """ Prepare memory and numa settings in vmxml before hotplug dimm param vmxml: guest 
current xml """ # Prepare memory settings vmxml.max_mem_rt = int(params.get("max_mem")) vmxml.max_mem_rt_slots = int(params.get("maxmem_slots")) mem_unit = params.get("mem_unit") vmxml.max_mem_rt_unit = mem_unit current_mem = int(params.get("current_mem")) vmxml.current_mem = current_mem vmxml.current_mem_unit = mem_unit vmxml.memory = int(params.get("memory")) # Prepare numa settings in <cpu> host_numa_node = utils_misc.NumaInfo() host_numa_node_list = host_numa_node.online_nodes numa_nodes = len(host_numa_node_list) if numa_nodes == 0: test.cancel("No host numa node available") numa_dict = {} numa_dict_list = [] cpu_idx = 0 for index in range(numa_nodes): numa_dict['id'] = str(index) numa_dict['memory'] = str(current_mem // numa_nodes) numa_dict['unit'] = mem_unit numa_dict['cpus'] = "%s-%s" % (str(cpu_idx), str(cpu_idx + 1)) cpu_idx += 2 numa_dict_list.append(numa_dict) numa_dict = {} vmxml.vcpu = numa_nodes * 2 vmxml_cpu = vm_xml.VMCPUXML() vmxml_cpu.xml = "<cpu><numa/></cpu>" vmxml_cpu.numa_cell = vmxml_cpu.dicts_to_cells(numa_dict_list) logging.debug(vmxml_cpu.numa_cell) vmxml.cpu = vmxml_cpu vmxml.sync() def trigger_events(dom, events_list=[]): """ Trigger various events in events_list :param dom: the vm objects corresponding to the domain :return: the expected output that virsh event command prints out """ expected_events_list = [] save_path = os.path.join(tmpdir, "%s_event.save" % dom.name) print(dom.name) xmlfile = dom.backup_xml() new_disk = os.path.join(tmpdir, "%s_new_disk.img" % dom.name) dest_path = os.path.join(data_dir.get_data_dir(), "copy") try: for event in events_list: logging.debug("Current event is: %s", event) if event in ['start', 'restore', 'create', 'edit', 'define', 'undefine', 'crash', 'device-removal-failed', 'watchdog', 'io-error']: if dom.is_alive(): dom.destroy() if event in ['create', 'define']: dom.undefine() else: if not dom.is_alive(): dom.start() dom.wait_for_login().close() if event == "resume": dom.pause() if event == "undefine": virsh.undefine(dom.name, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Undefined Removed") elif event == "create": virsh.create(xmlfile, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Resumed Unpaused") expected_events_list.append("'lifecycle' for %s:" " Started Booted") elif event == "destroy": virsh.destroy(dom.name, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Stopped Destroyed") elif event == "define": virsh.define(xmlfile, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Defined Added") elif event == "start": virsh.start(dom.name, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Resumed Unpaused") expected_events_list.append("'lifecycle' for %s:" " Started Booted") dom.wait_for_login().close() elif event == "suspend": virsh.suspend(dom.name, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Suspended Paused") if not libvirt_version.version_compare(5, 3, 0): expected_events_list.append("'lifecycle' for %s:" " Suspended Paused") elif event == "resume": virsh.resume(dom.name, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Resumed Unpaused") elif event == "save": virsh.save(dom.name, save_path, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Suspended Paused") expected_events_list.append("'lifecycle' for %s:" " Stopped Saved") elif event == "restore": if not os.path.exists(save_path): logging.error("%s not exist", save_path) else: virsh.restore(save_path, **virsh_dargs) 
expected_events_list.append("'lifecycle' for %s:" " Started Restored") expected_events_list.append("'lifecycle' for %s:" " Resumed Snapshot") elif event == "edit": #Check whether 'description' element exists. domxml = virsh.dumpxml(dom.name).stdout.strip() find_desc = parseString(domxml).getElementsByTagName("description") if find_desc == []: #If not exists, add one for it. logging.info("Adding <description> to guest") virsh.desc(dom.name, "--config", "Added desc for testvm", **virsh_dargs) #The edit operation is to delete 'description' element. edit_cmd = [r":g/<description.*<\/description>/d"] utlv.exec_virsh_edit(dom.name, edit_cmd) expected_events_list.append("'lifecycle' for %s:" " Defined Updated") elif event == "shutdown": if signal_name is None: virsh.shutdown(dom.name, **virsh_dargs) # Wait a few seconds for shutdown finish time.sleep(3) if utils_misc.compare_qemu_version(2, 9, 0): #Shutdown reason distinguished from qemu_2.9.0-9 expected_events_list.append("'lifecycle' for %s:" " Shutdown Finished after guest request") else: os.kill(dom.get_pid(), getattr(signal, signal_name)) if utils_misc.compare_qemu_version(2, 9, 0): expected_events_list.append("'lifecycle' for %s:" " Shutdown Finished after host request") if not utils_misc.compare_qemu_version(2, 9, 0): expected_events_list.append("'lifecycle' for %s:" " Shutdown Finished") wait_for_shutoff(dom) expected_events_list.append("'lifecycle' for %s:" " Stopped Shutdown") elif event == "crash": if not vmxml.xmltreefile.find('devices').findall('panic'): # Set panic device panic_dev = Panic() panic_dev.model = panic_model panic_dev.addr_type = addr_type panic_dev.addr_iobase = addr_iobase vmxml.add_device(panic_dev) vmxml.on_crash = "coredump-restart" vmxml.sync() logging.info("Guest xml now is: %s", vmxml) dom.start() session = dom.wait_for_login() # Stop kdump in the guest session.cmd("systemctl stop kdump", ignore_all_errors=True) # Enable sysRq session.cmd("echo 1 > /proc/sys/kernel/sysrq") try: # Crash the guest session.cmd("echo c > /proc/sysrq-trigger", timeout=90) except (ShellTimeoutError, ShellProcessTerminatedError) as details: logging.info(details) session.close() expected_events_list.append("'lifecycle' for %s:" " Crashed Panicked") expected_events_list.append("'lifecycle' for %s:" " Resumed Unpaused") elif event == "reset": virsh.reset(dom.name, **virsh_dargs) expected_events_list.append("'reboot' for %s") elif event == "vcpupin": virsh.vcpupin(dom.name, '0', '0', **virsh_dargs) expected_events_list.append("'tunable' for %s:" "\n\tcputune.vcpupin0: 0") elif event == "emulatorpin": virsh.emulatorpin(dom.name, '0', **virsh_dargs) expected_events_list.append("'tunable' for %s:" "\n\tcputune.emulatorpin: 0") elif event == "setmem": mem_size = int(params.get("mem_size", 512000)) virsh.setmem(dom.name, mem_size, **virsh_dargs) expected_events_list.append("'balloon-change' for %s:") elif event == "device-added-removed": add_disk(dom.name, new_disk, 'vdb', '') expected_events_list.append("'device-added' for %s:" " virtio-disk1") virsh.detach_disk(dom.name, 'vdb', **virsh_dargs) expected_events_list.append("'device-removed' for %s:" " virtio-disk1") iface_xml_obj = create_iface_xml() iface_xml_obj.xmltreefile.write() virsh.detach_device(dom.name, iface_xml_obj.xml, **virsh_dargs) expected_events_list.append("'device-removed' for %s:" " net0") time.sleep(2) virsh.attach_device(dom.name, iface_xml_obj.xml, **virsh_dargs) expected_events_list.append("'device-added' for %s:" " net0") elif event == "block-threshold": 
add_disk(dom.name, new_disk, 'vdb', '', format=disk_format) logging.debug(process.run('qemu-img info %s -U' % new_disk)) virsh.domblkthreshold(vm_name, 'vdb', '100M') session = dom.wait_for_login() session.cmd("mkfs.ext4 /dev/vdb && mount /dev/vdb /mnt && ls /mnt && " "dd if=/dev/urandom of=/mnt/bigfile bs=1M count=300 && sync") time.sleep(5) session.close() expected_events_list.append("'block-threshold' for %s:" " dev: vdb(%s) 104857600 29368320") virsh.detach_disk(dom.name, 'vdb', **virsh_dargs) elif event == "change-media": target_device = "hdc" device_target_bus = params.get("device_target_bus", "ide") disk_blk = vm_xml.VMXML.get_disk_blk(dom.name) logging.info("disk_blk %s", disk_blk) if target_device not in disk_blk: logging.info("Adding cdrom to guest") if dom.is_alive(): dom.destroy() add_disk(dom.name, new_disk, target_device, ("--type cdrom --sourcetype file --driver qemu " + "--config --targetbus %s" % device_target_bus)) dom.start() all_options = new_disk + " --insert" virsh.change_media(dom.name, target_device, all_options, **virsh_dargs) expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus + " opened") expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus + " closed") all_options = new_disk + " --eject" virsh.change_media(dom.name, target_device, all_options, **virsh_dargs) expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus + " opened") elif event == "hwclock": session = dom.wait_for_login() try: session.cmd("hwclock --systohc", timeout=60) except (ShellTimeoutError, ShellProcessTerminatedError) as details: logging.info(details) session.close() expected_events_list.append("'rtc-change' for %s:") elif event == "metadata_set": metadata_uri = params.get("metadata_uri") metadata_key = params.get("metadata_key") metadata_value = params.get("metadata_value") virsh.metadata(dom.name, metadata_uri, options="", key=metadata_key, new_metadata=metadata_value, **virsh_dargs) expected_events_list.append("'metadata-change' for %s: " "element http://app.org/") elif event == "metadata_edit": metadata_uri = "http://herp.derp/" metadata_key = "herp" metadata_value = "<derp xmlns:foobar='http://foo.bar/'>foo<bar></bar></derp>" virsh_cmd = r"virsh metadata %s --uri %s --key %s %s" virsh_cmd = virsh_cmd % (dom.name, metadata_uri, metadata_key, "--edit") session = aexpect.ShellSession("sudo -s") logging.info("Running command: %s", virsh_cmd) try: session.sendline(virsh_cmd) session.sendline(r":insert") session.sendline(metadata_value) session.sendline(".") session.send('ZZ') remote.handle_prompts(session, None, None, r"[\#\$]\s*$", debug=True, timeout=60) except Exception as e: test.error("Error occured: %s" % e) session.close() # Check metadata after edit virsh.metadata(dom.name, metadata_uri, options="", key=metadata_key, **virsh_dargs) expected_events_list.append("'metadata-change' for %s: " "element http://app.org/") elif event == "metadata_remove": virsh.metadata(dom.name, metadata_uri, options="--remove", key=metadata_key, **virsh_dargs) expected_events_list.append("'metadata-change' for %s: " "element http://app.org/") elif event == "blockcommit": disk_path = dom.get_blk_devices()['vda']['source'] virsh.snapshot_create_as(dom.name, "s1 --disk-only --no-metadata", **virsh_dargs) snapshot_path = dom.get_blk_devices()['vda']['source'] virsh.blockcommit(dom.name, "vda", "--active --pivot", **virsh_dargs) expected_events_list.append("'block-job' for %s: " "Active Block Commit for " + "%s" 
% snapshot_path + " ready") expected_events_list.append("'block-job-2' for %s: " "Active Block Commit for vda ready") expected_events_list.append("'block-job' for %s: " "Active Block Commit for " + "%s" % disk_path + " completed") expected_events_list.append("'block-job-2' for %s: " "Active Block Commit for vda completed") os.unlink(snapshot_path) elif event == "blockcopy": disk_path = dom.get_blk_devices()['vda']['source'] dom.undefine() virsh.blockcopy(dom.name, "vda", dest_path, "--pivot", **virsh_dargs) expected_events_list.append("'block-job' for %s: " "Block Copy for " + "%s" % disk_path + " ready") expected_events_list.append("'block-job-2' for %s: " "Block Copy for vda ready") expected_events_list.append("'block-job' for %s: " "Block Copy for " + "%s" % dest_path + " completed") expected_events_list.append("'block-job-2' for %s: " "Block Copy for vda completed") elif event == "detach-dimm": prepare_vmxml_mem(vmxml) tg_size = params.get("dimm_size") tg_sizeunit = params.get("dimm_unit") dimm_xml = utils_hotplug.create_mem_xml(tg_size, None, None, tg_sizeunit) virsh.attach_device(dom.name, dimm_xml.xml, flagstr="--config", **virsh_dargs) vmxml_dimm = vm_xml.VMXML.new_from_dumpxml(dom.name) logging.debug("Current vmxml with plugged dimm dev is %s\n" % vmxml_dimm) virsh.start(dom.name, **virsh_dargs) dom.wait_for_login().close() result = virsh.detach_device(dom.name, dimm_xml.xml, debug=True, ignore_status=True) expected_fails = params.get("expected_fails") utlv.check_result(result, expected_fails) vmxml_live = vm_xml.VMXML.new_from_dumpxml(dom.name) logging.debug("Current vmxml after hot-unplug dimm is %s\n" % vmxml_live) expected_events_list.append("'device-removal-failed' for %s: dimm0") elif event == "watchdog": vmxml.remove_all_device_by_type('watchdog') watchdog_dev = Watchdog() watchdog_dev.model_type = params.get("watchdog_model") action = params.get("action") watchdog_dev.action = action vmxml.add_device(watchdog_dev) vmxml.sync() logging.debug("Current vmxml with watchdog dev is %s\n" % vmxml) virsh.start(dom.name, **virsh_dargs) session = dom.wait_for_login() try: session.cmd("echo 0 > /dev/watchdog") except (ShellTimeoutError, ShellProcessTerminatedError) as details: test.fail("Failed to trigger watchdog: %s" % details) session.close() # watchdog acts slowly, waiting for it. 
time.sleep(30) expected_events_list.append("'watchdog' for %s: " + "%s" % action) if action == 'pause': expected_events_list.append("'lifecycle' for %s: Suspended Watchdog") virsh.resume(dom.name, **virsh_dargs) else: # action == 'reset' expected_events_list.append("'reboot' for %s") elif event == "io-error": part_size = params.get("part_size") resume_event = params.get("resume_event") suspend_event = params.get("suspend_event") process.run("truncate -s %s %s" % (part_size, small_part), shell=True) utlv.mkfs(small_part, part_format) utils_misc.mount(small_part, mount_point, None) add_disk(dom.name, new_disk, 'vdb', '--subdriver qcow2 --config', 'qcow2') dom.start() session = dom.wait_for_login() session.cmd("mkfs.ext4 /dev/vdb && mount /dev/vdb /mnt && ls /mnt && " "dd if=/dev/zero of=/mnt/test.img bs=1M count=50", ignore_all_errors=True) time.sleep(5) session.close() expected_events_list.append("'io-error' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause") expected_events_list.append("'io-error-reason' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause due to enospc") expected_events_list.append(suspend_event) process.run("df -hT") virsh.resume(dom.name, **virsh_dargs) time.sleep(5) expected_events_list.append(resume_event) expected_events_list.append("'io-error' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause") expected_events_list.append("'io-error-reason' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause due to enospc") expected_events_list.append(suspend_event) ret = virsh.domstate(dom.name, "--reason", **virsh_dargs) if ret.stdout.strip() != "paused (I/O error)": test.fail("Domain state should still be paused due to I/O error!") else: test.error("Unsupported event: %s" % event) # Event may not received immediately time.sleep(3) finally: if os.path.exists(save_path): os.unlink(save_path) if os.path.exists(new_disk): os.unlink(new_disk) if os.path.exists(dest_path): os.unlink(dest_path) return [(dom.name, event) for event in expected_events_list] def check_output(output, expected_events_list): """ Check received domain event in output. :param output: The virsh shell output, such as: Welcome to virsh, the virtualization interactive terminal. Type: 'help' for help with commands 'quit' to quit virsh # event 'lifecycle' for domain avocado-vt-vm1: Started Booted events received: 1 virsh # :param expected_events_list: A list of expected events """ logging.debug("Actual events: %s", output) event_idx = 0 for dom_name, event in expected_events_list: if event in expected_events_list[0] and not strict_order: event_idx = 0 if re.search("block-threshold", event): event_str = "block-threshold" else: event_str = "event " + event % ("domain '%s'" % dom_name) logging.info("Expected event: %s", event_str) match = re.search(event_str, output[event_idx:]) if match: event_idx = event_idx + match.start(0) + len(match.group(0)) continue else: test.fail("Not find expected event:%s. Is your " "guest too slow to get started in %ss?" 
% (event_str, event_timeout)) # Extra event check for io-error resume if events_list == ['io-error']: event_str = "event 'lifecycle' for domain " + vm_name + ": Resumed Unpaused" if re.search(event_str, output[event_idx:]): test.fail("Extra 'resume' occurred after I/O error!") try: # Set vcpu placement to static to avoid emulatorpin fail vmxml.placement = 'static' # Using a large memeoy(>1048576) to avoid setmem fail vmxml.max_mem = 2097152 vmxml.current_mem = 2097152 vmxml.sync() if event_all_option and not qemu_monitor_test: event_option += " --all" if event_list_option: event_option += " --list" if event_loop: event_option += " --loop" if not status_error and not event_list_option: event_cmd += " %s" % event_option if event_name and not qemu_monitor_test: event_cmd += " --event %s" % event_name if event_timeout: event_cmd += " --timeout %s" % event_timeout # Run the command in a new virsh session, then waiting for # various events logging.info("Sending '%s' to virsh shell", event_cmd) virsh_session.sendline(event_cmd) elif qemu_monitor_test: result = virsh.qemu_monitor_event(event=event_name, event_timeout=event_timeout, options=event_option, **virsh_dargs) utlv.check_exit_status(result, status_error) else: result = virsh.event(event=event_name, event_timeout=event_timeout, options=event_option, **virsh_dargs) utlv.check_exit_status(result, status_error) if not status_error: if not event_list_option: expected_events_list = [] virsh_dargs['ignore_status'] = False for dom in vms: expected_events_list.extend(trigger_events(dom, events_list)) if event_timeout: # Make sure net-event will timeout on time time.sleep(int(event_timeout)) elif event_loop: virsh_session.send_ctrl("^C") time.sleep(5) ret_output = virsh_session.get_stripped_output() if qemu_monitor_test: # Not check for qemu-monitor-event output expected_events_list = [] check_output(ret_output, expected_events_list) finally: for dom in vms: if dom.is_alive(): dom.destroy() virsh_session.close() for xml in vmxml_backup: xml.sync() if os.path.exists(dump_path): shutil.rmtree(dump_path) os.mkdir(dump_path) if utils_misc.is_mounted("/dev/loop0", mount_point, part_format): utils_misc.umount("/dev/loop0", mount_point, part_format) if os.path.exists(small_part): os.unlink(small_part)
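As an illustration of how check_output turns the expected-event templates above into search strings (the domain name is illustrative):

expected = "'lifecycle' for %s: Started Booted"   # one entry from trigger_events()
dom_name = "avocado-vt-vm1"                       # illustrative domain name
event_str = "event " + expected % ("domain '%s'" % dom_name)
# -> "event 'lifecycle' for domain 'avocado-vt-vm1': Started Booted",
# which is then located in the virsh session output with re.search().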
def run(test, params, env): """ Test multiple disks attachment. 1.Prepare test environment,destroy or suspend a VM. 2.Prepare disk image. 3.Edit disks xml and start the domain. 4.Perform test operation. 5.Recover test environment. 6.Confirm the test result. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} gluster_server_name = params.get("gluster_server_name") # If gluster_server is specified from config file, just use this gluster server. if 'EXAMPLE' not in gluster_server_name: params.update({'gluster_server_ip': gluster_server_name}) def prepare_gluster_disk(disk_img, disk_format): """ Setup glusterfs and prepare disk image. """ # Get the image path image_source = vm.get_first_disk_devices()['source'] # Setup gluster host_ip = gluster.setup_or_cleanup_gluster(True, brick_path=brick_path, **params) logging.debug("host ip: %s ", host_ip) image_info = utils_misc.get_image_info(image_source) image_dest = "/mnt/%s" % disk_img if image_info["format"] == disk_format: disk_cmd = ("cp -f %s %s" % (image_source, image_dest)) else: # Convert the disk format disk_cmd = ( "qemu-img convert -f %s -O %s %s %s" % (image_info["format"], disk_format, image_source, image_dest)) # Mount the gluster disk and create the image. process.run("mount -t glusterfs %s:%s /mnt && " "%s && chmod a+rw /mnt/%s && umount /mnt" % (host_ip, vol_name, disk_cmd, disk_img), shell=True) return host_ip def build_disk_xml(disk_img, disk_format, host_ip): """ Try to rebuild disk xml """ if default_pool: disk_xml = Disk(type_name="file") else: disk_xml = Disk(type_name="network") disk_xml.device = "disk" driver_dict = {"name": "qemu", "type": disk_format, "cache": "none"} if driver_iothread: driver_dict.update({"iothread": driver_iothread}) disk_xml.driver = driver_dict disk_xml.target = {"dev": "vdb", "bus": "virtio"} if default_pool: utils_misc.mount("%s:%s" % (host_ip, vol_name), default_pool, "glusterfs") process.run("setsebool virt_use_fusefs on", shell=True) source_dict = {"file": "%s/%s" % (default_pool, disk_img)} disk_xml.source = disk_xml.new_disk_source( **{"attrs": source_dict}) else: source_dict = { "protocol": "gluster", "name": "%s/%s" % (vol_name, disk_img) } host_dict = [{"name": host_ip, "port": "24007"}] # If mutiple_hosts is True, attempt to add multiple hosts. if multiple_hosts: host_dict.append({ "name": params.get("dummy_host1"), "port": "24007" }) host_dict.append({ "name": params.get("dummy_host2"), "port": "24007" }) if transport: host_dict[0]['transport'] = transport disk_xml.source = disk_xml.new_disk_source(**{ "attrs": source_dict, "hosts": host_dict }) return disk_xml def test_pmsuspend(vm_name): """ Test pmsuspend command. """ if vm.is_dead(): vm.start() vm.wait_for_login() # Create swap partition if necessary. 
if not vm.has_swap(): swap_path = os.path.join(data_dir.get_data_dir(), 'swap.img') vm.create_swap_partition(swap_path) ret = virsh.dompmsuspend(vm_name, "disk", **virsh_dargs) libvirt.check_exit_status(ret) # wait for vm to shutdown if not utils_misc.wait_for(lambda: vm.state() == "shut off", 60): test.fail("vm is still alive after S4 operation") # Wait for vm and qemu-ga service to start vm.start() # Prepare guest agent and start guest try: vm.prepare_guest_agent() except (remote.LoginError, virt_vm.VMError) as detail: test.fail("failed to prepare agent:\n%s" % detail) #TODO This step may hang for rhel6 guest ret = virsh.dompmsuspend(vm_name, "mem", **virsh_dargs) libvirt.check_exit_status(ret) # Check vm state if not utils_misc.wait_for(lambda: vm.state() == "pmsuspended", 60): test.fail("vm isn't suspended after S3 operation") ret = virsh.dompmwakeup(vm_name, **virsh_dargs) libvirt.check_exit_status(ret) if not vm.is_alive(): test.fail("vm is not alive after dompmwakeup") # Disk specific attributes. pm_enabled = "yes" == params.get("pm_enabled", "no") gluster_disk = "yes" == params.get("gluster_disk", "no") disk_format = params.get("disk_format", "qcow2") vol_name = params.get("vol_name") transport = params.get("transport", "") default_pool = params.get("default_pool", "") pool_name = params.get("pool_name") driver_iothread = params.get("driver_iothread") dom_iothreads = params.get("dom_iothreads") brick_path = os.path.join(test.virtdir, pool_name) test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no") # Gluster server multiple hosts flag. multiple_hosts = "yes" == params.get("multiple_hosts", "no") pre_vm_state = params.get("pre_vm_state", "running") # Destroy VM first. if vm.is_alive(): vm.destroy(gracefully=False) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vmxml = vmxml_backup.copy() mnt_src = "" # This is brought by new feature:block-dev if transport == "rdma": test.cancel("transport protocol 'rdma' is not yet supported") try: # Build new vm xml. if pm_enabled: vm_xml.VMXML.set_pm_suspend(vm_name) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name) logging.debug("Attempting to set guest agent channel") vmxml.set_agent_channel() vmxml.sync() if gluster_disk: # Setup glusterfs and disk xml. disk_img = "gluster.%s" % disk_format host_ip = prepare_gluster_disk(disk_img, disk_format) mnt_src = "%s:%s" % (host_ip, vol_name) global custom_disk custom_disk = build_disk_xml(disk_img, disk_format, host_ip) start_vm = "yes" == params.get("start_vm", "yes") # set domain options if dom_iothreads: try: vmxml.iothreads = int(dom_iothreads) vmxml.sync() except ValueError: # 'iothreads' may not invalid number in negative tests logging.debug("Can't convert '%s' to integer type", dom_iothreads) # If hot plug, start VM first, otherwise stop VM if running. if start_vm: if vm.is_dead(): vm.start() vm.wait_for_login().close() else: if not vm.is_dead(): vm.destroy() # If gluster_disk is True, use attach_device. attach_option = params.get("attach_option", "") if gluster_disk: cmd_result = virsh.attach_device(domainarg=vm_name, filearg=custom_disk.xml, flagstr=attach_option, dargs=virsh_dargs, debug=True) libvirt.check_exit_status(cmd_result) # Turn VM into certain state. 
if pre_vm_state == "running": logging.info("Starting %s...", vm_name) if vm.is_dead(): vm.start() elif pre_vm_state == "transient": logging.info("Creating %s...", vm_name) vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vm.undefine() if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status: vmxml_backup.define() test.skip("can't create the domain") # Run the tests. if pm_enabled: # Makesure the guest agent is started try: vm.prepare_guest_agent() except (remote.LoginError, virt_vm.VMError) as detail: test.fail("failed to prepare agent: %s" % detail) # Run dompmsuspend command. test_pmsuspend(vm_name) # After block-dev introduced in libvirt 6.0.0 afterwards, gluster+%s.*format information is not provided from qemu output if libvirt_version.version_compare(6, 0, 0): test_qemu_cmd = False if test_qemu_cmd: # Check qemu-kvm command line cmd = ("ps -ef | grep %s | grep -v grep " % vm_name) if transport == "rdma": cmd += " | grep gluster+%s.*format=%s" % (transport, disk_format) else: cmd += " | grep gluster.*format=%s" % disk_format if driver_iothread: cmd += " | grep iothread=iothread%s" % driver_iothread if process.run(cmd, ignore_status=True, shell=True).exit_status: test.fail("Can't see gluster option '%s' " "in command line" % cmd) # Detach hot plugged device. if start_vm and not default_pool: if gluster_disk: ret = virsh.detach_device(vm_name, custom_disk.xml, flagstr=attach_option, dargs=virsh_dargs, wait_for_event=True) libvirt.check_exit_status(ret) finally: # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) logging.info("Restoring vm...") vmxml_backup.sync() if utils_misc.is_mounted(mnt_src, default_pool, 'fuse.glusterfs', verbose=True): process.run("umount %s" % default_pool, ignore_status=True, shell=True) if gluster_disk: gluster.setup_or_cleanup_gluster(False, brick_path=brick_path, **params)
def run(test, params, env): """ Test steps: 1) Get the params from params. 2) Check the environment 3) Start the VM and check whether the VM has been started successfully 4) Compare the Hugepage memory size to the Guest memory that was set. 5) Check the hugepage memory usage. 6) Clean up """ test_type = params.get("test_type", 'normal') tlbfs_enable = 'yes' == params.get("hugetlbfs_enable", 'no') shp_num = int(params.get("static_hugepage_num", 1024)) thp_enable = 'yes' == params.get("trans_hugepage_enable", 'no') mb_enable = 'yes' == params.get("mb_enable", 'yes') delay = int(params.get("delay_time", 10)) # Skip cases early vm_names = [] if test_type == "contrast": vm_names = params.get("vms").split()[:2] if len(vm_names) < 2: test.cancel("This test requires two VMs") # confirm no VM running allvms = virsh.dom_list('--name').stdout.strip() if allvms != '': test.cancel("one or more VMs are alive") err_range = float(params.get("mem_error_range", 1.25)) else: vm_names.append(params.get("main_vm")) if test_type == "stress": target_path = params.get("target_path", "/tmp/test.out") elif test_type == "unixbench": unixbench_control_file = params.get("unixbench_controle_file", "unixbench5.control") # backup original settings shp_orig_num = utils_memory.get_num_huge_pages() thp_orig_status = utils_memory.get_transparent_hugepage() page_size = utils_memory.get_huge_page_size() # mount/umount hugetlbfs tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages", "hugetlbfs") if tlbfs_enable is True: if tlbfs_status is not True: utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs") else: if tlbfs_status is True: utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs") # set static hugepage utils_memory.set_num_huge_pages(shp_num) # enable/disable transparent hugepage if thp_enable: utils_memory.set_transparent_hugepage('always') else: utils_memory.set_transparent_hugepage('never') # set/del memoryBacking tag for vm_name in vm_names: if mb_enable: vm_xml.VMXML.set_memoryBacking_tag(vm_name) else: vm_xml.VMXML.del_memoryBacking_tag(vm_name) utils_libvirtd.libvirtd_restart() non_started_free = utils_memory.get_num_huge_pages_free() vms = [] sessions = [] try: for vm_name in vm_names: # try to start vm and login try: vm = env.get_vm(vm_name) vm.start() except VMError as e: if mb_enable and not tlbfs_enable: # if hugetlbfs is not mounted, # VM start with memoryBacking tag will fail logging.debug(e) else: error_msg = "Test failed in positive case. 
error: %s\n" % e test.fail(error_msg) if vm.is_alive() is not True: break vms.append(vm) # try to login and run some program try: session = vm.wait_for_login() except (LoginError, ShellError) as e: error_msg = "Test failed in positive case.\n error: %s\n" % e test.fail(error_msg) sessions.append(session) if test_type == "stress": # prepare file for increasing stress stress_path = prepare_c_file() remote.scp_to_remote(vm.get_address(), 22, 'root', params.get('password'), stress_path, "/tmp/") # Try to install gcc on guest first utils_package.package_install(["gcc"], session, 360) # increasing workload session.cmd("gcc %s -o %s" % (stress_path, target_path)) session.cmd("%s &" % target_path) if test_type == "unixbench": params["main_vm"] = vm_name params["test_control_file"] = unixbench_control_file control_path = os.path.join(test.virtdir, "control", unixbench_control_file) # unixbench test need 'patch' and 'perl' commands installed utils_package.package_install(["patch", "perl"], session, 360) command = utils_test.run_autotest(vm, session, control_path, None, None, params, copy_only=True) session.cmd("%s &" % command, ignore_all_errors=True) # wait for autotest running on vm time.sleep(delay) def _is_unixbench_running(): cmd = "ps -ef | grep perl | grep Run" return not session.cmd_status(cmd) if not utils_misc.wait_for(_is_unixbench_running, timeout=240): test.cancel("Failed to run unixbench in guest," " please make sure some necessary" " packages are installed in guest," " such as gcc, tar, bzip2") logging.debug("Unixbench test is running in VM") if test_type == "contrast": # wait for vm finish starting completely time.sleep(delay) if not (mb_enable and not tlbfs_enable): logging.debug("starting analyzing the hugepage usage...") pid = vms[-1].get_pid() started_free = utils_memory.get_num_huge_pages_free() # Get the thp usage from /proc/pid/smaps started_anon = utils_memory.get_num_anon_huge_pages(pid) static_used = non_started_free - started_free hugepage_used = static_used * page_size if test_type == "contrast": # get qemu-kvm memory consumption by top cmd = "top -b -n 1|awk '$1 == %s {print $10}'" % pid rate = process.run(cmd, ignore_status=False, verbose=True, shell=True).stdout_text.strip() qemu_kvm_used = (utils_memory.memtotal() * float(rate)) / 100 logging.debug("rate: %s, used-by-qemu-kvm: %f, used-by-vm: %d", rate, qemu_kvm_used, hugepage_used) if abs(qemu_kvm_used - hugepage_used) > hugepage_used * (err_range - 1): test.fail("Error for hugepage usage") if test_type == "stress": if non_started_free <= started_free: logging.debug("hugepage usage:%d -> %d", non_started_free, started_free) test.fail("Error for hugepage usage with stress") if mb_enable is not True: if static_used > 0: test.fail("VM use static hugepage without" " memoryBacking element") if thp_enable is not True and started_anon > 0: test.fail("VM use transparent hugepage, while" " it's disabled") else: if tlbfs_enable is not True: if static_used > 0: test.fail("VM use static hugepage without tlbfs" " mounted") if thp_enable and started_anon <= 0: test.fail("VM doesn't use transparent" " hugepage") else: if shp_num > 0: if static_used <= 0: test.fail("VM doesn't use static" " hugepage") else: if static_used > 0: test.fail("VM use static hugepage," " while it's set to zero") if thp_enable is not True: if started_anon > 0: test.fail("VM use transparent hugepage," " while it's disabled") else: if shp_num == 0 and started_anon <= 0: test.fail("VM doesn't use transparent" " hugepage, while static" " hugepage is disabled") 
finally: # end up session for session in sessions: session.close() for vm in vms: if vm.is_alive(): vm.destroy() for vm_name in vm_names: if mb_enable: vm_xml.VMXML.del_memoryBacking_tag(vm_name) else: vm_xml.VMXML.set_memoryBacking_tag(vm_name) utils_libvirtd.libvirtd_restart() if tlbfs_enable is True: if tlbfs_status is not True: utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs") else: if tlbfs_status is True: utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs") utils_memory.set_num_huge_pages(shp_orig_num) utils_memory.set_transparent_hugepage(thp_orig_status)
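A brief, hedged sketch of the hugepage accounting used in the checks above; the numbers are illustrative only.

page_size = 2048                     # kB, from utils_memory.get_huge_page_size()
non_started_free = 1024              # HugePages_Free before the guest starts
started_free = 512                   # HugePages_Free after the guest starts
static_used = non_started_free - started_free    # 512 static hugepages consumed
hugepage_used = static_used * page_size          # 1048576 kB backed by hugetlbfs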
def run(test, params, env): """ Test multiple disks attachment. 1.Prepare test environment,destroy or suspend a VM. 2.Prepare disk image. 3.Edit disks xml and start the domain. 4.Perform test operation. 5.Recover test environment. 6.Confirm the test result. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} def prepare_gluster_disk(disk_img, disk_format): """ Setup glusterfs and prepare disk image. """ # Get the image path image_source = vm.get_first_disk_devices()['source'] # Setup gluster host_ip = libvirt.setup_or_cleanup_gluster(True, vol_name, brick_path, pool_name) logging.debug("host ip: %s ", host_ip) image_info = utils_misc.get_image_info(image_source) image_dest = "/mnt/%s" % disk_img if image_info["format"] == disk_format: disk_cmd = ("cp -f %s %s" % (image_source, image_dest)) else: # Convert the disk format disk_cmd = ("qemu-img convert -f %s -O %s %s %s" % (image_info["format"], disk_format, image_source, image_dest)) # Mount the gluster disk and create the image. process.run("mount -t glusterfs %s:%s /mnt && " "%s && chmod a+rw /mnt/%s && umount /mnt" % (host_ip, vol_name, disk_cmd, disk_img), shell=True) return host_ip def build_disk_xml(disk_img, disk_format, host_ip): """ Try to rebuild disk xml """ if default_pool: disk_xml = Disk(type_name="file") else: disk_xml = Disk(type_name="network") disk_xml.device = "disk" driver_dict = {"name": "qemu", "type": disk_format, "cache": "none"} if driver_iothread: driver_dict.update({"iothread": driver_iothread}) disk_xml.driver = driver_dict disk_xml.target = {"dev": "vdb", "bus": "virtio"} if default_pool: utils_misc.mount("%s:%s" % (host_ip, vol_name), default_pool, "glusterfs") process.run("setsebool virt_use_fusefs on", shell=True) source_dict = {"file": "%s/%s" % (default_pool, disk_img)} disk_xml.source = disk_xml.new_disk_source( **{"attrs": source_dict}) else: source_dict = {"protocol": "gluster", "name": "%s/%s" % (vol_name, disk_img)} host_dict = [{"name": host_ip, "port": "24007"}] # If mutiple_hosts is True, attempt to add multiple hosts. if multiple_hosts: host_dict.append({"name": params.get("dummy_host1"), "port": "24007"}) host_dict.append({"name": params.get("dummy_host2"), "port": "24007"}) if transport: host_dict[0]['transport'] = transport disk_xml.source = disk_xml.new_disk_source( **{"attrs": source_dict, "hosts": host_dict}) return disk_xml def test_pmsuspend(vm_name): """ Test pmsuspend command. """ if vm.is_dead(): vm.start() vm.wait_for_login() # Create swap partition if nessesary. 
if not vm.has_swap(): swap_path = os.path.join(data_dir.get_tmp_dir(), 'swap.img') vm.create_swap_partition(swap_path) ret = virsh.dompmsuspend(vm_name, "disk", **virsh_dargs) libvirt.check_exit_status(ret) # wait for vm to shutdown if not utils_misc.wait_for(lambda: vm.state() == "shut off", 60): test.fail("vm is still alive after S4 operation") # Wait for vm and qemu-ga service to start vm.start() # Prepare guest agent and start guest try: vm.prepare_guest_agent() except (remote.LoginError, virt_vm.VMError) as detail: test.fail("failed to prepare agent:\n%s" % detail) #TODO This step may hang for rhel6 guest ret = virsh.dompmsuspend(vm_name, "mem", **virsh_dargs) libvirt.check_exit_status(ret) # Check vm state if not utils_misc.wait_for(lambda: vm.state() == "pmsuspended", 60): test.fail("vm isn't suspended after S3 operation") ret = virsh.dompmwakeup(vm_name, **virsh_dargs) libvirt.check_exit_status(ret) if not vm.is_alive(): test.fail("vm is not alive after dompmwakeup") # Disk specific attributes. pm_enabled = "yes" == params.get("pm_enabled", "no") gluster_disk = "yes" == params.get("gluster_disk", "no") disk_format = params.get("disk_format", "qcow2") vol_name = params.get("vol_name") transport = params.get("transport", "") default_pool = params.get("default_pool", "") pool_name = params.get("pool_name") driver_iothread = params.get("driver_iothread") dom_iothreads = params.get("dom_iothreads") brick_path = os.path.join(test.virtdir, pool_name) test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no") # Gluster server multiple hosts flag. multiple_hosts = "yes" == params.get("multiple_hosts", "no") pre_vm_state = params.get("pre_vm_state", "running") # Destroy VM first. if vm.is_alive(): vm.destroy(gracefully=False) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vmxml = vmxml_backup.copy() mnt_src = "" try: # Build new vm xml. if pm_enabled: vm_xml.VMXML.set_pm_suspend(vm_name) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name) logging.debug("Attempting to set guest agent channel") vmxml.set_agent_channel() vmxml.sync() if gluster_disk: # Setup glusterfs and disk xml. disk_img = "gluster.%s" % disk_format host_ip = prepare_gluster_disk(disk_img, disk_format) mnt_src = "%s:%s" % (host_ip, vol_name) global custom_disk custom_disk = build_disk_xml(disk_img, disk_format, host_ip) start_vm = "yes" == params.get("start_vm", "yes") # set domain options if dom_iothreads: try: vmxml.iothreads = int(dom_iothreads) vmxml.sync() except ValueError: # 'iothreads' may not invalid number in negative tests logging.debug("Can't convert '%s' to integer type", dom_iothreads) if default_pool: disks_dev = vmxml.get_devices(device_type="disk") for disk in disks_dev: vmxml.del_device(disk) vmxml.sync() # If hot plug, start VM first, otherwise stop VM if running. if start_vm: if vm.is_dead(): vm.start() else: if not vm.is_dead(): vm.destroy() # If gluster_disk is True, use attach_device. attach_option = params.get("attach_option", "") if gluster_disk: cmd_result = virsh.attach_device(domainarg=vm_name, filearg=custom_disk.xml, flagstr=attach_option, dargs=virsh_dargs, debug=True) libvirt.check_exit_status(cmd_result) # Turn VM into certain state. 
if pre_vm_state == "running": logging.info("Starting %s...", vm_name) if vm.is_dead(): vm.start() elif pre_vm_state == "transient": logging.info("Creating %s...", vm_name) vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vm.undefine() if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status: vmxml_backup.define() test.skip("can't create the domain") # Run the tests. if pm_enabled: # Makesure the guest agent is started try: vm.prepare_guest_agent() except (remote.LoginError, virt_vm.VMError) as detail: test.fail("failed to prepare agent: %s" % detail) # Run dompmsuspend command. test_pmsuspend(vm_name) if test_qemu_cmd: # Check qemu-kvm command line cmd = ("ps -ef | grep %s | grep -v grep " % vm_name) if transport == "rdma": cmd += " | grep gluster+%s.*format=%s" % (transport, disk_format) else: cmd += " | grep gluster.*format=%s" % disk_format if driver_iothread: cmd += " | grep iothread=iothread%s" % driver_iothread if process.run(cmd, ignore_status=True, shell=True).exit_status: test.fail("Can't see gluster option '%s' " "in command line" % cmd) # Detach hot plugged device. if start_vm and not default_pool: if gluster_disk: ret = virsh.detach_device(vm_name, custom_disk.xml, flagstr=attach_option, dargs=virsh_dargs) libvirt.check_exit_status(ret) finally: # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) logging.info("Restoring vm...") vmxml_backup.sync() if utils_misc.is_mounted(mnt_src, default_pool, 'glusterfs'): process.run("umount %s" % default_pool, ignore_status=True, shell=True) if gluster_disk: libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path)