def run(test, params, env): """ Test auto_dump_* parameter in qemu.conf. 1) Change auto_dump_* in qemu.conf; 2) Restart libvirt daemon; 3) Check if file open state changed accordingly. """ vm_name = params.get("main_vm", "avocado-vt-vm1") bypass_cache = params.get("auto_dump_bypass_cache", "not_set") panic_model = params.get("panic_model") addr_type = params.get("addr_type") addr_iobase = params.get("addr_iobase") vm = env.get_vm(vm_name) target_flags = int(params.get('target_flags', '0o40000'), 8) if panic_model and not libvirt_version.version_compare(1, 3, 1): test.cancel("panic device model attribute not supported " "on current libvirt version") vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() config = utils_config.LibvirtQemuConfig() libvirtd = utils_libvirtd.Libvirtd() dump_path = os.path.join(data_dir.get_tmp_dir(), "dump") try: if not vmxml.xmltreefile.find('devices').findall('panic'): # Set panic device panic_dev = Panic() if panic_model: panic_dev.model = panic_model if addr_type: panic_dev.addr_type = addr_type if addr_iobase: panic_dev.addr_iobase = addr_iobase vmxml.add_device(panic_dev) vmxml.on_crash = "coredump-restart" vmxml.sync() vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) if not vmxml.xmltreefile.find('devices').findall('panic'): test.cancel("No 'panic' device in the guest, maybe " "your libvirt version doesn't support it") # Setup qemu.conf if bypass_cache == 'not_set': del config.auto_dump_bypass_cache else: config.auto_dump_bypass_cache = bypass_cache config.auto_dump_path = dump_path if os.path.exists(dump_path): os.rmdir(dump_path) os.mkdir(dump_path) # Restart libvirtd to make change valid. libvirtd.restart() # Restart VM to create a new qemu process. if vm.is_alive(): vm.destroy() vm.start() def get_flags(dump_path, result_dict): cmd = "lsof -w %s/* |awk '/libvirt_i/{print $2}'" % dump_path start_time = time.time() while (time.time() - start_time) < 30: ret = process.run(cmd, shell=True, ignore_status=True) status, iohelper_pid = ret.exit_status, ret.stdout_text.strip() if status: time.sleep(0.1) continue if not len(iohelper_pid): continue else: logging.info('pid: %s', iohelper_pid) result_dict['pid'] = iohelper_pid break # Get file open flags containing bypass cache information. with open('/proc/%s/fdinfo/1' % iohelper_pid, 'r') as fdinfo: flags = 0 for line in fdinfo.readlines(): if line.startswith('flags:'): flags = int(line.split()[1], 8) logging.debug('file open flag is: %o', flags) result_dict['flags'] = flags with open('/proc/%s/cmdline' % iohelper_pid) as cmdinfo: cmdline = cmdinfo.readline() logging.debug(cmdline.split()) session = vm.wait_for_login() result_dict = multiprocessing.Manager().dict() child_process = multiprocessing.Process(target=get_flags, args=(dump_path, result_dict)) child_process.start() # Stop kdump in the guest session.cmd("service kdump stop", ignore_all_errors=True) # Enable sysRq session.cmd("echo 1 > /proc/sys/kernel/sysrq") try: # Crash the guest session.cmd("echo c > /proc/sysrq-trigger", timeout=1) except ShellTimeoutError: pass session.close() child_process.join(10) if child_process.is_alive(): child_process.kill() flags = result_dict['flags'] iohelper_pid = result_dict['pid'] # Kill core dump process to speed up test try: process.run('kill %s' % iohelper_pid) except process.CmdError as detail: logging.debug("Dump already done:\n%s", detail) arch = platform.machine() if arch in ['x86_64', 'ppc64le', 's390x']: # Check if bypass cache flag set or unset accordingly.
cond1 = (flags & target_flags) and bypass_cache != '1' cond2 = not (flags & target_flags) and bypass_cache == '1' if cond1 or cond2: test.fail('auto_dump_bypass_cache is %s but flags ' 'is %o' % (bypass_cache, flags)) else: test.cancel("Unknown Arch. Do the necessary changes to" " support") finally: backup_xml.sync() config.restore() libvirtd.restart() if os.path.exists(dump_path): shutil.rmtree(dump_path)
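# A minimal, hedged sketch (not part of the test above) of the O_DIRECT check the
# auto_dump_bypass_cache tests rely on: /proc/<pid>/fdinfo/<fd> prints the open
# flags in octal, and the O_DIRECT bit is architecture dependent, which is why the
# test takes it from the 'target_flags' parameter. The helper name and fallback
# value below are illustrative assumptions.
import platform


O_DIRECT_BY_ARCH = {'x86_64': 0o40000, 's390x': 0o40000, 'ppc64le': 0o400000}


def fd_opened_with_o_direct(pid, fd=1, o_direct=None):
    """Return True if the given fd of pid was opened with O_DIRECT."""
    if o_direct is None:
        o_direct = O_DIRECT_BY_ARCH.get(platform.machine(), 0o40000)
    with open('/proc/%s/fdinfo/%s' % (pid, fd)) as fdinfo:
        for line in fdinfo:
            if line.startswith('flags:'):
                # the kernel prints the flags field in octal
                return bool(int(line.split()[1], 8) & o_direct)
    return False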
def run(test, params, env): """ Test command: virsh domstats. 1.Prepare vm state. 2.Perform virsh domstats operation. 3.Confirm the test result. 4.Recover test environment. """ default_vm_name = params.get("main_vm", "virt-tests-vm1") default_vm = env.get_vm(default_vm_name) vm_list = params.get("vm_list", "") vm_state = params.get("vm_state", "") domstats_option = params.get("domstats_option") raw_print = "yes" == params.get("raw_print", "no") enforce_command = "yes" == params.get("enforce_command", "no") status_error = (params.get("status_error", "no") == "yes") vms = [default_vm] if vm_list: for name in vm_list.split(): if name != default_vm_name: vms.append(env.get_vm(name)) backup_xml_list = [] try: if not status_error: for vm in vms: # Back up xml file vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name) backup_xml_list.append(vmxml.copy()) if vm_state == "crash": if vm.is_alive(): vm.destroy(gracefully=False) vmxml.on_crash = "preserve" # Add <panic> device to domain panic_dev = Panic() panic_dev.addr_type = "isa" panic_dev.addr_iobase = "0x505" vmxml.add_device(panic_dev) vmxml.sync() virsh.start(vm.name, ignore_status=False) vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm.name) # Skip this test if no panic device find if not vmxml_new.xmltreefile.find('devices').findall('panic'): raise error.TestNAError("No 'panic' device in the guest," " maybe your libvirt version" " doesn't support it.") prepare_vm_state(vm, vm_state) if enforce_command: domstats_option += " --enforce" if raw_print: domstats_option += " --raw" # Run virsh command result = virsh.domstats(vm_list, domstats_option, ignore_status=True, debug=True) status = result.exit_status output = result.stdout.strip() # check status_error if status_error: if not status: if "unsupported flags" in result.stderr: raise error.TestNAError(result.stderr) raise error.TestFail("Run successfully with wrong command!") else: if status: raise error.TestFail("Run failed with right command") else: for vm in vms: if not check_output(output, vm, vm_state, domstats_option): raise error.TestFail("Check command output failed") finally: try: for vm in vms: vm.destroy(gracefully=False) except AttributeError: pass for backup_xml in backup_xml_list: backup_xml.sync()
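# The check_output() helper used above is defined elsewhere in the test module.
# As a hedged sketch of the idea: "virsh domstats" prints one "Domain: '<name>'"
# header per guest followed by indented key=value records, so a minimal check of
# the state group could look like this (function name is illustrative):
def domstats_state_value(output, vm_name):
    """Return the state.state value reported for vm_name, or None if absent."""
    in_domain = False
    for line in output.splitlines():
        line = line.strip()
        if line.startswith("Domain:"):
            in_domain = vm_name in line
        elif in_domain and line.startswith("state.state="):
            return int(line.split("=", 1)[1])
    return None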
def run(test, params, env): """ Test command: virsh domstate. 1.Prepare test environment. 2.When the libvirtd == "off", stop the libvirtd service. 3.Perform virsh domstate operation. 4.Recover test environment. 5.Confirm the test result. """ vm_name = params.get("main_vm", "avocado-vt-vm1") vm = env.get_vm(vm_name) libvirtd_state = params.get("libvirtd", "on") vm_ref = params.get("domstate_vm_ref") status_error = (params.get("status_error", "no") == "yes") extra = params.get("domstate_extra", "") vm_action = params.get("domstate_vm_action", "") vm_oncrash_action = params.get("domstate_vm_oncrash") domid = vm.get_id() domuuid = vm.get_uuid() libvirtd_service = utils_libvirtd.Libvirtd() if vm_ref == "id": vm_ref = domid elif vm_ref == "hex_id": vm_ref = hex(int(domid)) elif vm_ref.find("invalid") != -1: vm_ref = params.get(vm_ref) elif vm_ref == "name": vm_ref = vm_name elif vm_ref == "uuid": vm_ref = domuuid # Back up xml file. vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() # Back up qemu.conf qemu_conf = utils_config.LibvirtQemuConfig() libvirtd = utils_libvirtd.Libvirtd() dump_path = os.path.join(test.tmpdir, "dump/") os.mkdir(dump_path) dump_file = "" try: if vm_action == "crash": if vm.is_alive(): vm.destroy(gracefully=False) vmxml.on_crash = vm_oncrash_action if not vmxml.xmltreefile.find('devices').findall('panic'): # Add <panic> device to domain panic_dev = Panic() panic_dev.addr_type = "isa" panic_dev.addr_iobase = "0x505" vmxml.add_device(panic_dev) vmxml.sync() # Config auto_dump_path in qemu.conf qemu_conf.auto_dump_path = dump_path libvirtd_service.restart() if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']: dump_file = dump_path + "*" + vm_name + "-*" # Start VM and check the panic device virsh.start(vm_name, ignore_status=False) vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name) # Skip this test if no panic device find if not vmxml_new.xmltreefile.find('devices').findall('panic'): raise exceptions.TestSkipError( "No 'panic' device in the guest. Maybe your libvirt " "version doesn't support it.") try: if vm_action == "suspend": virsh.suspend(vm_name, ignore_status=False) elif vm_action == "resume": virsh.suspend(vm_name, ignore_status=False) virsh.resume(vm_name, ignore_status=False) elif vm_action == "destroy": virsh.destroy(vm_name, ignore_status=False) elif vm_action == "start": virsh.destroy(vm_name, ignore_status=False) virsh.start(vm_name, ignore_status=False) elif vm_action == "kill": libvirtd_service.stop() utils_misc.kill_process_by_pattern(vm_name) libvirtd_service.restart() elif vm_action == "crash": session = vm.wait_for_login() session.cmd("service kdump stop", ignore_all_errors=True) # Enable sysRq session.cmd("echo 1 > /proc/sys/kernel/sysrq") # Send key ALT-SysRq-c to crash VM, and command will not # return as vm crashed, so fail early for 'destroy' and # 'preserve' action. 
For 'restart', 'coredump-restart' # and 'coredump-destroy' actions, they all need more time # to dump core file or restart OS, so using the default # session command timeout (60s) try: if vm_oncrash_action in ['destroy', 'preserve']: timeout = 3 else: timeout = 60 session.cmd("echo c > /proc/sysrq-trigger", timeout=timeout) except (ShellTimeoutError, ShellProcessTerminatedError): pass session.close() except process.CmdError as detail: raise exceptions.TestError("Guest prepare action error: %s" % detail) if libvirtd_state == "off": libvirtd_service.stop() if vm_ref == "remote": remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM") local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM") remote_pwd = params.get("remote_pwd", None) if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"): raise exceptions.TestSkipError( "Test 'remote' parameters not setup") status = 0 try: remote_uri = libvirt_vm.complete_uri(local_ip) session = remote.remote_login("ssh", remote_ip, "22", "root", remote_pwd, "#") session.cmd_output('LANG=C') command = "virsh -c %s domstate %s" % (remote_uri, vm_name) status, output = session.cmd_status_output(command, internal_timeout=5) session.close() except process.CmdError: status = 1 else: result = virsh.domstate(vm_ref, extra, ignore_status=True, debug=True) status = result.exit_status output = result.stdout.strip() # check status_error if status_error: if not status: raise exceptions.TestFail( "Run successfully with wrong command!") else: if status or not output: raise exceptions.TestFail("Run failed with right command") if extra.count("reason"): if vm_action == "suspend": # If not, will cost long time to destroy vm virsh.destroy(vm_name) if not output.count("user"): raise ActionError(vm_action) elif vm_action == "resume": if not output.count("unpaused"): raise ActionError(vm_action) elif vm_action == "destroy": if not output.count("destroyed"): raise ActionError(vm_action) elif vm_action == "start": if not output.count("booted"): raise ActionError(vm_action) elif vm_action == "kill": if not output.count("crashed"): raise ActionError(vm_action) elif vm_action == "crash": if not check_crash_state(output, vm_oncrash_action, vm_name, dump_file): raise ActionError(vm_action) if vm_ref == "remote": if not (re.search("running", output) or re.search( "blocked", output) or re.search("idle", output)): raise exceptions.TestFail("Run failed with right command")
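# For reference, a hedged sketch of the <panic> device the domstate tests above
# inject before crashing the guest; printing the Panic object shows the XML
# fragment added to the domain (0x505 is the conventional pvpanic ISA port on
# x86, while pSeries and s390 guests expose a built-in panic device instead).
from virttest.libvirt_xml.devices.panic import Panic

panic_dev = Panic()
panic_dev.addr_type = "isa"
panic_dev.addr_iobase = "0x505"
print(panic_dev)   # roughly: <panic><address type="isa" iobase="0x505"/></panic>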
def trigger_events(dom, events_list=[]): """ Trigger various events in events_list :param dom: the vm objects corresponding to the domain :return: the expected output that virsh event command prints out """ expected_events_list = [] save_path = os.path.join(tmpdir, "%s_event.save" % dom.name) print(dom.name) xmlfile = dom.backup_xml() new_disk = os.path.join(tmpdir, "%s_new_disk.img" % dom.name) dest_path = os.path.join(data_dir.get_data_dir(), "copy") try: for event in events_list: logging.debug("Current event is: %s", event) if event in ['start', 'restore', 'create', 'edit', 'define', 'undefine', 'crash', 'device-removal-failed', 'watchdog', 'io-error']: if dom.is_alive(): dom.destroy() if event in ['create', 'define']: dom.undefine() else: if not dom.is_alive(): dom.start() dom.wait_for_login().close() if event == "resume": dom.pause() if event == "undefine": virsh.undefine(dom.name, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Undefined Removed") elif event == "create": virsh.create(xmlfile, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Resumed Unpaused") expected_events_list.append("'lifecycle' for %s:" " Started Booted") elif event == "destroy": virsh.destroy(dom.name, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Stopped Destroyed") elif event == "define": virsh.define(xmlfile, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Defined Added") elif event == "start": virsh.start(dom.name, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Resumed Unpaused") expected_events_list.append("'lifecycle' for %s:" " Started Booted") dom.wait_for_login().close() elif event == "suspend": virsh.suspend(dom.name, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Suspended Paused") if not libvirt_version.version_compare(5, 3, 0): expected_events_list.append("'lifecycle' for %s:" " Suspended Paused") elif event == "resume": virsh.resume(dom.name, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Resumed Unpaused") elif event == "save": virsh.save(dom.name, save_path, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Suspended Paused") expected_events_list.append("'lifecycle' for %s:" " Stopped Saved") elif event == "restore": if not os.path.exists(save_path): logging.error("%s not exist", save_path) else: virsh.restore(save_path, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Started Restored") expected_events_list.append("'lifecycle' for %s:" " Resumed Snapshot") elif event == "edit": #Check whether 'description' element exists. domxml = virsh.dumpxml(dom.name).stdout.strip() find_desc = parseString(domxml).getElementsByTagName("description") if find_desc == []: #If not exists, add one for it. logging.info("Adding <description> to guest") virsh.desc(dom.name, "--config", "Added desc for testvm", **virsh_dargs) #The edit operation is to delete 'description' element. 
edit_cmd = [r":g/<description.*<\/description>/d"] utlv.exec_virsh_edit(dom.name, edit_cmd) expected_events_list.append("'lifecycle' for %s:" " Defined Updated") elif event == "shutdown": if signal_name is None: virsh.shutdown(dom.name, **virsh_dargs) # Wait a few seconds for shutdown finish time.sleep(3) if utils_misc.compare_qemu_version(2, 9, 0): #Shutdown reason distinguished from qemu_2.9.0-9 expected_events_list.append("'lifecycle' for %s:" " Shutdown Finished after guest request") else: os.kill(dom.get_pid(), getattr(signal, signal_name)) if utils_misc.compare_qemu_version(2, 9, 0): expected_events_list.append("'lifecycle' for %s:" " Shutdown Finished after host request") if not utils_misc.compare_qemu_version(2, 9, 0): expected_events_list.append("'lifecycle' for %s:" " Shutdown Finished") wait_for_shutoff(dom) expected_events_list.append("'lifecycle' for %s:" " Stopped Shutdown") elif event == "crash": if not vmxml.xmltreefile.find('devices').findall('panic'): # Set panic device panic_dev = Panic() panic_dev.model = panic_model panic_dev.addr_type = addr_type panic_dev.addr_iobase = addr_iobase vmxml.add_device(panic_dev) vmxml.on_crash = "coredump-restart" vmxml.sync() logging.info("Guest xml now is: %s", vmxml) dom.start() session = dom.wait_for_login() # Stop kdump in the guest session.cmd("systemctl stop kdump", ignore_all_errors=True) # Enable sysRq session.cmd("echo 1 > /proc/sys/kernel/sysrq") try: # Crash the guest session.cmd("echo c > /proc/sysrq-trigger", timeout=90) except (ShellTimeoutError, ShellProcessTerminatedError) as details: logging.info(details) session.close() expected_events_list.append("'lifecycle' for %s:" " Crashed Panicked") expected_events_list.append("'lifecycle' for %s:" " Resumed Unpaused") elif event == "reset": virsh.reset(dom.name, **virsh_dargs) expected_events_list.append("'reboot' for %s") elif event == "vcpupin": virsh.vcpupin(dom.name, '0', '0', **virsh_dargs) expected_events_list.append("'tunable' for %s:" "\n\tcputune.vcpupin0: 0") elif event == "emulatorpin": virsh.emulatorpin(dom.name, '0', **virsh_dargs) expected_events_list.append("'tunable' for %s:" "\n\tcputune.emulatorpin: 0") elif event == "setmem": mem_size = int(params.get("mem_size", 512000)) virsh.setmem(dom.name, mem_size, **virsh_dargs) expected_events_list.append("'balloon-change' for %s:") elif event == "device-added-removed": add_disk(dom.name, new_disk, 'vdb', '') expected_events_list.append("'device-added' for %s:" " virtio-disk1") virsh.detach_disk(dom.name, 'vdb', **virsh_dargs) expected_events_list.append("'device-removed' for %s:" " virtio-disk1") iface_xml_obj = create_iface_xml() iface_xml_obj.xmltreefile.write() virsh.detach_device(dom.name, iface_xml_obj.xml, **virsh_dargs) expected_events_list.append("'device-removed' for %s:" " net0") time.sleep(2) virsh.attach_device(dom.name, iface_xml_obj.xml, **virsh_dargs) expected_events_list.append("'device-added' for %s:" " net0") elif event == "block-threshold": add_disk(dom.name, new_disk, 'vdb', '', format=disk_format) logging.debug(process.run('qemu-img info %s -U' % new_disk)) virsh.domblkthreshold(vm_name, 'vdb', '100M') session = dom.wait_for_login() session.cmd("mkfs.ext4 /dev/vdb && mount /dev/vdb /mnt && ls /mnt && " "dd if=/dev/urandom of=/mnt/bigfile bs=1M count=300 && sync") time.sleep(5) session.close() expected_events_list.append("'block-threshold' for %s:" " dev: vdb(%s) 104857600 29368320") virsh.detach_disk(dom.name, 'vdb', **virsh_dargs) elif event == "change-media": target_device = "hdc" 
device_target_bus = params.get("device_target_bus", "ide") disk_blk = vm_xml.VMXML.get_disk_blk(dom.name) logging.info("disk_blk %s", disk_blk) if target_device not in disk_blk: logging.info("Adding cdrom to guest") if dom.is_alive(): dom.destroy() add_disk(dom.name, new_disk, target_device, ("--type cdrom --sourcetype file --driver qemu " + "--config --targetbus %s" % device_target_bus)) dom.start() all_options = new_disk + " --insert" virsh.change_media(dom.name, target_device, all_options, **virsh_dargs) expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus + " opened") expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus + " closed") all_options = new_disk + " --eject" virsh.change_media(dom.name, target_device, all_options, **virsh_dargs) expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus + " opened") elif event == "hwclock": session = dom.wait_for_login() try: session.cmd("hwclock --systohc", timeout=60) except (ShellTimeoutError, ShellProcessTerminatedError) as details: logging.info(details) session.close() expected_events_list.append("'rtc-change' for %s:") elif event == "metadata_set": metadata_uri = params.get("metadata_uri") metadata_key = params.get("metadata_key") metadata_value = params.get("metadata_value") virsh.metadata(dom.name, metadata_uri, options="", key=metadata_key, new_metadata=metadata_value, **virsh_dargs) expected_events_list.append("'metadata-change' for %s: " "element http://app.org/") elif event == "metadata_edit": metadata_uri = "http://herp.derp/" metadata_key = "herp" metadata_value = "<derp xmlns:foobar='http://foo.bar/'>foo<bar></bar></derp>" virsh_cmd = r"virsh metadata %s --uri %s --key %s %s" virsh_cmd = virsh_cmd % (dom.name, metadata_uri, metadata_key, "--edit") session = aexpect.ShellSession("sudo -s") logging.info("Running command: %s", virsh_cmd) try: session.sendline(virsh_cmd) session.sendline(r":insert") session.sendline(metadata_value) session.sendline(".") session.send('ZZ') remote.handle_prompts(session, None, None, r"[\#\$]\s*$", debug=True, timeout=60) except Exception as e: test.error("Error occured: %s" % e) session.close() # Check metadata after edit virsh.metadata(dom.name, metadata_uri, options="", key=metadata_key, **virsh_dargs) expected_events_list.append("'metadata-change' for %s: " "element http://app.org/") elif event == "metadata_remove": virsh.metadata(dom.name, metadata_uri, options="--remove", key=metadata_key, **virsh_dargs) expected_events_list.append("'metadata-change' for %s: " "element http://app.org/") elif event == "blockcommit": disk_path = dom.get_blk_devices()['vda']['source'] virsh.snapshot_create_as(dom.name, "s1 --disk-only --no-metadata", **virsh_dargs) snapshot_path = dom.get_blk_devices()['vda']['source'] virsh.blockcommit(dom.name, "vda", "--active --pivot", **virsh_dargs) expected_events_list.append("'block-job' for %s: " "Active Block Commit for " + "%s" % snapshot_path + " ready") expected_events_list.append("'block-job-2' for %s: " "Active Block Commit for vda ready") expected_events_list.append("'block-job' for %s: " "Active Block Commit for " + "%s" % disk_path + " completed") expected_events_list.append("'block-job-2' for %s: " "Active Block Commit for vda completed") os.unlink(snapshot_path) elif event == "blockcopy": disk_path = dom.get_blk_devices()['vda']['source'] dom.undefine() virsh.blockcopy(dom.name, "vda", dest_path, "--pivot", **virsh_dargs) expected_events_list.append("'block-job' 
for %s: " "Block Copy for " + "%s" % disk_path + " ready") expected_events_list.append("'block-job-2' for %s: " "Block Copy for vda ready") expected_events_list.append("'block-job' for %s: " "Block Copy for " + "%s" % dest_path + " completed") expected_events_list.append("'block-job-2' for %s: " "Block Copy for vda completed") elif event == "detach-dimm": prepare_vmxml_mem(vmxml) tg_size = params.get("dimm_size") tg_sizeunit = params.get("dimm_unit") dimm_xml = utils_hotplug.create_mem_xml(tg_size, None, None, tg_sizeunit) virsh.attach_device(dom.name, dimm_xml.xml, flagstr="--config", **virsh_dargs) vmxml_dimm = vm_xml.VMXML.new_from_dumpxml(dom.name) logging.debug("Current vmxml with plugged dimm dev is %s\n" % vmxml_dimm) virsh.start(dom.name, **virsh_dargs) dom.wait_for_login().close() result = virsh.detach_device(dom.name, dimm_xml.xml, debug=True, ignore_status=True) expected_fails = params.get("expected_fails") utlv.check_result(result, expected_fails) vmxml_live = vm_xml.VMXML.new_from_dumpxml(dom.name) logging.debug("Current vmxml after hot-unplug dimm is %s\n" % vmxml_live) expected_events_list.append("'device-removal-failed' for %s: dimm0") elif event == "watchdog": vmxml.remove_all_device_by_type('watchdog') watchdog_dev = Watchdog() watchdog_dev.model_type = params.get("watchdog_model") action = params.get("action") watchdog_dev.action = action vmxml.add_device(watchdog_dev) vmxml.sync() logging.debug("Current vmxml with watchdog dev is %s\n" % vmxml) virsh.start(dom.name, **virsh_dargs) session = dom.wait_for_login() try: session.cmd("echo 0 > /dev/watchdog") except (ShellTimeoutError, ShellProcessTerminatedError) as details: test.fail("Failed to trigger watchdog: %s" % details) session.close() # watchdog acts slowly, waiting for it. time.sleep(30) expected_events_list.append("'watchdog' for %s: " + "%s" % action) if action == 'pause': expected_events_list.append("'lifecycle' for %s: Suspended Watchdog") virsh.resume(dom.name, **virsh_dargs) else: # action == 'reset' expected_events_list.append("'reboot' for %s") elif event == "io-error": part_size = params.get("part_size") resume_event = params.get("resume_event") suspend_event = params.get("suspend_event") process.run("truncate -s %s %s" % (part_size, small_part), shell=True) utlv.mkfs(small_part, part_format) utils_misc.mount(small_part, mount_point, None) add_disk(dom.name, new_disk, 'vdb', '--subdriver qcow2 --config', 'qcow2') dom.start() session = dom.wait_for_login() session.cmd("mkfs.ext4 /dev/vdb && mount /dev/vdb /mnt && ls /mnt && " "dd if=/dev/zero of=/mnt/test.img bs=1M count=50", ignore_all_errors=True) time.sleep(5) session.close() expected_events_list.append("'io-error' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause") expected_events_list.append("'io-error-reason' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause due to enospc") expected_events_list.append(suspend_event) process.run("df -hT") virsh.resume(dom.name, **virsh_dargs) time.sleep(5) expected_events_list.append(resume_event) expected_events_list.append("'io-error' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause") expected_events_list.append("'io-error-reason' for %s: " + "%s" % new_disk + r" \(virtio-disk1\) pause due to enospc") expected_events_list.append(suspend_event) ret = virsh.domstate(dom.name, "--reason", **virsh_dargs) if ret.stdout.strip() != "paused (I/O error)": test.fail("Domain state should still be paused due to I/O error!") else: test.error("Unsupported event: %s" % event) # Event may not received immediately 
time.sleep(3) finally: if os.path.exists(save_path): os.unlink(save_path) if os.path.exists(new_disk): os.unlink(new_disk) if os.path.exists(dest_path): os.unlink(dest_path) return [(dom.name, event) for event in expected_events_list]
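# Hedged sketch of how the (dom.name, expected_string) pairs returned by
# trigger_events() are typically consumed: the "%s" in each expected string is
# filled with the domain name and the result is matched as a regex against the
# output of a long-running "virsh event --loop --all" session. The real checking
# code lives elsewhere in the test module; the helper name is illustrative, and
# strings carrying a second placeholder would need that value filled in first.
import re


def missing_events(virsh_event_output, expected_pairs):
    """Return the expected event patterns that never showed up in the output."""
    missing = []
    for dom_name, event_str in expected_pairs:
        pattern = event_str.replace('%s', dom_name, 1)
        if not re.search(pattern, virsh_event_output):
            missing.append(pattern)
    return missing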
def run(test, params, env): """ Test auto_dump_* parameter in qemu.conf. 1) Change auto_dump_* in qemu.conf; 2) Restart libvirt daemon; 3) Check if file open state changed accordingly. """ vm_name = params.get("main_vm", "avocado-vt-vm1") bypass_cache = params.get("auto_dump_bypass_cache", "not_set") panic_model = params.get("panic_model") addr_type = params.get("addr_type") addr_iobase = params.get("addr_iobase") vm = env.get_vm(vm_name) if panic_model and not libvirt_version.version_compare(1, 3, 1): test.cancel("panic device model attribute not supported " "on current libvirt version") vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() config = utils_config.LibvirtQemuConfig() libvirtd = utils_libvirtd.Libvirtd() dump_path = os.path.join(data_dir.get_tmp_dir(), "dump") try: if not vmxml.xmltreefile.find('devices').findall('panic'): # Set panic device panic_dev = Panic() if panic_model: panic_dev.model = panic_model if addr_type: panic_dev.addr_type = addr_type if addr_iobase: panic_dev.addr_iobase = addr_iobase vmxml.add_device(panic_dev) vmxml.on_crash = "coredump-restart" vmxml.sync() vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) if not vmxml.xmltreefile.find('devices').findall('panic'): test.cancel("No 'panic' device in the guest, maybe " "your libvirt version doesn't support it") # Setup qemu.conf if bypass_cache == 'not_set': del config.auto_dump_bypass_cache else: config.auto_dump_bypass_cache = bypass_cache config.auto_dump_path = dump_path if os.path.exists(dump_path): os.rmdir(dump_path) os.mkdir(dump_path) # Restart libvirtd to make change valid. libvirtd.restart() # Restart VM to create a new qemu process. if vm.is_alive(): vm.destroy() vm.start() session = vm.wait_for_login() # Stop kdump in the guest session.cmd("service kdump stop", ignore_all_errors=True) # Enable sysRq session.cmd("echo 1 > /proc/sys/kernel/sysrq") try: # Crash the guest session.cmd("echo c > /proc/sysrq-trigger", timeout=1) except ShellTimeoutError: pass session.close() iohelper_pid = process.run('pgrep -f %s' % dump_path).stdout_text.strip() logging.error('%s', iohelper_pid) # Get file open flags containing bypass cache information. with open('/proc/%s/fdinfo/1' % iohelper_pid, 'r') as fdinfo: flags = 0 for line in fdinfo.readlines(): if line.startswith('flags:'): flags = int(line.split()[1], 8) logging.debug('file open flag is: %o', flags) with open('/proc/%s/cmdline' % iohelper_pid) as cmdinfo: cmdline = cmdinfo.readline() logging.debug(cmdline.split()) # Kill core dump process to speed up test process.run('kill %s' % iohelper_pid) arch = platform.machine() if arch == 'x86_64': # Check if bypass cache flag set or unset accordingly. if (flags & 0o40000) and bypass_cache != '1': test.fail('auto_dump_bypass_cache is %s but flags ' 'is %o' % (bypass_cache, flags)) if not (flags & 0o40000) and bypass_cache == '1': test.fail('auto_dump_bypass_cache is %s but flags ' 'is %o' % (bypass_cache, flags)) elif arch == 'ppc64le': # Check if bypass cache flag set or unset accordingly. if (flags & 0o400000) and bypass_cache != '1': test.fail('auto_dump_bypass_cache is %s but flags ' 'is %o' % (bypass_cache, flags)) if not (flags & 0o400000) and bypass_cache == '1': test.fail('auto_dump_bypass_cache is %s but flags ' 'is %o' % (bypass_cache, flags)) else: test.cancel("Unknown Arch. Do the necessary changes to" " support") finally: backup_xml.sync() config.restore() libvirtd.restart() if os.path.exists(dump_path): shutil.rmtree(dump_path)
def run(test, params, env): """ Test only ppc hosts """ if 'ppc64le' not in platform.machine().lower(): test.cancel('This case is for ppc only.') vm_name = params.get('main_vm', 'EXAMPLE') status_error = 'yes' == params.get('status_error', 'no') case = params.get('case', '') error_msg = params.get('error_msg', '') # Backup vm xml bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Assign address to panic device if case == 'panic_address': # Check if there is already a panic device on vm, remove it if true origin_panic = vmxml.get_devices('panic') if origin_panic: for dev in origin_panic: vmxml.del_device(dev) vmxml.sync() # Create panic device to add to vm panic_dev = Panic() panic_dev.model = 'pseries' panic_dev.addr_type = 'isa' panic_dev.addr_iobase = '0x505' logging.debug(panic_dev) vmxml.add_device(panic_dev) if version_compare(7, 0, 0): cmd_result = virsh.define(vmxml.xml, debug=True) else: vmxml.sync() cmd_result = virsh.start(vm_name, debug=True, ignore_status=True) # Get Ethernet pci devices if case == 'unavail_pci_device': lspci = process.run('lspci|grep Ethernet', shell=True).stdout_text.splitlines() pci_ids = [line.split()[0] for line in lspci] logging.debug(pci_ids) max_id = max([int(pci_id.split('.')[-1]) for pci_id in pci_ids]) prefix = pci_ids[-1].split('.')[0] # Create fake pci ids for i in range(5): max_id += 1 # function must be <= 7 if max_id > 7: break new_pci_id = '.'.join([prefix, str(max_id)]) new_pci_xml = libvirt.create_hostdev_xml(new_pci_id) vmxml.add_device(new_pci_xml) vmxml.sync() logging.debug('Vm xml after adding unavailable pci devices: \n%s', vmxml) # Check result if there's a result to check if 'cmd_result' in locals(): libvirt.check_exit_status(cmd_result, status_error) if error_msg: libvirt.check_result(cmd_result, [error_msg]) finally: # In case vm disappeared after test if case == 'unavail_pci_device': virsh.define(bk_xml.xml, debug=True) else: bk_xml.sync()
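# Hedged illustration of the panic_address case above: on pSeries guests the
# panic device is built in (model='pseries') and is not expected to take an ISA
# <address>, so a definition carrying a fragment like the one below should be
# rejected (by virsh define on newer libvirt, otherwise when the guest is
# started, matching the version check in the test); the exact error text comes
# from the test configuration.
#   <panic model='pseries'>
#     <address type='isa' iobase='0x505'/>
#   </panic>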
def trigger_events(dom, events_list=[]): """ Trigger various events in events_list :param dom: the vm objects corresponding to the domain :return: the expected output that virsh event command prints out """ expected_events_list = [] save_path = os.path.join(tmpdir, "%s_event.save" % dom.name) print(dom.name) xmlfile = dom.backup_xml() try: for event in events_list: logging.debug("Current event is: %s", event) if event in ['start', 'restore', 'create', 'edit', 'define', 'undefine', 'crash']: if dom.is_alive(): dom.destroy() if event in ['create', 'define']: dom.undefine() else: if not dom.is_alive(): dom.start() dom.wait_for_login().close() if event == "resume": dom.pause() if event == "undefine": virsh.undefine(dom.name, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Undefined Removed") elif event == "create": virsh.create(xmlfile, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Resumed Unpaused") expected_events_list.append("'lifecycle' for %s:" " Started Booted") elif event == "destroy": virsh.destroy(dom.name, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Stopped Destroyed") elif event == "define": virsh.define(xmlfile, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Defined Added") elif event == "start": virsh.start(dom.name, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Resumed Unpaused") expected_events_list.append("'lifecycle' for %s:" " Started Booted") dom.wait_for_login().close() elif event == "suspend": virsh.suspend(dom.name, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Suspended Paused") if not libvirt_version.version_compare(5, 3, 0): expected_events_list.append("'lifecycle' for %s:" " Suspended Paused") elif event == "resume": virsh.resume(dom.name, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Resumed Unpaused") elif event == "save": virsh.save(dom.name, save_path, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Suspended Paused") expected_events_list.append("'lifecycle' for %s:" " Stopped Saved") elif event == "restore": if not os.path.exists(save_path): logging.error("%s not exist", save_path) else: virsh.restore(save_path, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Started Restored") expected_events_list.append("'lifecycle' for %s:" " Resumed Snapshot") elif event == "edit": #Check whether 'description' element exists. domxml = virsh.dumpxml(dom.name).stdout.strip() find_desc = parseString(domxml).getElementsByTagName("description") if find_desc == []: #If not exists, add one for it. logging.info("Adding <description> to guest") virsh.desc(dom.name, "--config", "Added desc for testvm", **virsh_dargs) #The edit operation is to delete 'description' element. 
edit_cmd = [r":g/<description.*<\/description>/d"] utlv.exec_virsh_edit(dom.name, edit_cmd) expected_events_list.append("'lifecycle' for %s:" " Defined Updated") elif event == "shutdown": if signal_name is None: virsh.shutdown(dom.name, **virsh_dargs) # Wait a few seconds for shutdown finish time.sleep(3) if utils_misc.compare_qemu_version(2, 9, 0): #Shutdown reason distinguished from qemu_2.9.0-9 expected_events_list.append("'lifecycle' for %s:" " Shutdown Finished after guest request") else: os.kill(dom.get_pid(), getattr(signal, signal_name)) if utils_misc.compare_qemu_version(2, 9, 0): expected_events_list.append("'lifecycle' for %s:" " Shutdown Finished after host request") if not utils_misc.compare_qemu_version(2, 9, 0): expected_events_list.append("'lifecycle' for %s:" " Shutdown Finished") wait_for_shutoff(dom) expected_events_list.append("'lifecycle' for %s:" " Stopped Shutdown") elif event == "crash": if not vmxml.xmltreefile.find('devices').findall('panic'): # Set panic device panic_dev = Panic() panic_dev.model = panic_model panic_dev.addr_type = addr_type panic_dev.addr_iobase = addr_iobase vmxml.add_device(panic_dev) vmxml.on_crash = "coredump-restart" vmxml.sync() logging.info("Guest xml now is: %s", vmxml) dom.start() session = dom.wait_for_login() # Stop kdump in the guest session.cmd("systemctl stop kdump", ignore_all_errors=True) # Enable sysRq session.cmd("echo 1 > /proc/sys/kernel/sysrq") try: # Crash the guest session.cmd("echo c > /proc/sysrq-trigger", timeout=60) except (ShellTimeoutError, ShellProcessTerminatedError) as details: logging.info(details) session.close() expected_events_list.append("'lifecycle' for %s:" " Crashed Panicked") expected_events_list.append("'lifecycle' for %s:" " Resumed Unpaused") elif event == "reset": virsh.reset(dom.name, **virsh_dargs) expected_events_list.append("'reboot' for %s") elif event == "vcpupin": virsh.vcpupin(dom.name, '0', '0', **virsh_dargs) expected_events_list.append("'tunable' for %s:" "\n\tcputune.vcpupin0: 0") elif event == "emulatorpin": virsh.emulatorpin(dom.name, '0', **virsh_dargs) expected_events_list.append("'tunable' for %s:" "\n\tcputune.emulatorpin: 0") elif event == "setmem": mem_size = int(params.get("mem_size", 512000)) virsh.setmem(dom.name, mem_size, **virsh_dargs) expected_events_list.append("'balloon-change' for %s:") elif event == "device-added-removed": add_disk(dom.name, new_disk, 'vdb', '') expected_events_list.append("'device-added' for %s:" " virtio-disk1") virsh.detach_disk(dom.name, 'vdb', **virsh_dargs) expected_events_list.append("'device-removed' for %s:" " virtio-disk1") iface_xml_obj = create_iface_xml() iface_xml_obj.xmltreefile.write() virsh.detach_device(dom.name, iface_xml_obj.xml, **virsh_dargs) expected_events_list.append("'device-removed' for %s:" " net0") time.sleep(2) virsh.attach_device(dom.name, iface_xml_obj.xml, **virsh_dargs) expected_events_list.append("'device-added' for %s:" " net0") elif event == "block-threshold": add_disk(dom.name, new_disk, 'vdb', '', format=disk_format) logging.debug(process.run('qemu-img info %s -U' % new_disk)) virsh.domblkthreshold(vm_name, 'vdb', '100M') session = dom.wait_for_login() session.cmd("mkfs.ext4 /dev/vdb && mount /dev/vdb /mnt && ls /mnt && " "dd if=/dev/urandom of=/mnt/bigfile bs=1M count=300 && sync") time.sleep(5) session.close() expected_events_list.append("'block-threshold' for %s:" " dev: vdb(%s) 104857600 29368320") virsh.detach_disk(dom.name, 'vdb', **virsh_dargs) elif event == "change-media": target_device = "hdc" 
device_target_bus = params.get("device_target_bus", "ide") disk_blk = vm_xml.VMXML.get_disk_blk(dom.name) logging.info("disk_blk %s", disk_blk) if target_device not in disk_blk: logging.info("Adding cdrom to guest") if dom.is_alive(): dom.destroy() add_disk(dom.name, "''", target_device, ("--type cdrom --sourcetype file --driver qemu " + "--config --targetbus %s" % device_target_bus)) dom.start() all_options = new_disk + " --insert" virsh.change_media(dom.name, target_device, all_options, **virsh_dargs) expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus + " opened") expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus + " closed") all_options = new_disk + " --eject" virsh.change_media(dom.name, target_device, all_options, **virsh_dargs) expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus + " opened") elif event == "hwclock": session = dom.wait_for_login() try: session.cmd("hwclock --systohc", timeout=60) except (ShellTimeoutError, ShellProcessTerminatedError) as details: logging.info(details) session.close() expected_events_list.append("'rtc-change' for %s:") elif event == "metadata_set": metadata_uri = params.get("metadata_uri") metadata_key = params.get("metadata_key") metadata_value = params.get("metadata_value") virsh.metadata(dom.name, metadata_uri, options="", key=metadata_key, new_metadata=metadata_value, **virsh_dargs) expected_events_list.append("'metadata-change' for %s: " "element http://app.org/") elif event == "metadata_edit": metadata_uri = "http://herp.derp/" metadata_key = "herp" metadata_value = "<derp xmlns:foobar='http://foo.bar/'>foo<bar></bar></derp>" virsh_cmd = r"virsh metadata %s --uri %s --key %s %s" virsh_cmd = virsh_cmd % (dom.name, metadata_uri, metadata_key, "--edit") session = aexpect.ShellSession("sudo -s") logging.info("Running command: %s", virsh_cmd) try: session.sendline(virsh_cmd) session.sendline(r":insert") session.sendline(metadata_value) session.sendline(".") session.send('ZZ') remote.handle_prompts(session, None, None, r"[\#\$]\s*$", debug=True, timeout=60) except Exception as e: test.error("Error occured: %s" % e) session.close() # Check metadata after edit virsh.metadata(dom.name, metadata_uri, options="", key=metadata_key, **virsh_dargs) expected_events_list.append("'metadata-change' for %s: " "element http://app.org/") elif event == "metadata_remove": virsh.metadata(dom.name, metadata_uri, options="--remove", key=metadata_key, **virsh_dargs) expected_events_list.append("'metadata-change' for %s: " "element http://app.org/") else: test.error("Unsupported event: %s" % event) # Event may not received immediately time.sleep(3) finally: if os.path.exists(save_path): os.unlink(save_path) if os.path.exists(new_disk): os.unlink(new_disk) return [(dom.name, event) for event in expected_events_list]
def run(test, params, env): """ Test command: virsh domstate. 1.Prepare test environment. 2.When the libvirtd == "off", stop the libvirtd service. 3.Perform virsh domstate operation. 4.Recover test environment. 5.Confirm the test result. """ vm_name = params.get("main_vm", "virt-tests-vm1") vm = env.get_vm(vm_name) libvirtd = params.get("libvirtd", "on") vm_ref = params.get("domstate_vm_ref") status_error = (params.get("status_error", "no") == "yes") extra = params.get("domstate_extra", "") vm_action = params.get("domstate_vm_action", "") vm_oncrash_action = params.get("domstate_vm_oncrash") domid = vm.get_id() domuuid = vm.get_uuid() libvirtd_service = utils_libvirtd.Libvirtd() if vm_ref == "id": vm_ref = domid elif vm_ref == "hex_id": vm_ref = hex(int(domid)) elif vm_ref.find("invalid") != -1: vm_ref = params.get(vm_ref) elif vm_ref == "name": vm_ref = vm_name elif vm_ref == "uuid": vm_ref = domuuid # Back up xml file. vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() # Back up qemu.conf utils.run("cp %s %s" % (QEMU_CONF, QEMU_CONF_BK)) dump_path = os.path.join(test.tmpdir, "dump/") dump_file = "" if vm_action == "crash": if vm.is_alive(): vm.destroy(gracefully=False) # Set on_crash action vmxml.on_crash = vm_oncrash_action # Add <panic> device to domain panic_dev = Panic() panic_dev.addr_type = "isa" panic_dev.addr_iobase = "0x505" vmxml.add_device(panic_dev) vmxml.sync() # Config auto_dump_path in qemu.conf cmd = "echo auto_dump_path = \\\"%s\\\" >> %s" % (dump_path, QEMU_CONF) utils.run(cmd) libvirtd_service.restart() if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']: dump_file = dump_path + vm_name + "-*" # Start VM and check the panic device virsh.start(vm_name, ignore_status=False) vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name) # Skip this test if no panic device find if not vmxml_new.xmltreefile.find('devices').findall('panic'): raise error.TestNAError("No 'panic' device in the guest, maybe " "your libvirt version doesn't support it") try: if vm_action == "suspend": virsh.suspend(vm_name, ignore_status=False) elif vm_action == "resume": virsh.suspend(vm_name, ignore_status=False) virsh.resume(vm_name, ignore_status=False) elif vm_action == "destroy": virsh.destroy(vm_name, ignore_status=False) elif vm_action == "start": virsh.destroy(vm_name, ignore_status=False) virsh.start(vm_name, ignore_status=False) elif vm_action == "kill": libvirtd_service.stop() kill_process_by_pattern(vm_name) libvirtd_service.restart() elif vm_action == "crash": session = vm.wait_for_login() # Stop kdump in the guest session.cmd("service kdump stop", ignore_all_errors=True) # Enable sysRq session.cmd("echo 1 > /proc/sys/kernel/sysrq") # Send key ALT-SysRq-c to crash VM, and command will not return # as vm crashed, so fail early for 'destroy' and 'preserve' action. # For 'restart', 'coredump-restart' and 'coredump-destroy' actions, # they all need more time to dump core file or restart OS, so using # the default session command timeout(60s) try: if vm_oncrash_action in ['destroy', 'preserve']: timeout = 3 else: timeout = 60 session.cmd("echo c > /proc/sysrq-trigger", timeout=timeout) except ShellTimeoutError: pass session.close() except error.CmdError, e: raise error.TestError("Guest prepare action error: %s" % e)
def trigger_events(dom, events_list=[]): """ Trigger various events in events_list :param dom: the vm objects corresponding to the domain :return: the expected output that virsh event command prints out """ expected_events_list = [] save_path = os.path.join(tmpdir, "%s_event.save" % dom.name) print(dom.name) xmlfile = dom.backup_xml() try: for event in events_list: if event in [ 'start', 'restore', 'create', 'define', 'undefine', 'crash' ]: if dom.is_alive(): dom.destroy() if event in ['create', 'define']: dom.undefine() else: if not dom.is_alive(): dom.start() dom.wait_for_login().close() if event == "resume": dom.pause() if event == "undefine": virsh.undefine(dom.name, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Undefined Removed") elif event == "create": virsh.create(xmlfile, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Resumed Unpaused") expected_events_list.append("'lifecycle' for %s:" " Started Booted") elif event == "destroy": virsh.destroy(dom.name, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Stopped Destroyed") elif event == "define": virsh.define(xmlfile, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Defined Added") elif event == "start": virsh.start(dom.name, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Resumed Unpaused") expected_events_list.append("'lifecycle' for %s:" " Started Booted") dom.wait_for_login().close() elif event == "suspend": virsh.suspend(dom.name, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Suspended Paused") if not libvirt_version.version_compare(5, 3, 0): expected_events_list.append("'lifecycle' for %s:" " Suspended Paused") elif event == "resume": virsh.resume(dom.name, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Resumed Unpaused") elif event == "save": virsh.save(dom.name, save_path, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Suspended Paused") expected_events_list.append("'lifecycle' for %s:" " Stopped Saved") elif event == "restore": if not os.path.exists(save_path): logging.error("%s not exist", save_path) else: virsh.restore(save_path, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Started Restored") expected_events_list.append("'lifecycle' for %s:" " Resumed Snapshot") elif event == "edit": #Check whether 'description' element exists. domxml = virsh.dumpxml(dom.name).stdout.strip() find_desc = parseString(domxml).getElementsByTagName( "description") if find_desc == []: #If not exists, add one for it. logging.info("Adding <description> to guest") virsh.desc(dom.name, "--config", "Added desc for testvm", **virsh_dargs) #The edit operation is to delete 'description' element. 
edit_cmd = [r":g/<description.*<\/description>/d"] utlv.exec_virsh_edit(dom.name, edit_cmd) expected_events_list.append("'lifecycle' for %s:" " Defined Updated") elif event == "shutdown": if signal_name is None: virsh.shutdown(dom.name, **virsh_dargs) # Wait a few seconds for shutdown finish time.sleep(3) if utils_misc.compare_qemu_version(2, 9, 0): #Shutdown reason distinguished from qemu_2.9.0-9 expected_events_list.append( "'lifecycle' for %s:" " Shutdown Finished after guest request") else: os.kill(dom.get_pid(), getattr(signal, signal_name)) if utils_misc.compare_qemu_version(2, 9, 0): expected_events_list.append( "'lifecycle' for %s:" " Shutdown Finished after host request") if not utils_misc.compare_qemu_version(2, 9, 0): expected_events_list.append("'lifecycle' for %s:" " Shutdown Finished") expected_events_list.append("'lifecycle' for %s:" " Stopped Shutdown") elif event == "crash": if not vmxml.xmltreefile.find('devices').findall('panic'): # Set panic device panic_dev = Panic() panic_dev.model = panic_model panic_dev.addr_type = addr_type panic_dev.addr_iobase = addr_iobase vmxml.add_device(panic_dev) vmxml.on_crash = "coredump-restart" vmxml.sync() logging.info("Guest xml now is: %s", vmxml) dom.start() session = dom.wait_for_login() # Stop kdump in the guest session.cmd("systemctl stop kdump", ignore_all_errors=True) # Enable sysRq session.cmd("echo 1 > /proc/sys/kernel/sysrq") try: # Crash the guest session.cmd("echo c > /proc/sysrq-trigger", timeout=60) except (ShellTimeoutError, ShellProcessTerminatedError) as details: logging.info(details) session.close() expected_events_list.append("'lifecycle' for %s:" " Crashed Panicked") expected_events_list.append("'lifecycle' for %s:" " Resumed Unpaused") elif event == "reset": virsh.reset(dom.name, **virsh_dargs) expected_events_list.append("'reboot' for %s") elif event == "vcpupin": virsh.vcpupin(dom.name, '0', '0', **virsh_dargs) expected_events_list.append("'tunable' for %s:" "\n\tcputune.vcpupin0: 0") elif event == "emulatorpin": virsh.emulatorpin(dom.name, '0', **virsh_dargs) expected_events_list.append("'tunable' for %s:" "\n\tcputune.emulatorpin: 0") elif event == "setmem": mem_size = int(params.get("mem_size", 512000)) virsh.setmem(dom.name, mem_size, **virsh_dargs) expected_events_list.append("'balloon-change' for %s:") elif event == "device-added-removed": add_disk(dom.name, new_disk, 'vdb', '') expected_events_list.append("'device-added' for %s:" " virtio-disk1") virsh.detach_disk(dom.name, 'vdb', **virsh_dargs) expected_events_list.append("'device-removed' for %s:" " virtio-disk1") iface_xml_obj = create_iface_xml() iface_xml_obj.xmltreefile.write() virsh.detach_device(dom.name, iface_xml_obj.xml, **virsh_dargs) expected_events_list.append("'device-removed' for %s:" " net0") time.sleep(2) virsh.attach_device(dom.name, iface_xml_obj.xml, **virsh_dargs) expected_events_list.append("'device-added' for %s:" " net0") elif event == "change-media": target_device = "hdc" device_target_bus = params.get("device_target_bus", "ide") disk_blk = vm_xml.VMXML.get_disk_blk(dom.name) logging.info("disk_blk %s", disk_blk) if target_device not in disk_blk: logging.info("Adding cdrom to guest") if dom.is_alive(): dom.destroy() add_disk( dom.name, "''", target_device, ("--type cdrom --sourcetype file --driver qemu " + "--config --targetbus %s" % device_target_bus)) dom.start() all_options = new_disk + " --insert" virsh.change_media(dom.name, target_device, all_options, **virsh_dargs) expected_events_list.append("'tray-change' for %s disk" + " 
.*%s.*:" % device_target_bus + " opened") expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus + " closed") all_options = new_disk + " --eject" virsh.change_media(dom.name, target_device, all_options, **virsh_dargs) expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus + " opened") else: test.error("Unsupported event: %s" % event) # Event may not received immediately time.sleep(3) finally: if os.path.exists(save_path): os.unlink(save_path) if os.path.exists(new_disk): os.unlink(new_disk) return [(dom.name, event) for event in expected_events_list]
def run(test, params, env):
    """
    Test command: virsh domstate.

    1. Prepare test environment.
    2. When the libvirtd == "off", stop the libvirtd service.
    3. Perform virsh domstate operation.
    4. Recover test environment.
    5. Confirm the test result.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    libvirtd_state = params.get("libvirtd", "on")
    vm_ref = params.get("domstate_vm_ref")
    status_error = (params.get("status_error", "no") == "yes")
    extra = params.get("domstate_extra", "")
    vm_action = params.get("domstate_vm_action", "")
    vm_oncrash_action = params.get("domstate_vm_oncrash")
    reset_action = "yes" == params.get("reset_action", "no")
    dump_option = params.get("dump_option", "")
    start_action = params.get("start_action", "normal")
    kill_action = params.get("kill_action", "normal")
    check_libvirtd_log = params.get("check_libvirtd_log", "no")
    err_msg = params.get("err_msg", "")
    remote_uri = params.get("remote_uri")

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # Back up xml file.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Back up qemu.conf
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    # Config libvirtd log
    if check_libvirtd_log == "yes":
        libvirtd_conf = utils_config.LibvirtdConfig()
        libvirtd_log_file = os.path.join(data_dir.get_tmp_dir(), "libvirtd.log")
        libvirtd_conf["log_level"] = '1'
        libvirtd_conf["log_filters"] = ('"1:json 1:libvirt 1:qemu 1:monitor '
                                        '3:remote 4:event"')
        libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_file
        logging.debug("the libvirtd config file content is:\n %s" % libvirtd_conf)
        libvirtd.restart()

    # Get image file
    image_source = vm.get_first_disk_devices()['source']
    logging.debug("image source: %s" % image_source)
    new_image_source = image_source + '.rename'

    dump_path = os.path.join(data_dir.get_tmp_dir(), "dump/")
    logging.debug("dump_path: %s", dump_path)
    try:
        os.mkdir(dump_path)
    except OSError:
        # If the path already exists then pass
        pass
    dump_file = ""

    try:
        # Let's have guest memory less so that dumping core takes
        # time which doesn't timeout the testcase
        if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
            memory_value = int(params.get("memory_value", "2097152"))
            memory_unit = params.get("memory_unit", "KiB")
            vmxml.set_memory(memory_value)
            vmxml.set_memory_unit(memory_unit)
            logging.debug(vmxml)
            vmxml.sync()

        if vm_action == "crash":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml.on_crash = vm_oncrash_action
            if not vmxml.xmltreefile.find('devices').findall('panic'):
                # Add <panic> device to domain
                panic_dev = Panic()
                if "ppc" not in platform.machine():
                    panic_dev.addr_type = "isa"
                    panic_dev.addr_iobase = "0x505"
                vmxml.add_device(panic_dev)
            vmxml.sync()
            # Config auto_dump_path in qemu.conf
            qemu_conf.auto_dump_path = dump_path
            libvirtd.restart()
            if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
                dump_file = dump_path + "*" + vm_name[:20] + "-*"
            # Start VM and check the panic device
            virsh.start(vm_name, ignore_status=False)
            vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
            # Skip this test if no panic device is found
            if not vmxml_new.xmltreefile.find('devices').findall('panic'):
                test.cancel("No 'panic' device in the guest. Maybe your "
                            "libvirt version doesn't support it.")
        try:
            if vm_action == "suspend":
                virsh.suspend(vm_name, ignore_status=False)
            elif vm_action == "resume":
                virsh.suspend(vm_name, ignore_status=False)
                virsh.resume(vm_name, ignore_status=False)
            elif vm_action == "destroy":
                virsh.destroy(vm_name, ignore_status=False)
            elif vm_action == "start":
                virsh.destroy(vm_name, ignore_status=False)
                if start_action == "rename":
                    # Rename the guest image file to make the guest fail to start
                    os.rename(image_source, new_image_source)
                    virsh.start(vm_name, ignore_status=True)
                else:
                    virsh.start(vm_name, ignore_status=False)
                    if start_action == "restart_libvirtd":
                        libvirtd.restart()
            elif vm_action == "kill":
                if kill_action == "stop_libvirtd":
                    libvirtd.stop()
                    utils_misc.kill_process_by_pattern(vm_name)
                    libvirtd.restart()
                elif kill_action == "reboot_vm":
                    virsh.reboot(vm_name, ignore_status=False)
                    utils_misc.kill_process_tree(vm.get_pid(), signal.SIGKILL)
                else:
                    utils_misc.kill_process_tree(vm.get_pid(), signal.SIGKILL)
            elif vm_action == "crash":
                session = vm.wait_for_login()
                session.cmd("service kdump stop", ignore_all_errors=True)
                # Enable sysRq
                session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                # Send key ALT-SysRq-c to crash VM, and command will not
                # return as vm crashed, so fail early for 'destroy' and
                # 'preserve' action. For 'restart', 'coredump-restart'
                # and 'coredump-destroy' actions, they all need more time
                # to dump core file or restart OS, so using the default
                # session command timeout (60s)
                try:
                    if vm_oncrash_action in ['destroy', 'preserve']:
                        timeout = 3
                    else:
                        timeout = 60
                    session.cmd("echo c > /proc/sysrq-trigger",
                                timeout=timeout)
                except (ShellTimeoutError, ShellProcessTerminatedError):
                    pass
                session.close()
            elif vm_action == "dump":
                dump_file = dump_path + "*" + vm_name + "-*"
                virsh.dump(vm_name, dump_file, dump_option,
                           ignore_status=False)
        except process.CmdError as detail:
            test.error("Guest prepare action error: %s" % detail)

        if libvirtd_state == "off":
            libvirtd.stop()

        # A timing issue can cause the test to check domstate before the
        # prior kill action has completed
        if vm_action == "kill":
            utils_misc.wait_for(vm.is_dead, timeout=20)

        if remote_uri:
            remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
            remote_pwd = params.get("remote_pwd", None)
            remote_user = params.get("remote_user", "root")
            if remote_ip.count("EXAMPLE.COM"):
                test.cancel("Test 'remote' parameters not setup")
            ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)

        result = virsh.domstate(vm_ref, extra, ignore_status=True,
                                debug=True, uri=remote_uri)
        status = result.exit_status
        output = result.stdout.strip()

        # Check status_error
        if status_error:
            if not status:
                if libvirtd_state == "off" and libvirt_version.version_compare(5, 6, 0):
                    logging.info("From libvirt version 5.6.0 libvirtd is "
                                 "restarted and command should succeed.")
                else:
                    test.fail("Run successfully with wrong command!")
        else:
            if status or not output:
                test.fail("Run failed with right command")
            if extra.count("reason"):
                if vm_action == "suspend":
                    # If not, it will take a long time to destroy the vm
                    virsh.destroy(vm_name)
                    if not output.count("user"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "resume":
                    if not output.count("unpaused"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "destroy":
                    if not output.count("destroyed"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "start":
                    if start_action == "rename":
                        if not output.count("shut off (failed)"):
                            test.fail(err_msg % vm_action)
                    else:
                        if not output.count("booted"):
                            test.fail(err_msg % vm_action)
                elif vm_action == "kill":
                    if not output.count("crashed"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "crash":
                    if not check_crash_state(output, vm_oncrash_action,
                                             vm_name, dump_file):
                        test.fail(err_msg % vm_action)
                    # VM will be in preserved state, perform virsh reset
                    # and check VM reboots and domstate reflects running
                    # state from crashed state as bug is observed here
                    if vm_oncrash_action == "preserve" and reset_action:
                        virsh_dargs = {'debug': True, 'ignore_status': True}
                        ret = virsh.reset(vm_name, **virsh_dargs)
                        libvirt.check_exit_status(ret)
                        ret = virsh.domstate(vm_name, extra,
                                             **virsh_dargs).stdout.strip()
                        if "paused (crashed)" not in ret:
                            test.fail("vm fails to change state from crashed"
                                      " to paused after virsh reset")
                        # It will be in paused (crashed) state after reset
                        # and resume is required for the vm to reboot
                        ret = virsh.resume(vm_name, **virsh_dargs)
                        libvirt.check_exit_status(ret)
                        vm.wait_for_login()
                        cmd_output = virsh.domstate(vm_name,
                                                    '--reason').stdout.strip()
                        if "running" not in cmd_output:
                            test.fail("guest state failed to get updated")
                    if vm_oncrash_action in ['coredump-destroy',
                                             'coredump-restart']:
                        if not find_dump_file:
                            test.fail("Core dump file is not created in dump "
                                      "path: %s" % dump_path)
                    # To cover bug 1178652
                    if (vm_oncrash_action == "rename-restart" and
                            check_libvirtd_log == "yes"):
                        libvirtd.restart()
                        if not os.path.exists(libvirtd_log_file):
                            test.fail("Expected VM log file %s does not exist"
                                      % libvirtd_log_file)
                        cmd = ("grep -nr '%s' %s"
                               % (err_msg, libvirtd_log_file))
                        if not process.run(cmd, ignore_status=True,
                                           shell=True).exit_status:
                            test.fail("Found error message %s in log file: %s."
                                      % (err_msg, libvirtd_log_file))
                elif vm_action == "dump":
                    if dump_option == "--live":
                        if not output.count("running (unpaused)"):
                            test.fail(err_msg % vm_action)
                    elif dump_option == "--crash":
                        if not output.count("shut off (crashed)"):
                            test.fail(err_msg % vm_action)
            if vm_ref == "remote":
                if not (re.search("running", output) or
                        re.search("blocked", output) or
                        re.search("idle", output)):
                    test.fail("Run failed with right command")
    finally:
        qemu_conf.restore()
        if check_libvirtd_log == "yes":
            libvirtd_conf.restore()
            if os.path.exists(libvirtd_log_file):
                os.remove(libvirtd_log_file)
        libvirtd.restart()
        if vm_action == "start" and start_action == "rename":
            os.rename(new_image_source, image_source)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if os.path.exists(dump_path):
            shutil.rmtree(dump_path)
def run(test, params, env):
    """
    Test auto_dump_* parameter in qemu.conf.

    1) Change auto_dump_* in qemu.conf;
    2) Restart libvirt daemon;
    3) Check if file open state changed accordingly.
    """
    vm_name = params.get("main_vm", "virt-tests-vm1")
    bypass_cache = params.get("auto_dump_bypass_cache", "not_set")

    vm = env.get_vm(vm_name)
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    config = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # Set panic device
        panic_dev = Panic()
        panic_dev.addr_type = "isa"
        panic_dev.addr_iobase = "0x505"
        vmxml.add_device(panic_dev)
        vmxml.on_crash = "coredump-restart"
        vmxml.sync()

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        if not vmxml.xmltreefile.find('devices').findall('panic'):
            raise error.TestNAError("No 'panic' device in the guest, maybe "
                                    "your libvirt version doesn't support it")

        # Setup qemu.conf
        if bypass_cache == 'not_set':
            del config.auto_dump_bypass_cache
        else:
            config.auto_dump_bypass_cache = bypass_cache

        dump_path = os.path.join(test.tmpdir, "dump")
        config.auto_dump_path = dump_path
        os.mkdir(dump_path)

        # Restart libvirtd to make change valid.
        libvirtd.restart()

        # Restart VM to create a new qemu process.
        if vm.is_alive():
            vm.destroy()
        vm.start()

        session = vm.wait_for_login()

        # Stop kdump in the guest
        session.cmd("service kdump stop", ignore_all_errors=True)
        # Enable sysRq
        session.cmd("echo 1 > /proc/sys/kernel/sysrq")
        try:
            # Crash the guest
            session.cmd("echo c > /proc/sysrq-trigger", timeout=1)
        except ShellTimeoutError:
            pass
        session.close()

        iohelper_pid = utils.run('pgrep -f %s' % dump_path).stdout.strip()
        logging.error('%s', iohelper_pid)

        # Get file open flags containing bypass cache information.
        fdinfo = open('/proc/%s/fdinfo/1' % iohelper_pid, 'r')
        flags = 0
        for line in fdinfo.readlines():
            if line.startswith('flags:'):
                flags = int(line.split()[1], 8)
                logging.debug('File open flag is: %o', flags)
        fdinfo.close()

        cmdline = open('/proc/%s/cmdline' % iohelper_pid).readline()
        logging.debug(cmdline.split())

        # Kill core dump process to speed up test
        utils.run('kill %s' % iohelper_pid)

        # Check if bypass cache flag is set or unset accordingly.
        if (flags & 0o40000) and bypass_cache != '1':
            raise error.TestFail('auto_dump_bypass_cache is %s but flags '
                                 'is %o' % (bypass_cache, flags))
        if not (flags & 0o40000) and bypass_cache == '1':
            raise error.TestFail('auto_dump_bypass_cache is %s but flags '
                                 'is %o' % (bypass_cache, flags))
    finally:
        backup_xml.sync()
        config.restore()
        libvirtd.restart()
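# The 0o40000 constant used above is the numeric value of O_DIRECT on x86_64
# (the newer variant of this test parameterizes it as target_flags). The
# helper below is a hedged, stand-alone sketch of the same check: it reads
# /proc/<pid>/fdinfo/<fd> and reports whether the dump writer opened its
# output with O_DIRECT, i.e. whether auto_dump_bypass_cache took effect.
def fd_has_o_direct(pid, fd=1, o_direct=0o40000):
    """
    Hedged sketch: return True if the given fd of a process was opened with
    O_DIRECT. The o_direct value is architecture dependent and is assumed
    here to be the x86_64 one.
    """
    with open('/proc/%s/fdinfo/%s' % (pid, fd)) as fdinfo:
        for line in fdinfo:
            if line.startswith('flags:'):
                # fdinfo reports the open flags as an octal number
                return bool(int(line.split()[1], 8) & o_direct)
    return False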
def run(test, params, env):
    """
    Test command: virsh domstats.

    1. Prepare vm state.
    2. Perform virsh domstats operation.
    3. Confirm the test result.
    4. Recover test environment.
    """
    default_vm_name = params.get("main_vm", "avocado-vt-vm1")
    default_vm = env.get_vm(default_vm_name)
    vm_list = params.get("vm_list", "")
    vm_state = params.get("vm_state", "")
    domstats_option = params.get("domstats_option")
    raw_print = "yes" == params.get("raw_print", "no")
    enforce_command = "yes" == params.get("enforce_command", "no")
    iothread_add_ids = eval(params.get("iothread_add_ids", '[]'))
    iothread_del_ids = eval(params.get("iothread_del_ids", '[]'))
    status_error = (params.get("status_error", "no") == "yes")

    if "--nowait" in domstats_option and not libvirt_version.version_compare(4, 5, 0):
        test.cancel("The --nowait option is not supported before libvirt 4.5.0")

    vms = [default_vm]
    if vm_list:
        for name in vm_list.split():
            if name != default_vm_name:
                if env.get_vm(name):
                    vms.append(env.get_vm(name))
    backup_xml_list = []
    try:
        if not status_error:
            for vm in vms:
                # Back up xml file
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
                backup_xml_list.append(vmxml.copy())
                if vm_state == "crash":
                    if vm.is_alive():
                        vm.destroy(gracefully=False)
                    vmxml.on_crash = "preserve"
                    # Add <panic> device to domain
                    panic_dev = Panic()
                    panic_dev.addr_type = "isa"
                    panic_dev.addr_iobase = "0x505"
                    vmxml.add_device(panic_dev)
                    vmxml.sync()
                    virsh.start(vm.name, ignore_status=False)
                    vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm.name)
                    # Skip this test if no panic device is found
                    if not vmxml_new.xmltreefile.find('devices').findall('panic'):
                        test.cancel("No 'panic' device in the guest, maybe "
                                    "your libvirt version doesn't support it.")
                prepare_vm_state(vm, vm_state)
        if enforce_command:
            domstats_option += " --enforce"
        if raw_print:
            domstats_option += " --raw"
        if "--nowait" in domstats_option:
            pool = ThreadPool(processes=1)
            async_result = pool.apply_async(create_snap,
                                            (vm_list, '--no-metadata'))
            return_val = async_result.get()
        if "--iothread" in domstats_option:
            add_iothread(vm_list, iothread_add_ids)
        # Run virsh command
        result = virsh.domstats(vm_list, domstats_option,
                                ignore_status=True, debug=True)
        status = result.exit_status
        output = result.stdout.strip()

        # Check status_error
        if status_error:
            if not status:
                if "unsupported flags" in result.stderr:
                    test.cancel(result.stderr)
                test.fail("Run successfully with wrong command!")
        else:
            if status:
                test.fail("Run failed with right command")
            else:
                for vm in vms:
                    if not check_output(output, vm, vm_state, domstats_option):
                        test.fail("Check command output failed")
            if "--iothread" in domstats_option:
                test_iothread(vm_list, iothread_add_ids, iothread_del_ids,
                              output, test)
    finally:
        try:
            for vm in vms:
                vm.destroy(gracefully=False)
        except AttributeError:
            pass
        for backup_xml in backup_xml_list:
            backup_xml.sync()
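# prepare_vm_state(), check_output(), add_iothread(), create_snap() and
# test_iothread() are helpers defined elsewhere in this module. The function
# below is only a hedged sketch of what prepare_vm_state() might do: drive
# the domain into the state named by the "vm_state" parameter before the
# domstats output is checked. It reuses virsh helpers already used above.
def prepare_vm_state_sketch(vm, vm_state):
    """Hedged sketch: put the domain into the requested state."""
    if vm_state == "running":
        if not vm.is_alive():
            vm.start()
    elif vm_state == "paused":
        if not vm.is_alive():
            vm.start()
        virsh.suspend(vm.name, ignore_status=False)
    elif vm_state == "shutoff":
        if vm.is_alive():
            vm.destroy(gracefully=False)
    # The "crash" state is prepared in run() itself by adding a <panic>
    # device, so it is not handled here.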