def run(test, params, env):
    """
    Test watchdog device:

    1.Add watchdog device to the guest xml.
    2.Start the guest.
    3.Trigger the watchdog in the guest.
    4.Confirm the guest status.

    Covers coldplug (model_test), hotplug and hotunplug scenarios; the
    watchdog action under test comes from params["action"].
    """

    def trigger_watchdog(model):
        """
        Verify the watchdog appears on the qemu command line (coldplug only),
        then arm it from inside the guest by writing to /dev/watchdog.

        :param model: watchdog device model (e.g. "i6300esb" or "ib700")
        """
        watchdog_device = "device %s" % model
        if action == "dump":
            # qemu has no "dump" watchdog action; libvirt implements dump by
            # pausing the guest and dumping it from the host side, so the qemu
            # command line carries "pause" in that case.
            watchdog_action = "watchdog-action pause"
        else:
            watchdog_action = "watchdog-action %s" % action
        if not hotplug_test:
            # Coldplugged device must be visible in the qemu process cmdline.
            vm_pid = vm.get_pid()
            with open("/proc/%s/cmdline" % vm_pid) as vm_cmdline_file:
                vm_cmdline = vm_cmdline_file.read()
                # /proc cmdline separates argv entries with NUL bytes.
                vm_cmdline = vm_cmdline.replace('\x00', ' ')
                if not all(option in vm_cmdline
                           for option in (watchdog_device, watchdog_action)):
                    test.fail("Can not find %s or %s in qemu cmd line"
                              % (watchdog_device, watchdog_action))
        # Best effort: stop a GNOME guest from powering itself off via the
        # power-button handler; ignored on guests without gsettings.
        cmd = "gsettings set org.gnome.settings-daemon.plugins.power button-power shutdown"
        session.cmd(cmd, ignore_all_errors=True)

        try:
            if model == "ib700":
                # The ib700 driver is not autoloaded; modprobe it explicitly.
                try:
                    session.cmd("modprobe ib700wdt")
                except aexpect.ShellCmdError:
                    session.close()
                    test.fail("Failed to load module ib700wdt")
            session.cmd("dmesg | grep -i %s && lsmod | grep %s" % (model, model))
            # Opening /dev/watchdog starts the timer; since nothing keeps
            # petting it, the watchdog expires and the action is taken.
            session.cmd("echo 1 > /dev/watchdog")
        except aexpect.ShellCmdError as e:
            session.close()
            test.fail("Failed to trigger watchdog: %s" % e)

    def watchdog_attached(vm_name):
        """
        Check whether a watchdog device is present in the live domain XML.

        :param vm_name: vm name
        :return: True if devices/watchdog exists in the dumpxml, else False
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        if vmxml.xmltreefile.find('devices/watchdog'):
            return True
        else:
            return False

    def confirm_guest_status():
        """
        Confirm the guest reached the state implied by the watchdog action.
        """
        def _booting_completed():
            # The guest has rebooted once "uptime --since" reports a later
            # boot timestamp than the one recorded before the trigger.
            session = vm.wait_for_login()
            second_boot_time = None
            try:
                status, second_boot_time = session.cmd_status_output("uptime --since")
                logging.debug("The second boot time is %s", second_boot_time)
            except (aexpect.ShellStatusError,
                    aexpect.ShellProcessTerminatedError) as e:
                logging.error("Exception caught:%s", e)
            session.close()
            # Fix: if the command failed, second_boot_time is still None and
            # "None > str" raises TypeError on Python 3 — treat as "not yet".
            if second_boot_time is None:
                return False
            # ISO-formatted timestamps compare correctly as plain strings.
            return second_boot_time > first_boot_time

        def _inject_nmi():
            # An injected NMI leaves a trace in the guest kernel log.
            session = vm.wait_for_login()
            status, output = session.cmd_status_output("dmesg | grep -i nmi")
            session.close()
            if status == 0:
                logging.debug(output)
                return True
            return False

        def _inject_nmi_event():
            # Stop the looping "virsh event" reader and inspect its output.
            virsh_session.send_ctrl("^C")
            output = virsh_session.get_stripped_output()
            if "inject-nmi" not in output:
                return False
            return True

        def _check_dump_file(dump_path, domain_id):
            # Auto core dump files are named "<domain id>-<timestamp>...".
            dump_file = glob.glob('%s%s-*' % (dump_path, domain_id))
            if len(dump_file):
                logging.debug("Find the auto core dump file:\n%s", dump_file[0])
                os.remove(dump_file[0])
                return True
            return False

        if action in ["poweroff", "shutdown"]:
            if not utils_misc.wait_for(lambda: vm.state() == "shut off", 180, 10):
                test.fail("Guest not shutdown after watchdog triggered")
            else:
                logging.debug("Guest is in shutdown state after watchdog triggered")
        elif action == "reset":
            if not utils_misc.wait_for(_booting_completed, 600, 10):
                test.fail("Guest not reboot after watchdog triggered")
            else:
                logging.debug("Guest is rebooted after watchdog triggered")
        elif action == "pause":
            if utils_misc.wait_for(lambda: vm.state() == "paused", 180, 10):
                logging.debug("Guest is in paused status after watchdog triggered.")
                cmd_output = virsh.domstate(vm_name, '--reason').stdout.strip()
                logging.debug("Check guest status: %s\n", cmd_output)
                if cmd_output != "paused (watchdog)":
                    test.fail("The domstate is not correct after dump by watchdog")
            else:
                test.fail("Guest not pause after watchdog triggered")
        elif action == "none":
            # With action=none the guest must keep running despite the expiry.
            if utils_misc.wait_for(lambda: vm.state() == "shut off", 180, 10):
                test.fail("Guest shutdown unexpectedly")
            else:
                logging.debug("Guest is not in shutoff state since watchdog action is none.")
        elif action == "inject-nmi":
            if not utils_misc.wait_for(_inject_nmi, 180, 10):
                test.fail("Guest not receive inject-nmi after watchdog triggered\n")
            elif not utils_misc.wait_for(_inject_nmi_event, 180, 10):
                test.fail("No inject-nmi watchdog event caught")
            else:
                logging.debug("Guest received inject-nmi and inject-nmi watchdog event "
                              " has been caught.")
            virsh_session.close()
        elif action == "dump":
            domain_id = vm.get_id()
            dump_path = "/var/lib/libvirt/qemu/dump/"
            if not utils_misc.wait_for(
                    lambda: _check_dump_file(dump_path, domain_id), 180, 10):
                test.fail("No auto core dump file found after watchdog triggered")
            else:
                logging.debug("VM core has been dumped after watchdog triggered.")

    name_length = params.get("name_length", "default")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    model = params.get("model")
    action = params.get("action")
    model_test = params.get("model_test") == "yes"
    hotplug_test = params.get("hotplug_test") == "yes"
    hotunplug_test = params.get("hotunplug_test") == "yes"
    machine_type = params.get("machine_type")

    if machine_type == "q35" and model == "ib700":
        # ib700 is an ISA device and cannot exist on a q35 machine.
        test.cancel("ib700wdt watchdog device is not supported "
                    "on guest with q35 machine type")

    # Backup xml file
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Rename the guest name to the length defined in the config file
    if name_length != "default":
        origin_name = vm_name
        name_length = int(params.get("name_length", "1"))
        vm_name = ''.join([random.choice(string.ascii_letters + string.digits)
                           for _ in range(name_length)])
        vm_xml.VMXML.vm_rename(vm, vm_name)
        # Generate the renamed xml file
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Coldplug pcie-to-pci-bridge to vm xml for q35 guest as i6300esb watchdog
    # device can only be plugged to conventional PCI slot
    if (machine_type == 'q35'
            and not vmxml.get_controllers(controller_type='pci',
                                          model='pcie-to-pci-bridge')):
        logging.debug("Add pcie-root-port and pcie-to-pci-bridge controller to vm")
        pcie_root_port = Controller("pci")
        pcie_pci_bridge = Controller("pci")
        pcie_root_port.model = "pcie-root-port"
        pcie_pci_bridge.model = "pcie-to-pci-bridge"
        pcie_root_port.model_name = {'name': 'pcie-root-port'}
        pcie_pci_bridge.model_name = {'name': 'pcie-pci-bridge'}
        vmxml.add_device(pcie_root_port)
        vmxml.add_device(pcie_pci_bridge)
        vmxml.sync()

    if hotplug_test:
        # Hotplug needs a running guest before the device is attached.
        vm.start()
        session = vm.wait_for_login()

    # Add watchdog device to domain
    vmxml.remove_all_device_by_type('watchdog')
    watchdog_dev = Watchdog()
    watchdog_dev.model_type = model
    watchdog_dev.action = action
    # User aliases must start with "ua-"; exercise the maximum-ish length.
    chars = string.ascii_letters + string.digits + '-_'
    alias_name = 'ua-' + ''.join(random.choice(chars) for _ in list(range(64)))
    watchdog_dev.alias = {'name': alias_name}

    try:
        if model_test or hotunplug_test:
            # Coldplug: define with the device, then boot.
            vmxml.add_device(watchdog_dev)
            vmxml.sync()
            try:
                vm.start()
            except Exception:
                test.fail("VM startup after adding watchdog device failed!")
        elif hotplug_test:
            watchdog_xml = watchdog_dev.xml
            # ignore_status=False makes attach_device raise on failure.
            virsh.attach_device(vm_name, watchdog_xml,
                                ignore_status=False, debug=True)
            if not utils_misc.wait_for(lambda: watchdog_attached(vm.name), 60):
                test.fail("Failed to hotplug watchdog device.")
        session = vm.wait_for_login()

        # No need to trigger watchdog after hotunplug
        if hotunplug_test:
            cur_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            cur_watchdog = cur_xml.xmltreefile.find('devices/watchdog')
            cur_watchdog_xml = Watchdog.new_from_element(cur_watchdog).xml
            detach_result = virsh.detach_device(vm_name, cur_watchdog_xml,
                                                ignore_status=True, debug=True)
            if detach_result.exit_status:
                test.fail("i6300esb watchdog device can NOT be detached successfully, "
                          "result:\n%s" % detach_result)
            elif not utils_misc.wait_for(lambda: not watchdog_attached(vm.name), 60):
                test.fail("Failed to hotunplug watchdog device.")
            return

        if action == "reset":
            # Record the current boot time so _booting_completed can detect
            # the reboot caused by the watchdog.
            status, first_boot_time = session.cmd_status_output("uptime --since")
            logging.info("The first boot time is %s\n", first_boot_time)
        if action == "inject-nmi":
            # Keep a virsh session listening for watchdog events in a loop.
            virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC,
                                               auto_close=True)
            event_cmd = "event --event watchdog --all --loop"
            virsh_session.sendline(event_cmd)
        trigger_watchdog(model)
        confirm_guest_status()
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if name_length != "default":
            # Restore the original guest name before syncing the backup XML.
            vm_xml.VMXML.vm_rename(vm, origin_name)
        backup_xml.sync()
def trigger_events(dom, events_list=None):
    """
    Trigger each event in events_list on the given domain and collect the
    message lines that "virsh event" is expected to print for them.

    NOTE(review): this helper is closed over enclosing-scope names (tmpdir,
    virsh_dargs, vmxml, params, test, signal_name, add_disk, ... ) — it only
    works nested inside the test's run() context.

    :param dom: the vm object corresponding to the domain
    :param events_list: names of events to trigger (default: no events)
    :return: list of (domain name, expected event message) tuples
    """
    # Fix: avoid the mutable-default-argument anti-pattern; use None sentinel.
    if events_list is None:
        events_list = []
    expected_events_list = []
    save_path = os.path.join(tmpdir, "%s_event.save" % dom.name)
    logging.debug("Triggering events on domain %s", dom.name)
    xmlfile = dom.backup_xml()
    new_disk = os.path.join(tmpdir, "%s_new_disk.img" % dom.name)
    dest_path = os.path.join(data_dir.get_data_dir(), "copy")

    try:
        for event in events_list:
            logging.debug("Current event is: %s", event)
            # Some events need the domain shut off (and possibly undefined)
            # first; all others need it running.
            if event in ['start', 'restore', 'create', 'edit', 'define',
                         'undefine', 'crash', 'device-removal-failed',
                         'watchdog', 'io-error']:
                if dom.is_alive():
                    dom.destroy()
                    if event in ['create', 'define']:
                        dom.undefine()
            else:
                if not dom.is_alive():
                    dom.start()
                    dom.wait_for_login().close()
                    if event == "resume":
                        dom.pause()

            if event == "undefine":
                virsh.undefine(dom.name, **virsh_dargs)
                expected_events_list.append("'lifecycle' for %s:"
                                            " Undefined Removed")
            elif event == "create":
                virsh.create(xmlfile, **virsh_dargs)
                expected_events_list.append("'lifecycle' for %s:"
                                            " Resumed Unpaused")
                expected_events_list.append("'lifecycle' for %s:"
                                            " Started Booted")
            elif event == "destroy":
                virsh.destroy(dom.name, **virsh_dargs)
                expected_events_list.append("'lifecycle' for %s:"
                                            " Stopped Destroyed")
            elif event == "define":
                virsh.define(xmlfile, **virsh_dargs)
                expected_events_list.append("'lifecycle' for %s:"
                                            " Defined Added")
            elif event == "start":
                virsh.start(dom.name, **virsh_dargs)
                expected_events_list.append("'lifecycle' for %s:"
                                            " Resumed Unpaused")
                expected_events_list.append("'lifecycle' for %s:"
                                            " Started Booted")
                dom.wait_for_login().close()
            elif event == "suspend":
                virsh.suspend(dom.name, **virsh_dargs)
                expected_events_list.append("'lifecycle' for %s:"
                                            " Suspended Paused")
                if not libvirt_version.version_compare(5, 3, 0):
                    # Older libvirt emits the suspend event twice.
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
            elif event == "resume":
                virsh.resume(dom.name, **virsh_dargs)
                expected_events_list.append("'lifecycle' for %s:"
                                            " Resumed Unpaused")
            elif event == "save":
                virsh.save(dom.name, save_path, **virsh_dargs)
                expected_events_list.append("'lifecycle' for %s:"
                                            " Suspended Paused")
                expected_events_list.append("'lifecycle' for %s:"
                                            " Stopped Saved")
            elif event == "restore":
                if not os.path.exists(save_path):
                    logging.error("%s not exist", save_path)
                else:
                    virsh.restore(save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Restored")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Snapshot")
            elif event == "edit":
                # Check whether 'description' element exists.
                domxml = virsh.dumpxml(dom.name).stdout.strip()
                find_desc = parseString(domxml).getElementsByTagName("description")
                if find_desc == []:
                    # If not exists, add one for it.
                    logging.info("Adding <description> to guest")
                    virsh.desc(dom.name, "--config", "Added desc for testvm",
                               **virsh_dargs)
                # The edit operation is to delete 'description' element.
                edit_cmd = [r":g/<description.*<\/description>/d"]
                utlv.exec_virsh_edit(dom.name, edit_cmd)
                expected_events_list.append("'lifecycle' for %s:"
                                            " Defined Updated")
            elif event == "shutdown":
                if signal_name is None:
                    virsh.shutdown(dom.name, **virsh_dargs)
                    # Wait a few seconds for shutdown finish
                    time.sleep(3)
                    if utils_misc.compare_qemu_version(2, 9, 0):
                        # Shutdown reason distinguished from qemu_2.9.0-9
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Shutdown Finished after guest request")
                else:
                    # Host-initiated shutdown: signal the qemu process.
                    os.kill(dom.get_pid(), getattr(signal, signal_name))
                    if utils_misc.compare_qemu_version(2, 9, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Shutdown Finished after host request")
                if not utils_misc.compare_qemu_version(2, 9, 0):
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Shutdown Finished")
                wait_for_shutoff(dom)
                expected_events_list.append("'lifecycle' for %s:"
                                            " Stopped Shutdown")
            elif event == "crash":
                if not vmxml.xmltreefile.find('devices').findall('panic'):
                    # Set panic device
                    panic_dev = Panic()
                    panic_dev.model = panic_model
                    panic_dev.addr_type = addr_type
                    panic_dev.addr_iobase = addr_iobase
                    vmxml.add_device(panic_dev)
                vmxml.on_crash = "coredump-restart"
                vmxml.sync()
                logging.info("Guest xml now is: %s", vmxml)
                dom.start()
                session = dom.wait_for_login()
                # Stop kdump in the guest
                session.cmd("systemctl stop kdump", ignore_all_errors=True)
                # Enable sysRq
                session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                try:
                    # Crash the guest
                    session.cmd("echo c > /proc/sysrq-trigger", timeout=90)
                except (ShellTimeoutError, ShellProcessTerminatedError) as details:
                    # The crash kills the shell session; that is expected.
                    logging.info(details)
                session.close()
                expected_events_list.append("'lifecycle' for %s:"
                                            " Crashed Panicked")
                expected_events_list.append("'lifecycle' for %s:"
                                            " Resumed Unpaused")
            elif event == "reset":
                virsh.reset(dom.name, **virsh_dargs)
                expected_events_list.append("'reboot' for %s")
            elif event == "vcpupin":
                virsh.vcpupin(dom.name, '0', '0', **virsh_dargs)
                expected_events_list.append("'tunable' for %s:"
                                            "\n\tcputune.vcpupin0: 0")
            elif event == "emulatorpin":
                virsh.emulatorpin(dom.name, '0', **virsh_dargs)
                expected_events_list.append("'tunable' for %s:"
                                            "\n\tcputune.emulatorpin: 0")
            elif event == "setmem":
                mem_size = int(params.get("mem_size", 512000))
                virsh.setmem(dom.name, mem_size, **virsh_dargs)
                expected_events_list.append("'balloon-change' for %s:")
            elif event == "device-added-removed":
                add_disk(dom.name, new_disk, 'vdb', '')
                expected_events_list.append("'device-added' for %s:"
                                            " virtio-disk1")
                virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                expected_events_list.append("'device-removed' for %s:"
                                            " virtio-disk1")
                iface_xml_obj = create_iface_xml()
                iface_xml_obj.xmltreefile.write()
                virsh.detach_device(dom.name, iface_xml_obj.xml, **virsh_dargs)
                expected_events_list.append("'device-removed' for %s:"
                                            " net0")
                time.sleep(2)
                virsh.attach_device(dom.name, iface_xml_obj.xml, **virsh_dargs)
                expected_events_list.append("'device-added' for %s:"
                                            " net0")
            elif event == "block-threshold":
                add_disk(dom.name, new_disk, 'vdb', '', format=disk_format)
                logging.debug(process.run('qemu-img info %s -U' % new_disk))
                virsh.domblkthreshold(vm_name, 'vdb', '100M')
                session = dom.wait_for_login()
                # Write past the 100M threshold to fire the event.
                session.cmd("mkfs.ext4 /dev/vdb && mount /dev/vdb /mnt && ls /mnt && "
                            "dd if=/dev/urandom of=/mnt/bigfile bs=1M count=300 && sync")
                time.sleep(5)
                session.close()
                expected_events_list.append("'block-threshold' for %s:"
                                            " dev: vdb(%s) 104857600 29368320")
                virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
            elif event == "change-media":
                target_device = "hdc"
                device_target_bus = params.get("device_target_bus", "ide")
                disk_blk = vm_xml.VMXML.get_disk_blk(dom.name)
                logging.info("disk_blk %s", disk_blk)
                if target_device not in disk_blk:
                    logging.info("Adding cdrom to guest")
                    if dom.is_alive():
                        dom.destroy()
                    add_disk(dom.name, new_disk, target_device,
                             ("--type cdrom --sourcetype file --driver qemu " +
                              "--config --targetbus %s" % device_target_bus))
                    dom.start()
                all_options = new_disk + " --insert"
                virsh.change_media(dom.name, target_device,
                                   all_options, **virsh_dargs)
                expected_events_list.append("'tray-change' for %s disk" +
                                            " .*%s.*:" % device_target_bus +
                                            " opened")
                expected_events_list.append("'tray-change' for %s disk" +
                                            " .*%s.*:" % device_target_bus +
                                            " closed")
                all_options = new_disk + " --eject"
                virsh.change_media(dom.name, target_device,
                                   all_options, **virsh_dargs)
                expected_events_list.append("'tray-change' for %s disk" +
                                            " .*%s.*:" % device_target_bus +
                                            " opened")
            elif event == "hwclock":
                session = dom.wait_for_login()
                try:
                    session.cmd("hwclock --systohc", timeout=60)
                except (ShellTimeoutError, ShellProcessTerminatedError) as details:
                    logging.info(details)
                session.close()
                expected_events_list.append("'rtc-change' for %s:")
            elif event == "metadata_set":
                metadata_uri = params.get("metadata_uri")
                metadata_key = params.get("metadata_key")
                metadata_value = params.get("metadata_value")
                virsh.metadata(dom.name,
                               metadata_uri,
                               options="",
                               key=metadata_key,
                               new_metadata=metadata_value,
                               **virsh_dargs)
                expected_events_list.append("'metadata-change' for %s: "
                                            "element http://app.org/")
            elif event == "metadata_edit":
                metadata_uri = "http://herp.derp/"
                metadata_key = "herp"
                metadata_value = "<derp xmlns:foobar='http://foo.bar/'>foo<bar></bar></derp>"
                virsh_cmd = r"virsh metadata %s --uri %s --key %s %s"
                virsh_cmd = virsh_cmd % (dom.name, metadata_uri,
                                         metadata_key, "--edit")
                session = aexpect.ShellSession("sudo -s")
                logging.info("Running command: %s", virsh_cmd)
                try:
                    # Drive the interactive $EDITOR (vi) session by hand.
                    session.sendline(virsh_cmd)
                    session.sendline(r":insert")
                    session.sendline(metadata_value)
                    session.sendline(".")
                    session.send('ZZ')
                    remote.handle_prompts(session, None, None, r"[\#\$]\s*$",
                                          debug=True, timeout=60)
                except Exception as e:
                    test.error("Error occured: %s" % e)
                session.close()
                # Check metadata after edit
                virsh.metadata(dom.name,
                               metadata_uri,
                               options="",
                               key=metadata_key,
                               **virsh_dargs)
                expected_events_list.append("'metadata-change' for %s: "
                                            "element http://app.org/")
            elif event == "metadata_remove":
                # NOTE(review): metadata_uri/metadata_key come from a previous
                # metadata_set/metadata_edit iteration in the same events_list;
                # running metadata_remove alone would raise NameError.
                virsh.metadata(dom.name,
                               metadata_uri,
                               options="--remove",
                               key=metadata_key,
                               **virsh_dargs)
                expected_events_list.append("'metadata-change' for %s: "
                                            "element http://app.org/")
            elif event == "blockcommit":
                disk_path = dom.get_blk_devices()['vda']['source']
                virsh.snapshot_create_as(dom.name, "s1 --disk-only --no-metadata",
                                         **virsh_dargs)
                snapshot_path = dom.get_blk_devices()['vda']['source']
                virsh.blockcommit(dom.name, "vda", "--active --pivot",
                                  **virsh_dargs)
                expected_events_list.append("'block-job' for %s: "
                                            "Active Block Commit for " +
                                            "%s" % snapshot_path + " ready")
                expected_events_list.append("'block-job-2' for %s: "
                                            "Active Block Commit for vda ready")
                expected_events_list.append("'block-job' for %s: "
                                            "Active Block Commit for " +
                                            "%s" % disk_path + " completed")
                expected_events_list.append("'block-job-2' for %s: "
                                            "Active Block Commit for vda completed")
                os.unlink(snapshot_path)
            elif event == "blockcopy":
                disk_path = dom.get_blk_devices()['vda']['source']
                # blockcopy requires a transient domain.
                dom.undefine()
                virsh.blockcopy(dom.name, "vda", dest_path, "--pivot",
                                **virsh_dargs)
                expected_events_list.append("'block-job' for %s: "
                                            "Block Copy for " +
                                            "%s" % disk_path + " ready")
                expected_events_list.append("'block-job-2' for %s: "
                                            "Block Copy for vda ready")
                expected_events_list.append("'block-job' for %s: "
                                            "Block Copy for " +
                                            "%s" % dest_path + " completed")
                expected_events_list.append("'block-job-2' for %s: "
                                            "Block Copy for vda completed")
            elif event == "detach-dimm":
                prepare_vmxml_mem(vmxml)
                tg_size = params.get("dimm_size")
                tg_sizeunit = params.get("dimm_unit")
                dimm_xml = utils_hotplug.create_mem_xml(tg_size, None, None, tg_sizeunit)
                virsh.attach_device(dom.name, dimm_xml.xml,
                                    flagstr="--config", **virsh_dargs)
                vmxml_dimm = vm_xml.VMXML.new_from_dumpxml(dom.name)
                logging.debug("Current vmxml with plugged dimm dev is %s\n" % vmxml_dimm)
                virsh.start(dom.name, **virsh_dargs)
                dom.wait_for_login().close()
                # The detach is expected to fail and emit the removal-failed event.
                result = virsh.detach_device(dom.name, dimm_xml.xml, debug=True, ignore_status=True)
                expected_fails = params.get("expected_fails")
                utlv.check_result(result, expected_fails)
                vmxml_live = vm_xml.VMXML.new_from_dumpxml(dom.name)
                logging.debug("Current vmxml after hot-unplug dimm is %s\n" % vmxml_live)
                expected_events_list.append("'device-removal-failed' for %s: dimm0")
            elif event == "watchdog":
                vmxml.remove_all_device_by_type('watchdog')
                watchdog_dev = Watchdog()
                watchdog_dev.model_type = params.get("watchdog_model")
                action = params.get("action")
                watchdog_dev.action = action
                vmxml.add_device(watchdog_dev)
                vmxml.sync()
                logging.debug("Current vmxml with watchdog dev is %s\n" % vmxml)
                virsh.start(dom.name, **virsh_dargs)
                session = dom.wait_for_login()
                try:
                    # Arm the watchdog; nothing pets it, so it will expire.
                    session.cmd("echo 0 > /dev/watchdog")
                except (ShellTimeoutError, ShellProcessTerminatedError) as details:
                    test.fail("Failed to trigger watchdog: %s" % details)
                session.close()
                # watchdog acts slowly, waiting for it.
                time.sleep(30)
                expected_events_list.append("'watchdog' for %s: " + "%s" % action)
                if action == 'pause':
                    expected_events_list.append("'lifecycle' for %s: Suspended Watchdog")
                    virsh.resume(dom.name, **virsh_dargs)
                else:
                    # action == 'reset'
                    expected_events_list.append("'reboot' for %s")
            elif event == "io-error":
                part_size = params.get("part_size")
                resume_event = params.get("resume_event")
                suspend_event = params.get("suspend_event")
                # Back the disk with a deliberately tiny partition so writes
                # hit ENOSPC and generate io-error events.
                process.run("truncate -s %s %s" % (part_size, small_part), shell=True)
                utlv.mkfs(small_part, part_format)
                utils_misc.mount(small_part, mount_point, None)
                add_disk(dom.name, new_disk, 'vdb', '--subdriver qcow2 --config', 'qcow2')
                dom.start()
                session = dom.wait_for_login()
                session.cmd("mkfs.ext4 /dev/vdb && mount /dev/vdb /mnt && ls /mnt && "
                            "dd if=/dev/zero of=/mnt/test.img bs=1M count=50",
                            ignore_all_errors=True)
                time.sleep(5)
                session.close()
                expected_events_list.append("'io-error' for %s: " +
                                            "%s" % new_disk +
                                            r" \(virtio-disk1\) pause")
                expected_events_list.append("'io-error-reason' for %s: " +
                                            "%s" % new_disk +
                                            r" \(virtio-disk1\) pause due to enospc")
                expected_events_list.append(suspend_event)
                process.run("df -hT")
                virsh.resume(dom.name, **virsh_dargs)
                time.sleep(5)
                expected_events_list.append(resume_event)
                expected_events_list.append("'io-error' for %s: " +
                                            "%s" % new_disk +
                                            r" \(virtio-disk1\) pause")
                expected_events_list.append("'io-error-reason' for %s: " +
                                            "%s" % new_disk +
                                            r" \(virtio-disk1\) pause due to enospc")
                expected_events_list.append(suspend_event)
                ret = virsh.domstate(dom.name, "--reason", **virsh_dargs)
                if ret.stdout.strip() != "paused (I/O error)":
                    test.fail("Domain state should still be paused due to I/O error!")
            else:
                test.error("Unsupported event: %s" % event)
            # Event may not received immediately
            time.sleep(3)
    finally:
        # Clean up the artifacts created while triggering events.
        if os.path.exists(save_path):
            os.unlink(save_path)
        if os.path.exists(new_disk):
            os.unlink(new_disk)
        if os.path.exists(dest_path):
            os.unlink(dest_path)
    return [(dom.name, event) for event in expected_events_list]
def run(test, params, env):
    """
    Test watchdog device:

    1.Add watchdog device to the guest xml.
    2.Start the guest.
    3.Trigger the watchdog in the guest.
    4.Confirm the guest status.
    """

    def trigger_watchdog(model):
        """
        Verify the watchdog shows up on the qemu command line, then arm it
        from inside the guest by writing to /dev/watchdog.

        :param model: watchdog device model (e.g. "i6300esb" or "ib700")
        """
        watchdog_device = "device %s" % model
        if action == "dump":
            # qemu has no "dump" action; libvirt dumps from the host side and
            # the qemu command line carries "pause" instead.
            watchdog_action = "watchdog-action pause"
        else:
            watchdog_action = "watchdog-action %s" % action
        vm_pid = vm.get_pid()
        with open("/proc/%s/cmdline" % vm_pid) as vm_cmdline_file:
            vm_cmdline = vm_cmdline_file.read()
            # /proc cmdline separates argv entries with NUL bytes.
            vm_cmdline = vm_cmdline.replace('\x00', ' ')
            if not all(option in vm_cmdline
                       for option in (watchdog_device, watchdog_action)):
                test.fail("Can not find %s or %s in qemu cmd line"
                          % (watchdog_device, watchdog_action))
        # Best effort: stop a GNOME guest from powering itself off via the
        # power-button handler; ignored on guests without gsettings.
        cmd = "gsettings set org.gnome.settings-daemon.plugins.power button-power shutdown"
        session.cmd(cmd, ignore_all_errors=True)
        try:
            if model == "ib700":
                # The ib700 driver is not autoloaded; modprobe it explicitly.
                try:
                    session.cmd("modprobe ib700wdt")
                except aexpect.ShellCmdError:
                    session.close()
                    test.fail("Failed to load module ib700wdt")
            session.cmd("dmesg | grep -i %s && lsmod | grep %s" % (model, model))
            # Opening /dev/watchdog starts the timer; since nothing pets it,
            # the watchdog expires and the configured action is taken.
            session.cmd("echo 1 > /dev/watchdog")
        except aexpect.ShellCmdError:
            session.close()
            test.fail("Failed to trigger watchdog")

    def confirm_guest_status():
        """
        Confirm the guest reached the state implied by the watchdog action.
        """
        def _booting_completed():
            # The guest has rebooted once "uptime --since" reports a later
            # boot timestamp than the one recorded before the trigger
            # (ISO-formatted timestamps compare correctly as strings).
            session = vm.wait_for_login()
            status, second_boot_time = session.cmd_status_output("uptime --since")
            logging.debug("The second boot time is %s", second_boot_time)
            session.close()
            return second_boot_time > first_boot_time

        def _inject_nmi():
            # An injected NMI leaves a trace in the guest kernel log.
            session = vm.wait_for_login()
            status, output = session.cmd_status_output("dmesg | grep -i nmi")
            session.close()
            if status == 0:
                logging.debug(output)
                return True
            return False

        def _inject_nmi_event():
            # Stop the looping "virsh event" reader and inspect its output.
            virsh_session.send_ctrl("^C")
            output = virsh_session.get_stripped_output()
            if "inject-nmi" not in output:
                return False
            return True

        def _check_dump_file(dump_path, domain_id):
            # Auto core dump files are named "<domain id>-<timestamp>...".
            dump_file = glob.glob('%s%s-*' % (dump_path, domain_id))
            if len(dump_file):
                logging.debug("Find the auto core dump file:\n%s", dump_file[0])
                os.remove(dump_file[0])
                return True
            return False

        if action in ["poweroff", "shutdown"]:
            if not utils_misc.wait_for(lambda: vm.state() == "shut off", 180, 10):
                test.fail("Guest not shutdown after watchdog triggered")
        elif action == "reset":
            if not utils_misc.wait_for(_booting_completed, 600, 10):
                test.fail("Guest not reboot after watchdog triggered")
        elif action == "pause":
            if utils_misc.wait_for(lambda: vm.state() == "paused", 180, 10):
                cmd_output = virsh.domstate(vm_name, '--reason').stdout.strip()
                logging.debug("Check guest status: %s\n", cmd_output)
                if cmd_output != "paused (watchdog)":
                    test.fail("The domstate is not correct after dump by watchdog")
            else:
                test.fail("Guest not pause after watchdog triggered")
        elif action == "none" and utils_misc.wait_for(
                lambda: vm.state() == "shut off", 180, 10):
            # With action=none the guest must keep running despite the expiry.
            test.fail("Guest shutdown unexpectedly")
        elif action == "inject-nmi":
            if not utils_misc.wait_for(_inject_nmi, 180, 10):
                test.fail("Guest not receive inject-nmi after watchdog triggered\n")
            elif not utils_misc.wait_for(_inject_nmi_event, 180, 10):
                test.fail("No inject-nmi watchdog event caught")
            virsh_session.close()
        elif action == "dump":
            domain_id = vm.get_id()
            dump_path = "/var/lib/libvirt/qemu/dump/"
            if not utils_misc.wait_for(
                    lambda: _check_dump_file(dump_path, domain_id), 180, 10):
                test.fail("No auto core dump file found after watchdog triggered")

    name_length = params.get("name_length", "default")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    model = params.get("model")
    action = params.get("action")

    # Backup xml file
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Rename the guest name to the length defined in the config file
    if name_length != "default":
        origin_name = vm_name
        name_length = int(params.get("name_length", "1"))
        vm_name = ''.join([random.choice(string.ascii_letters + string.digits)
                           for _ in range(name_length)])
        vm_xml.VMXML.vm_rename(vm, vm_name)
        # Generate the renamed xml file
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Add watchdog device to domain
    vmxml.remove_all_device_by_type('watchdog')
    watchdog_dev = Watchdog()
    watchdog_dev.model_type = model
    watchdog_dev.action = action
    # User aliases must start with "ua-"; exercise the maximum-ish length.
    chars = string.ascii_letters + string.digits + '-_'
    alias_name = 'ua-' + ''.join(random.choice(chars) for _ in list(range(64)))
    watchdog_dev.alias = {'name': alias_name}
    vmxml.add_device(watchdog_dev)
    vmxml.sync()

    try:
        vm.start()
        session = vm.wait_for_login()
        if action == "reset":
            # Record the current boot time so _booting_completed can detect
            # the reboot caused by the watchdog.
            status, first_boot_time = session.cmd_status_output("uptime --since")
            logging.info("The first boot time is %s\n", first_boot_time)
        if action == "inject-nmi":
            # Keep a virsh session listening for watchdog events in a loop.
            virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC,
                                               auto_close=True)
            event_cmd = "event --event watchdog --all --loop"
            virsh_session.sendline(event_cmd)
        trigger_watchdog(model)
        confirm_guest_status()
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Fix: restore the original guest name before syncing the backup XML;
        # previously a renamed guest was never renamed back, leaving the VM
        # object pointing at a stale name for later tests.
        if name_length != "default":
            vm_xml.VMXML.vm_rename(vm, origin_name)
        backup_xml.sync()
def run(test, params, env):
    """
    Test watchdog device:

    1.Add watchdog device to the guest xml.
    2.Start the guest.
    3.Trigger the watchdog in the guest.
    4.Confirm the guest status.
    """

    def trigger_watchdog(model):
        """
        Verify the watchdog shows up on the qemu command line, then arm it
        from inside the guest by writing to /dev/watchdog.

        :param model: watchdog device model (e.g. "i6300esb" or "ib700")
        """
        watchdog_device = "device %s" % model
        if action == "dump":
            # qemu has no "dump" action; libvirt dumps from the host side and
            # the qemu command line carries "pause" instead.
            watchdog_action = "watchdog-action pause"
        else:
            watchdog_action = "watchdog-action %s" % action
        vm_pid = vm.get_pid()
        with open("/proc/%s/cmdline" % vm_pid) as vm_cmdline_file:
            vm_cmdline = vm_cmdline_file.read()
            # /proc cmdline separates argv entries with NUL bytes.
            vm_cmdline = vm_cmdline.replace('\x00', ' ')
            if not all(option in vm_cmdline
                       for option in (watchdog_device, watchdog_action)):
                test.fail("Can not find %s or %s in qemu cmd line"
                          % (watchdog_device, watchdog_action))
        # Best effort: stop a GNOME guest from powering itself off via the
        # power-button handler; ignored on guests without gsettings.
        cmd = "gsettings set org.gnome.settings-daemon.plugins.power button-power shutdown"
        session.cmd(cmd, ignore_all_errors=True)
        try:
            if model == "ib700":
                # The ib700 driver is not autoloaded; modprobe it explicitly.
                try:
                    session.cmd("modprobe ib700wdt")
                except aexpect.ShellCmdError:
                    session.close()
                    test.fail("Failed to load module ib700wdt")
            session.cmd("dmesg | grep %s && lsmod | grep %s" % (model, model))
            # Opening /dev/watchdog starts the timer; since nothing pets it,
            # the watchdog expires and the configured action is taken.
            session.cmd("echo 1 > /dev/watchdog")
        except aexpect.ShellCmdError:
            session.close()
            test.fail("Failed to trigger watchdog")

    def confirm_guest_status():
        """
        Confirm the guest reached the state implied by the watchdog action.
        """
        def _booting_completed():
            # NOTE(review): parses the boot time out of "last reboot" by a
            # fixed whitespace-field offset (-4) — fragile across util-linux
            # output formats; the string comparison of the times is also only
            # heuristic. Consider "uptime --since" as used elsewhere.
            session = vm.wait_for_login()
            output = session.cmd_status_output("last reboot")
            second_boot_time = output[1].strip().split("\n")[0].split()[-4]
            logging.debug(second_boot_time)
            session.close()
            return second_boot_time > first_boot_time

        def _inject_nmi():
            # An injected NMI leaves a trace in the guest kernel log.
            session = vm.wait_for_login()
            status, output = session.cmd_status_output("dmesg | grep -i nmi")
            session.close()
            if status == 0:
                logging.debug(output)
                return True
            return False

        def _inject_nmi_event():
            # Stop the looping "virsh event" reader and inspect its output.
            virsh_session.send_ctrl("^C")
            output = virsh_session.get_stripped_output()
            if "inject-nmi" not in output:
                return False
            return True

        def _check_dump_file(dump_path, domain_id):
            # Auto core dump files are named "<domain id>-<timestamp>...".
            dump_file = glob.glob('%s%s-*' % (dump_path, domain_id))
            if len(dump_file):
                logging.debug("Find the auto core dump file:\n%s", dump_file[0])
                os.remove(dump_file[0])
                return True
            return False

        if action in ["poweroff", "shutdown"]:
            if not utils_misc.wait_for(lambda: vm.state() == "shut off", 180, 10):
                test.fail("Guest not shutdown after watchdog triggered")
        elif action == "reset":
            if not utils_misc.wait_for(_booting_completed, 600, 10):
                test.fail("Guest not reboot after watchdog triggered")
        elif action == "pause":
            if utils_misc.wait_for(lambda: vm.state() == "paused", 180, 10):
                cmd_output = virsh.domstate(vm_name, '--reason').stdout.strip()
                logging.debug("Check guest status: %s\n", cmd_output)
                if cmd_output != "paused (watchdog)":
                    test.fail(
                        "The domstate is not correct after dump by watchdog")
            else:
                test.fail("Guest not pause after watchdog triggered")
        elif action == "none" and utils_misc.wait_for(
                lambda: vm.state() == "shut off", 180, 10):
            # With action=none the guest must keep running despite the expiry.
            test.fail("Guest shutdown unexpectedly")
        elif action == "inject-nmi":
            if not utils_misc.wait_for(_inject_nmi, 180, 10):
                test.fail(
                    "Guest not receive inject-nmi after watchdog triggered\n")
            elif not utils_misc.wait_for(_inject_nmi_event, 180, 10):
                test.fail("No inject-nmi watchdog event caught")
            virsh_session.close()
        elif action == "dump":
            domain_id = vm.get_id()
            dump_path = "/var/lib/libvirt/qemu/dump/"
            if not utils_misc.wait_for(
                    lambda: _check_dump_file(dump_path, domain_id), 180, 10):
                test.fail(
                    "No auto core dump file found after watchdog triggered")

    name_length = params.get("name_length", "default")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    model = params.get("model")
    action = params.get("action")

    # Backup xml file
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Rename the guest name to the length defined in the config file
    if name_length != "default":
        origin_name = vm_name
        name_length = int(params.get("name_length", "1"))
        vm_name = ''.join([
            random.choice(string.ascii_letters + string.digits)
            for _ in range(name_length)
        ])
        vm_xml.VMXML.vm_rename(vm, vm_name)
        # Generate the renamed xml file
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Add watchdog device to domain
    vmxml.remove_all_device_by_type('watchdog')
    watchdog_dev = Watchdog()
    watchdog_dev.model_type = model
    watchdog_dev.action = action
    # User aliases must start with "ua-"; exercise the maximum-ish length.
    chars = string.ascii_letters + string.digits + '-_'
    alias_name = 'ua-' + ''.join(random.choice(chars) for _ in list(range(64)))
    watchdog_dev.alias = {'name': alias_name}
    vmxml.add_device(watchdog_dev)
    vmxml.sync()

    try:
        vm.start()
        session = vm.wait_for_login()
        if action == "reset":
            # Record the current boot time so _booting_completed can detect
            # the reboot caused by the watchdog.
            output = session.cmd_status_output("last reboot")
            first_boot_time = output[1].strip().split("\n")[0].split()[-4]
            logging.info("The first boot time is %s\n", first_boot_time)
        if action == "inject-nmi":
            # Keep a virsh session listening for watchdog events in a loop.
            virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC,
                                               auto_close=True)
            event_cmd = "event --event watchdog --all --loop"
            virsh_session.sendline(event_cmd)
        trigger_watchdog(model)
        confirm_guest_status()
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Fix: restore the original guest name before syncing the backup XML;
        # previously a renamed guest was never renamed back, leaving the VM
        # object pointing at a stale name for later tests.
        if name_length != "default":
            vm_xml.VMXML.vm_rename(vm, origin_name)
        backup_xml.sync()
def run(test, params, env):
    """
    Test detach-device-alias command with --config, --live, --current

    1. Test hostdev device detach
    2. Test scsi controller device detach
    3. Test redirect device detach
    4. Test channel devices detach

    :param test: avocado test object, used for fail/error reporting
    :param params: test parameters from the cfg file
    :param env: test environment holding the VM object
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    detach_options = params.get("detach_alias_options", "")
    detach_check_xml = params.get("detach_check_xml")
    # hostdev device params
    hostdev_type = params.get("detach_hostdev_type", "")
    hostdev_managed = params.get("detach_hostdev_managed")
    # controller params
    contr_type = params.get("detach_controller_type")
    contr_model = params.get("detach_controller_mode")
    # redirdev params
    redir_type = params.get("detach_redirdev_type")
    redir_bus = params.get("detach_redirdev_bus")
    # channel params
    channel_type = params.get("detach_channel_type")
    # NOTE(review): eval() on cfg-file values is the established convention in
    # this test suite, but it will execute arbitrary expressions from the
    # config -- do not feed it untrusted input.
    channel_target = eval(params.get("detach_channel_target", "{}"))
    # watchdog params
    watchdog_type = params.get("detach_watchdog_type")
    watchdog_dict = eval(params.get('watchdog_dict', '{}'))
    device_alias = "ua-" + str(uuid.uuid4())

    def check_detached_xml_noexist():
        """
        Check detached xml does not exist in the guest dumpxml

        :return: True if it does not exist, False if still exists
        """
        domxml_dt = virsh.dumpxml(vm_name, dump_option).stdout_text.strip()
        return detach_check_xml not in domxml_dt

    def get_usb_info():
        """
        Get local host usb info

        :return: usb vendor and product id
        """
        # Best-effort install of lsusb; the result of the install itself is
        # not checked -- only the lsusb parse below matters.
        process.run("yum install usbutils* -y", shell=True)
        result = process.run("lsusb|awk '{print $6\":\"$2\":\"$4}'", shell=True)
        if not result.exit_status:
            return result.stdout_text.rstrip(':')
        else:
            test.error("Can not get usb hub info for testing")

    # backup xml
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    device_xml = None
    attach_device = True

    if not vm.is_alive():
        vm.start()
    # wait for vm start successfully
    vm.wait_for_login()

    if hostdev_type:
        if hostdev_type in ["usb", "scsi"]:
            if hostdev_type == "usb":
                pci_id = get_usb_info()
            elif hostdev_type == "scsi":
                source_disk = libvirt.create_scsi_disk(scsi_option="",
                                                       scsi_size="8")
                pci_id = get_scsi_info(source_disk)
            device_xml = libvirt.create_hostdev_xml(pci_id=pci_id,
                                                    dev_type=hostdev_type,
                                                    managed=hostdev_managed,
                                                    alias=device_alias)
        else:
            test.error("Hostdev type %s not handled by test."
                       " Please check code." % hostdev_type)

    if contr_type:
        # Use an index one past the existing controllers so the new
        # controller does not collide with one already in the domain.
        controllers = vmxml.get_controllers(contr_type)
        contr_index = len(controllers) + 1
        contr_dict = {"controller_type": contr_type,
                      "controller_model": contr_model,
                      "controller_index": contr_index,
                      "contr_alias": device_alias}
        device_xml = libvirt.create_controller_xml(contr_dict)
        detach_check_xml = detach_check_xml % contr_index

    if redir_type:
        device_xml = libvirt.create_redirdev_xml(redir_type, redir_bus,
                                                 device_alias)

    if channel_type:
        channel_params = {'channel_type_name': channel_type}
        channel_params.update(channel_target)
        device_xml = libvirt.create_channel_xml(channel_params, device_alias)

    if watchdog_type:
        # The watchdog is defined directly in the domain xml (cold-plug via
        # sync) instead of virsh attach-device, so skip the attach step below.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.remove_all_device_by_type('watchdog')
        device_xml_file = Watchdog()
        device_xml_file.update({"alias": {"name": device_alias}})
        device_xml_file.setup_attrs(**watchdog_dict)
        # BUGFIX: list.append() returns None, so the original
        # "vmxml.devices = vmxml.devices.append(...)" assigned None to the
        # devices property.  Fetch, append, then assign back.
        devices = vmxml.devices
        devices.append(device_xml_file)
        vmxml.devices = devices
        vmxml.xmltreefile.write()
        vmxml.sync()
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug('The vmxml after attached watchdog is:%s', vmxml)
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login().close()
        attach_device = False

    try:
        dump_option = ""
        wait_event = True
        if "--config" in detach_options:
            # Persistent-only change: inspect the inactive xml and do not
            # wait for a live DEVICE_REMOVED event.
            dump_option = "--inactive"
            wait_event = False

        # Attach xml to domain
        if attach_device:
            logging.info("Attach xml is %s",
                         process.run("cat %s" % device_xml.xml).stdout_text)
            virsh.attach_device(vm_name, device_xml.xml,
                                flagstr=detach_options,
                                debug=True, ignore_status=False)

        domxml_at = virsh.dumpxml(vm_name, dump_option,
                                  debug=True).stdout.strip()
        if detach_check_xml not in domxml_at:
            test.error("Can not find %s in domxml after attach"
                       % detach_check_xml)

        # Detach xml with alias
        result = virsh.detach_device_alias(vm_name, device_alias,
                                           detach_options,
                                           wait_for_event=wait_event,
                                           event_timeout=20,
                                           debug=True)
        libvirt.check_exit_status(result)
        if not utils_misc.wait_for(
                check_detached_xml_noexist, 60, step=2,
                text="Repeatedly search guest dumpxml with detached xml"):
            test.fail("Still can find %s in domxml" % detach_check_xml)
    finally:
        backup_xml.sync()
        if hostdev_type == "scsi":
            libvirt.delete_scsi_disk()