def virtio_serial_login(self, port='vs1'):
    """
    Try to log into the guest through a virtio serial port and check that
    the port offers no login channel.

    :param port: name of the virtio serial port to probe.
    :return: the aexpect.ShellSession attached to the port's host socket.
    :raise error.TestError: if no virtio serial port named ``port`` exists.
    :raise error.TestFail: if a login unexpectedly succeeds on the port.
    """
    error.context("Try to login guest via '%s'" % port, logging.info)
    username = self.params.get("username")
    password = self.params.get("password")
    prompt = self.params.get("shell_prompt", r"[\#\$]")
    # shell_linesep is stored escaped (r"\n"); eval turns it into the real
    # control character.  Tolerated because the value comes from test config.
    linesep = eval("'%s'" % self.params.get("shell_linesep", r"\n"))
    # Initialise to None before searching so an empty port list cannot
    # leave 'vport' unbound (NameError in the original code).
    vport = None
    for candidate in self.get_virtio_ports(self.vm)[1]:
        if candidate.name == port:
            vport = candidate
            break
    if not vport:
        raise error.TestError("Not virtio serial port '%s' found" % port)
    logfile = "serial-%s-%s.log" % (vport.name, self.vm.name)
    # Attach to the port's host-side unix socket (nc, despite the name).
    socat_cmd = "nc -U %s" % vport.hostfile
    session = aexpect.ShellSession(socat_cmd, auto_close=False,
                                   output_func=utils_misc.log_line,
                                   output_params=(logfile,),
                                   prompt=prompt)
    session.set_linesep(linesep)
    session.sendline()
    # Register once so the framework can clean the session up later
    # (the original appended a second time in the except path).
    self.__sessions__.append(session)
    try:
        remote.handle_prompts(session, username, password, prompt, 180)
        raise error.TestFail("virtio serial '%s' should no " % port +
                             "channel to login")
    except remote.LoginTimeoutError:
        # Timeout is the expected outcome: no login channel on this port.
        logging.info("Can't login via %s" % port)
    return session
def edit_image_xml():
    """
    Interactively rewrite the XML embedded in a saved-image file.

    Runs ``virsh save-image-edit`` on ``vm_save`` (closure variable) and,
    inside the spawned editor, substitutes boot device 'hd' with 'cdrom'.
    ``restore_state`` (closure variable) selects --running/--paused.

    :raise error.TestFail: on an unknown restore_state or editor failure.
    """
    # vi substitute command: <boot dev='hd'/> -> <boot dev='cdrom'/>
    edit_cmd = r":%s /<boot dev='hd'\/>/<boot dev='cdrom'\/>"
    if restore_state == "running":
        option = "--running"
    elif restore_state == "paused":
        option = "--paused"
    else:
        raise error.TestFail("Unknown save-image-define option")
    # Root shell so virsh can open the editor on the saved image.
    session = aexpect.ShellSession("sudo -s")
    try:
        logging.info("Execute virsh save-image-edit %s %s", vm_save, option)
        session.sendline("virsh save-image-edit %s %s " % (vm_save, option))
        logging.info("Replace '<boot dev='hd'/>' to '<boot dev='cdrom'/>'")
        session.sendline(edit_cmd)
        # ESC back to normal mode, then ZZ writes the file and quits vi.
        session.send('\x1b')
        session.send('ZZ')
        # Wait for the shell prompt to confirm the editor exited.
        remote.handle_prompts(session, None, None, r"[\#\$]\s*$")
        session.close()
    except (aexpect.ShellError, aexpect.ExpectError), details:
        log = session.get_output()
        session.close()
        raise error.TestFail("Failed to do save-image-edit: %s\n%s"
                             % (details, log))
def edit_snap_xml(dom_name, edit_opts, edit_cmd):
    """
    Edit domain snapshot xml interactively via ``virsh snapshot-edit``.

    :param dom_name: name of domain
    :param edit_opts: snapshot-edit options (e.g. selecting the snapshot)
    :param edit_cmd: edit command list in interactive mode
    :raise error.TestFail: when the interactive edit fails.
    """
    # Root shell so virsh can spawn the editor with sufficient rights.
    session = aexpect.ShellSession("sudo -s")
    try:
        logging.debug("snapshot-edit options is: %s" % edit_opts)
        logging.debug("edit cmd is: %s" % edit_cmd)
        session.sendline("virsh snapshot-edit %s %s" % (dom_name, edit_opts))
        for i in edit_cmd:
            session.sendline(i)
        # Press ESC
        session.send('\x1b')
        # Save and quit
        session.send('ZZ')
        # Wait for the shell prompt so we know the edit has completed.
        remote.handle_prompts(session, None, None, r"[\#\$]\s*$")
        session.close()
        logging.info("Succeed to do snapshot edit")
    except (aexpect.ShellError, aexpect.ExpectError), details:
        log = session.get_output()
        session.close()
        raise error.TestFail("Failed to do snapshot-edit: %s\n%s"
                             % (details, log))
def run(test, params, env): """ Test steps: 1) Check the environment and get the params from params. 2) while(loop_time < timeout): ttcp command. 3) clean up. """ # Find the ttcp command. try: os_dep.command("ttcp") except ValueError: raise error.TestNAError("Not find ttcp command on host.") # Get VM. vms = env.get_all_vms() for vm in vms: session = vm.wait_for_login() status, _ = session.cmd_status_output("which ttcp") if status: raise error.TestNAError("Not find ttcp command on guest.") # Get parameters from params. timeout = int(params.get("LB_ttcp_timeout", "600")) ttcp_server_command = params.get("LB_ttcp_server_command", "ttcp -s -r -v -D -p5015") ttcp_client_command = params.get("LB_ttcp_client_command", "ttcp -s -t -v -D -p5015 -b65536 -l65536 -n1000 -f K") host_session = aexpect.ShellSession("sh") try: current_time = int(time.time()) end_time = current_time + timeout # Start the loop from current_time to end_time. while current_time < end_time: for vm in vms: session = vm.wait_for_login() host_session.sendline(ttcp_server_command) cmd = ("%s %s" % (ttcp_client_command, utils_net.get_host_ip_address(params))) def _ttcp_good(): status, output = session.cmd_status_output(cmd) logging.debug(output) if status: return False return True if not utils_misc.wait_for(_ttcp_good, timeout=5): status, output = session.cmd_status_output(cmd) if status: raise error.TestFail("Failed to run ttcp command on guest.\n" "Detail: %s." % output) remote.handle_prompts(host_session, None, None, r"[\#\$]\s*$") current_time = int(time.time()) finally: # Clean up. host_session.close() session.close()
def run(test, params, env): """ Test virsh nwfilter-edit with uuid. 1) Prepare parameters. 2) Run nwfilter-edit command. 3) Check result. 4) Clean env """ # Prepare parameters filter_name = params.get("edit_filter_name", "") status_error = params.get("status_error", "no") new_uuid = "11111111-1111-1111-1111-111111111111" edit_cmd = ":2s/<uuid>.*$/<uuid>%s<\/uuid>/" % new_uuid # Since commit 46a811d, the logic changed for not allow update filter # uuid, so decide status_error with libvirt version. if libvirt_version.version_compare(1, 2, 7): status_error = True else: status_error = False # Backup filter xml new_filter = libvirt_xml.NwfilterXML() filterxml = new_filter.new_from_filter_dumpxml(filter_name) logging.debug("the filter xml is: %s" % filterxml.xmltreefile) try: # Run command session = aexpect.ShellSession("sudo -s") try: session.sendline("virsh nwfilter-edit %s" % filter_name) session.sendline(edit_cmd) # Press ESC session.send('\x1b') # Save and quit session.send('ZZ') remote.handle_prompts(session, None, None, r"[\#\$]\s*$") session.close() if not status_error: logging.info("Succeed to do nwfilter edit") else: raise error.TestFail("edit uuid should fail but got succeed.") except (aexpect.ShellError, aexpect.ExpectError, remote.LoginTimeoutError), details: log = session.get_output() session.close() if "Try again? [y,n,f,?]:" in log and status_error: logging.debug("edit uuid failed as expected.") else: raise error.TestFail("Failed to do nwfilter-edit: %s\n%s" % (details, log)) finally: # Clean env virsh.nwfilter_undefine(filter_name, debug=True) virsh.nwfilter_define(filterxml.xml, debug=True)
def remote_login(client, host, src, params_login, host_ip): src_name = src if src != "localhost": src_name = src.name logging.info("Login %s from %s" % (host, src)) port = params_login["target_port"] username = params_login["username"] password = params_login["password"] prompt = params_login["shell_prompt"] linesep = eval("'%s'" % params_login.get("shell_linesep", r"\n")) quit_cmd = params.get("quit_cmd", "exit") if host == host_ip: # Try to login from guest to host. prompt = "^\[.*\][\#\$]\s*$" linesep = "\n" username = params_login["host_username"] password = params_login["host_password"] quit_cmd = "exit" if client == "ssh": # We only support ssh for Linux in this test cmd = ("ssh -o UserKnownHostsFile=/dev/null " "-o StrictHostKeyChecking=no " "-o PreferredAuthentications=password -p %s %s@%s" % (port, username, host)) elif client == "telnet": cmd = "telnet -l %s %s %s" % (username, host, port) else: raise remote.LoginBadClientError(client) if src == "localhost": logging.debug("Login with command %s" % cmd) session = aexpect.ShellSession(cmd, linesep=linesep, prompt=prompt) else: if params_login.get("os_type") == "windows": if client == "telnet": cmd = "C:\\telnet.py %s %s " % (host, username) cmd += "%s \"%s\" && " % (password, prompt) cmd += "C:\\wait_for_quit.py" cmd = "%s || ping 127.0.0.1 -n 5 -w 1000 > nul" % cmd else: cmd += " || sleep 5" session = src.wait_for_login() logging.debug("Sending login command: %s" % cmd) session.sendline(cmd) try: out = remote.handle_prompts(session, username, password, prompt, timeout, debug=True) except Exception, err: session.close() raise err
def modify_vcpu(source, edit_cmd):
    """
    Apply one editor command to a domain via interactive ``virsh edit``.

    :param source: domain name/id/uuid handed to ``virsh edit``.
    :param edit_cmd: single vi command line to run inside the editor.
    :return: True if the edit succeeded, False on any failure.
    """
    editor = aexpect.ShellSession("sudo -s")
    try:
        for line in ("virsh edit %s" % source, edit_cmd):
            editor.sendline(line)
        # Leave insert/command-line mode, then save the XML and quit vi.
        editor.send('\x1b')
        editor.send('ZZ')
        # Block until the shell prompt returns, i.e. the edit finished.
        remote.handle_prompts(editor, None, None, r"[\#\$]\s*$")
        editor.close()
        return True
    except:
        # Any failure (including unexpected ones) maps to False.
        return False
def edit_ifstart_mode(iface_name, old_mode, new_mode): """ Set the start mode of a interface. """ edit_cmd = ":%s/mode='{0}'/mode='{1}'".format(old_mode, new_mode) session = aexpect.ShellSession("sudo -s") try: session.sendline("virsh iface-edit %s" % iface_name) logging.info("Change start mode from %s to %s", old_mode, new_mode) session.sendline(edit_cmd) session.send('\x1b') session.send('ZZ') remote.handle_prompts(session, None, None, r"[\#\$]\s*$") session.close() except (aexpect.ShellError, aexpect.ExpectError), details: log = session.get_output() session.close() raise error.TestFail("Failed to do iface-edit: %s\n%s" % (details, log))
def edit_net_xml(): edit_cmd = r":%s /100.254/100.253" session = aexpect.ShellSession("sudo -s") try: logging.info("Execute virsh net-edit %s", net_name) session.sendline("virsh net-edit %s" % net_name) logging.info("Change the ip value of dhcp end") session.sendline(edit_cmd) session.send('\x1b') session.send('ZZ') remote.handle_prompts(session, None, None, r"[\#\$]\s*$") session.close() except (aexpect.ShellError, aexpect.ExpectError), details: log = session.get_output() session.close() raise error.TestFail("Failed to do net-edit: %s\n%s" % (details, log))
def modify_vcpu(source, edit_cmd):
    """
    Modify vm's cpu information through an interactive ``virsh edit``.

    :param source: domain name/id/uuid handed to ``virsh edit``.
    :param edit_cmd: an editor command line (vi syntax) to apply.
    :return: True if edit succeeded, False if edit failed.
    """
    session = aexpect.ShellSession("sudo -s")
    try:
        session.sendline("virsh edit %s" % source)
        session.sendline(edit_cmd)
        # ESC leaves insert/command-line mode ...
        session.send('\x1b')
        # ... and ZZ saves the XML and quits the editor.
        session.send('ZZ')
        # use sleep(1) to make sure the modify has been completed.
        # NOTE(review): a fixed delay instead of waiting for the shell
        # prompt (cf. the handle_prompts() variant of this helper); this
        # may race on a slow host -- confirm before relying on it.
        time.sleep(1)
        session.close()
        return True
    except:
        # Broad except is deliberate: any failure maps to False.
        return False
def edit_pool(pool, edit_cmd): """ Edit libvirt storage pool. :param pool: pool name or uuid. :param edit_cmd : edit commad line. """ session = aexpect.ShellSession("sudo -s") try: session.sendline("virsh pool-edit %s" % pool) logging.info("edit_cmd: %s", edit_cmd) for cmd in edit_cmd: logging.info("cmd: %s", cmd) session.sendline(cmd) session.send('\x1b') session.send('ZZ') remote.handle_prompts(session, None, None, r"[\#\$]\s*$") session.close() logging.info("Succeed to do pool edit.") except (aexpect.ShellError, aexpect.ExpectError), details: log = session.get_output() session.close() raise error.TestFail("Failed to do pool edit: %s\n%s" % (details, log))
def edit_filter_xml(filter_name, edit_cmd): """ Edit network filter xml :param filter_name: filter name or uuid :param edit_cmd: edit command list in interactive mode """ session = aexpect.ShellSession("sudo -s") try: session.sendline("virsh nwfilter-edit %s" % filter_name) for i in edit_cmd: session.sendline(i) # Press ESC session.send('\x1b') # Save and quit session.send('ZZ') remote.handle_prompts(session, None, None, r"[\#\$]\s*$") session.close() logging.info("Succeed to do nwfilter edit") except (aexpect.ShellError, aexpect.ExpectError), details: log = session.get_output() session.close() raise error.TestFail("Failed to do nwfilter-edit: %s\n%s" % (details, log))
def run_virsh_console(test, params, env):
    """
    Test command: virsh console.

    Prepares the guest in the requested state, attaches with
    ``virsh console``, verifies an interactive login, then restores
    the original guest configuration.
    """
    os_type = params.get("os_type")
    if os_type == "windows":
        raise error.TestNAError("SKIP:Do not support Windows.")

    # Get parameters for test
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_ref = params.get("virsh_console_vm_ref", "domname")
    vm_state = params.get("virsh_console_vm_state", "running")
    login_user = params.get("console_login_user", "root")
    if login_user == "root":
        login_passwd = params.get("password")
    else:
        login_passwd = params.get("console_password_not_root")
    status_error = "yes" == params.get("status_error", "no")
    domuuid = vm.get_uuid()
    domid = ""

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_dumpxml(vm_name)

    if vm.is_alive():
        vm.destroy()
    # Add a serial console to the guest XML (helper defined elsewhere).
    xml_console_config(vm_name)

    try:
        # Guarantee cleanup after config vm console failed.
        vm_console_config(vm)

        # Prepare vm state for test
        if vm_state != "shutoff":
            vm.start(autoconsole=False)
            vm.wait_for_login()
            domid = vm.get_id()
        if vm_state == "paused":
            vm.pause()

        # Map the symbolic reference onto a concrete identifier.
        if vm_ref == "domname":
            vm_ref = vm_name
        elif vm_ref == "domid":
            vm_ref = domid
        elif vm_ref == "domuuid":
            vm_ref = domuuid
        elif domid and vm_ref == "hex_id":
            vm_ref = hex(int(domid))

        # Run command
        command = "virsh console %s" % vm_ref
        console_session = aexpect.ShellSession(command)

        # verify_virsh_console is a helper defined elsewhere in this file.
        status = verify_virsh_console(console_session, login_user,
                                      login_passwd, debug=True)
        console_session.close()

    finally:
        # Recover state of vm.
        if vm_state == "paused":
            vm.resume()

        # Recover vm
        if vm.is_alive():
            vm.destroy()
        xml_console_recover(vmxml_backup)

    # Check result
    if status_error:
        if status:
            raise error.TestFail("Run successful with wrong command!")
    else:
        if not status:
            raise error.TestFail("Run failed with right command!")
def run(test, params, env):
    """
    Virsh create test with --pass-fds for container

    Creates an LXC container whose XML is generated on the fly, passes
    three open host file descriptors to it, then checks inside the
    container (via console) that those files are open.
    """
    fds_options = params.get("create_lxc_fds_options", "")
    other_options = params.get("create_lxc_other_options", "")
    uri = params.get("connect_uri", "lxc:///")
    vm_name = params.get("vms")
    vcpu = params.get("create_lxc_vcpu", 1)
    max_mem = params.get("create_lxc_maxmem", 500000)
    cur_mem = params.get("create_lxc_curmem", 500000)
    dom_type = params.get("create_lxc_domtype", "lxc")
    os_type = params.get("create_lxc_ostype", "exe")
    os_arch = params.get("create_lxc_osarch", "x86_64")
    os_init = params.get("create_lxc_osinit", "/bin/sh")
    emulator_path = params.get("create_lxc_emulator",
                               "/usr/libexec/libvirt_lxc")
    # Three host files whose descriptors are handed to the container.
    tmpfile1 = params.get("create_lxc_tmpfile1", "/tmp/foo")
    tmpfile2 = params.get("create_lxc_tmpfile2", "/tmp/bar")
    tmpfile3 = params.get("create_lxc_tmpfile3", "/tmp/wizz")

    def container_xml_generator():
        """
        Generate container xml
        """
        vmxml = vm_xml.VMXML(dom_type)
        vmxml.vm_name = vm_name
        vmxml.max_mem = max_mem
        vmxml.current_mem = cur_mem
        vmxml.vcpu = vcpu
        vmxml.os_type = os_type
        vmxml.os_arch = os_arch
        vmxml.os_init = os_init
        # Generate emulator
        emulator = Emulator()
        emulator.path = emulator_path
        # Generate console
        console = Console()
        # Add emulator and console in devices
        devices = vm_xml.VMXMLDevices()
        devices.append(emulator)
        devices.append(console)
        logging.debug("device is %s", devices)
        vmxml.set_devices(devices)
        return vmxml

    fd1 = open(tmpfile1, 'w')
    fd2 = open(tmpfile2, 'w')
    fd3 = open(tmpfile3, 'w')
    try:
        options = "%s %s,%s,%s %s" % (fds_options, fd1.fileno(),
                                      fd2.fileno(), fd3.fileno(),
                                      other_options)
        vmxml = container_xml_generator()
        logging.debug("xml is %s", commands.getoutput("cat %s" % vmxml.xml))
        if "--console" not in options:
            # Create first, then attach a console in a second command.
            output = virsh.create(vmxml.xml, options, uri=uri)
            if output.exit_status:
                raise error.TestFail("Create %s domain failed:%s"
                                     % (dom_type, output.stderr))
            logging.info("Domain %s created, will check with console",
                         vm_name)
            command = "virsh -c %s console %s" % (uri, vm_name)
        else:
            # --console present: create and attach in one shot.
            command = "virsh -c %s create %s %s" % (uri, vmxml.xml, options)
        session = aexpect.ShellSession(command)
        time.sleep(2)
        # Each passed fd should show up as an open file of the container
        # init shell.
        for i in (tmpfile1, tmpfile2, tmpfile3):
            lsofcmd = "lsof|grep '^sh.*%s'" % i
            cmd_status, cmd_output = session.cmd_status_output(lsofcmd)
            if cmd_status != 0:
                raise error.TestFail("Can not find file %s in container" % i)
            else:
                logging.info("Find open file in guest: %s", cmd_output)
        session.close()
        vm = env.get_vm(vm_name)
        if "--autodestroy" in options:
            if vm.is_alive():
                raise error.TestFail("Guest still exist after close session "
                                     "with option --autodestroy")
            logging.info("Guest already destroyed after session closed")
        elif not vm.is_alive():
            raise error.TestFail("Guest is not running after close session!")
        else:
            logging.info("Guest still exist after session closed")
    finally:
        fd1.close()
        fd2.close()
        fd3.close()
        os.remove(tmpfile1)
        os.remove(tmpfile2)
        os.remove(tmpfile3)
def run(test, params, env):
    """
    Test for perf kvm command.

    1) Check the perf kvm on host.
    2) Get variables.
    3) generate perf kvm command.
    4) Mount guest filesystem for --guestmount or get kernel info files
       from guest for --guestkallsyms
    5) Execute command for each case.
    6) Verify the result, compare the content from host and guest.
    7) Cleanup.
    """
    perf_kvm_exec = _perf_kvm_help()
    if not perf_kvm_exec:
        raise error.TestNAError("No perf-kvm found in your host.")

    vm = env.get_vm(params.get("main_vm", "virt-tests-vm1"))
    vms = env.get_all_vms()
    guestmount = ("yes" == params.get("perf_kvm_guestmount", "no"))

    host = ("yes" == params.get("perf_kvm_host", "no"))
    guest = ("yes" == params.get("perf_kvm_guest", "no"))
    multi_guest = ("yes" == params.get("perf_kvm_multi_guest", "no"))

    # Which perf-kvm subcommands this variant exercises.
    top = ("yes" == params.get("perf_kvm_top", "no"))
    record = ("yes" == params.get("perf_kvm_record", "no"))
    report = ("yes" == params.get("perf_kvm_report", "no"))
    diff = ("yes" == params.get("perf_kvm_diff", "no"))
    buildid_list = ("yes" == params.get("perf_kvm_buildid_list", "no"))

    guestmount_path = None
    guestkallsyms_path = None
    guestmodules_path = None

    output_of_record = os.path.join(test.tmpdir, "record.output")
    # As diff command need two files, init a variable for it.
    output_for_diff = os.path.join(test.tmpdir, "record.output.diff")
    host_result_file = os.path.join(test.tmpdir, "perf_kvm_result")
    guest_result_file = os.path.join(test.tmpdir, "guest_result")
    result_on_guest = "/root/result"

    # Build the base perf kvm command line.
    command = perf_kvm_exec
    if host:
        command = "%s --host" % command
    if guest:
        command = "%s --guest" % command

    session = vm.wait_for_login()
    try:
        if guestmount:
            try:
                os_dep.command("sshfs")
            except ValueError:
                raise error.TestNAError("Please install fuse-sshfs for "
                                        "perf kvm with --guestmount.")
            if multi_guest:
                if len(vms) < 2:
                    raise error.TestNAError("Only one vm here, skipping "
                                            "this case for multi-guest.")
            guestmount_path = mount_guestfs_with_sshfs(vms)
            command = "%s --guestmount %s" % (command, guestmount_path)
        else:
            # Pull /proc/kallsyms and /proc/modules out of the guest.
            guestkallsyms_path, guestmodules_path = get_kernel_file(vm)
            command = "%s --guestkallsyms %s --guestmodules %s" % (
                command, guestkallsyms_path, guestmodules_path)

        # Generate guest load so perf has samples to collect.
        session.cmd("dd if=/dev/zero of=/dev/null bs=1 count=1G &")

        if top:
            session = vm.wait_for_login()
            # Time for top, there is no sleep subcommand in perf top such
            # as in perf record, then we can not control the time in perf
            # command.  So, we have to use timeout command to wrap it here.
            host_command = "timeout 30 %s top 1>%s" % (command,
                                                       host_result_file)
            guest_command = "timeout 30 perf top >%s" % (result_on_guest)
            host_session = aexpect.ShellSession("sh")
            host_session.sendline(host_command)
            _, output = session.cmd_status_output(guest_command)
            host_session.close()
            if (host and guest):
                # The hottest guest kernel symbol seen by the host should
                # rank high in the guest's own perf output too.
                vm.copy_files_from(result_on_guest, guest_result_file)
                host_first = find_first_kernel_symbol(host_result_file, "g")
                index_in_guest = find_symbol_in_result(guest_result_file,
                                                       host_first)
                if index_in_guest < 0:
                    raise error.TestFail("Not find symbol %s in guest "
                                         "result." % host_first)
                if index_in_guest > 5:
                    raise error.TestFail(
                        "Perf information for guest is not correct."
                        "The first symbol in host_result is %s, "
                        "but this symbol is in %s index in result "
                        "from guest.\n" % (host_first, index_in_guest))
        if record:
            session = vm.wait_for_login()
            host_command = "%s record -a sleep 10 " % (command)
            guest_command = "perf record -a sleep 10 &"
            status, output = session.cmd_status_output(guest_command)
            if status:
                raise error.TestNAError("Please make sure there is perf "
                                        "command on guest.\n Detail: %s."
                                        % output)
            result = utils.run(host_command, ignore_status=True)
            if result.exit_status:
                raise error.TestFail(result)
        if report:
            session = vm.wait_for_login()
            host_command = "%s report 1>%s" % (command, host_result_file)
            guest_command = "perf report 1>%s" % (result_on_guest)
            status, output = session.cmd_status_output(guest_command)
            if status:
                raise error.TestNAError("Please make sure there is perf "
                                        "command on guest.\n Detail: %s."
                                        % output)
            result = utils.run(host_command, ignore_status=True)
            if result.exit_status:
                raise error.TestFail(result)
            if (host and guest):
                # Same host-vs-guest symbol cross-check as in the top case.
                vm.copy_files_from(result_on_guest, guest_result_file)
                host_first = find_first_kernel_symbol(host_result_file, "g")
                index_in_guest = find_symbol_in_result(guest_result_file,
                                                       host_first)
                if index_in_guest < 0:
                    raise error.TestFail("Not find symbol %s in guest "
                                         "result." % host_first)
                if index_in_guest > 5:
                    raise error.TestFail(
                        "Perf information for guest is not correct."
                        "The first symbol in host_result is %s, "
                        "but this symbol is in %s index in result "
                        "from guest.\n" % (host_first, index_in_guest))
        if diff:
            session = vm.wait_for_login()
            host_command = "%s record -o %s -a sleep 10" % (
                command, output_of_record)
            # Run twice to capture two perf data files for diff.
            result = utils.run(host_command, ignore_status=True)
            if result.exit_status:
                raise error.TestFail(result)
            host_command = "%s record -o %s -a sleep 10" % (
                command, output_for_diff)
            result = utils.run(host_command, ignore_status=True)
            if result.exit_status:
                raise error.TestFail(result)
            host_command = "%s diff %s %s" % (command, output_of_record,
                                              output_for_diff)
            result = utils.run(host_command, ignore_status=True)
            if result.exit_status:
                raise error.TestFail(result)
        if buildid_list:
            host_command = "%s buildid-list" % command
            result = utils.run(host_command, ignore_status=True)
            if result.exit_status:
                raise error.TestFail(result)
    finally:
        if session:
            session.close()
        umount_guestfs_with_sshfs(vms)
        # Remove every temporary artefact this test created.
        if guestkallsyms_path and os.path.exists(guestkallsyms_path):
            os.remove(guestkallsyms_path)
        if guestmodules_path and os.path.exists(guestmodules_path):
            os.remove(guestmodules_path)
        if host_result_file and os.path.exists(host_result_file):
            os.remove(host_result_file)
        if guest_result_file and os.path.exists(guest_result_file):
            os.remove(guest_result_file)
def run(test, params, env):
    """
    Test virsh create command including parameters except --pass-fds
    because it is used in lxc.

    Basic test scenarios:
    1. --console with all combination(with other options)
    2. --autodestroy with left combination
    3. --paused itself
    """
    vm_name = params.get("main_vm")
    options = params.get("create_options", "")
    status_error = ("yes" == params.get("status_error", "no"))
    c_user = params.get("create_login_user", "root")
    readonly = params.get("readonly", False)
    if c_user == "root":
        c_passwd = params.get("password")
    else:
        c_passwd = params.get("create_login_password_nonroot")

    vm = env.get_vm(vm_name)
    if vm.exists():
        # Existing domain: back up its XML and undefine it so 'create'
        # starts a transient guest from the backup.
        if vm.is_alive():
            vm.destroy()
        xmlfile = vm.backup_xml()
        vm.undefine()
    else:
        xmlfile = params.get("create_domain_xmlfile")
        if xmlfile is None:
            raise error.TestFail("Please provide domain xml file for create"
                                 " or existing domain name with main_vm"
                                 " = xx")
        # get vm name from xml file
        xml_cut = commands.getoutput("grep '<name>.*</name>' %s" % xmlfile)
        vm_name = xml_cut.strip(' <>').strip("name").strip("<>/")
        logging.debug("vm_name is %s", vm_name)
        vm = env.get_vm(vm_name)

    try:
        def create_status_check(vm):
            """
            check guest status
            1. if have options --paused: check status and resume
            2. check if guest is running after 1
            """
            # sleep to make sure guest is paused
            time.sleep(2)
            if "--paused" in options:
                if not vm.is_paused():
                    raise error.TestFail("Guest status is not paused with"
                                         "options %s, state is %s"
                                         % (options, vm.state()))
                else:
                    logging.info("Guest status is paused.")
                vm.resume()
            if vm.state() == "running":
                logging.info("Guest is running now.")
            else:
                raise error.TestFail("Fail to create guest, guest state is"
                                     " %s" % vm.state())

        def create_autodestroy_check(vm):
            """
            check if guest will disappear with --autodestroy
            """
            if vm.exists():
                raise error.TestFail("Guest still exist with options %s"
                                     % options)
            else:
                logging.info("Guest does not exist after session closed.")

        try:
            if status_error:
                # Expected-failure path: create must not leave a running
                # guest behind.
                output = virsh.create(xmlfile, options, readonly=readonly)
                if output.exit_status:
                    logging.info("Fail to create guest as expect:%s",
                                 output.stderr)
                if vm.state() == "running":
                    raise error.TestFail("Expect fail, but succeed indeed")
            elif "--console" in options:
                # Use session for console
                command = "virsh create %s %s" % (xmlfile, options)
                session = aexpect.ShellSession(command)
                # check domain status including paused and running
                create_status_check(vm)
                status = utils_test.libvirt.verify_virsh_console(
                    session, c_user, c_passwd, timeout=90, debug=True)
                if not status:
                    raise error.TestFail("Fail to verify console")
                session.close()
                # check if domain exist after session closed
                if "--autodestroy" in options:
                    create_autodestroy_check(vm)
            elif "--autodestroy" in options:
                # Use session for virsh interactive mode because
                # guest will be destroyed after virsh exit
                command = "virsh"
                session = aexpect.ShellSession(command)
                while True:
                    # -1 is the last pattern (the "virsh # " prompt),
                    # -2 the "Domain ... created" line.
                    match, text = session.read_until_any_line_matches(
                        [r"Domain \S+ created from %s" % xmlfile,
                         r"virsh # "], timeout=10, internal_timeout=1)
                    if match == -1:
                        logging.info("Run create %s %s", xmlfile, options)
                        command = "create %s %s" % (xmlfile, options)
                        session.sendline(command)
                    elif match == -2:
                        logging.info("Domain created from %s", xmlfile)
                        break
                create_status_check(vm)
                logging.info("Close session!")
                session.close()
                # check if domain exist after session closed
                create_autodestroy_check(vm)
            else:
                # have --paused option or none options
                output = virsh.create(xmlfile, options)
                if output.exit_status:
                    raise error.TestFail("Fail to create domain:%s"
                                         % output.stderr)
                create_status_check(vm)
        except (aexpect.ShellError, aexpect.ExpectError), detail:
            log = session.get_output()
            session.close()
            vm.define(xmlfile)
            raise error.TestFail("Verify create failed:\n%s\n%s"
                                 % (detail, log))
    finally:
        # Guest recovery
        vm.define(xmlfile)
def run(test, params, env): """ Test the command virsh metadata Run in 4 steps: 1. Set domain metadata 2. Get domain metadata 3. Restart libvirtd then get domain metadata again 4. Remove domain metadata then get domain metadata again """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) metadata_uri = params.get("metadata_uri") metadata_key = params.get("metadata_key") metadata_value = params.get("metadata_value", "") metadata_option = params.get("metadata_option", "") virsh_dargs = {'debug': True, 'ignore_status': True} metadata_set = "yes" == params.get("metadata_set", "no") metadata_get = "yes" == params.get("metadata_get", "yes") metadata_remove = "yes" == params.get("metadata_remove", "no") restart_libvirtd = "yes" == params.get("restart_libvirtd", "no") status_error = "yes" == params.get("status_error", "no") if not metadata_uri: raise error.TestErrorr("'uri' is needed") vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) # Start VM if vm.state() != "running": vm.destroy() vm.start() def pretty_xml(xml_str): return xml.dom.minidom.parseString(xml_str).toprettyxml() def check_result(result, expect_status, expect_output=None): """ Check virsh metadata command """ utlv.check_exit_status(result, expect_status) if result.exit_status == 0 and expect_output: expect_output = pretty_xml(expect_output) logging.debug("Expect metadata: %s", expect_output) output = result.stdout.strip() output = pretty_xml(output) logging.debug("Command get metadata: %s", output) if output != expect_output: raise error.TestFail("Metadat is not expected") def get_metadata(metadata_option=""): """ Get domain metadata """ option = metadata_option.replace("--edit", "") result = virsh.metadata(vm_name, metadata_uri, options=option, key=metadata_key, **virsh_dargs) return result try: # Set metadata XML if metadata_set: if not metadata_key: raise error.TestErrorr("'key' is needed") if not metadata_value: raise error.TestErrorr("New metadata is needed") # Parse metadata value if "--edit" in 
metadata_option: virsh_cmd = r"virsh metadata %s --uri %s --key %s %s" virsh_cmd = virsh_cmd % (vm_name, metadata_uri, metadata_key, metadata_option) session = aexpect.ShellSession("sudo -s") logging.info("Running command: %s", virsh_cmd) try: session.sendline(virsh_cmd) session.sendline(r":insert") session.sendline(metadata_value) session.sendline(".") session.send('ZZ') remote.handle_prompts(session, None, None, r"[\#\$]\s*$", debug=True) except Exception, e: logging.error("Error occured: %s", e) session.close() else: result = virsh.metadata(vm_name, metadata_uri, options=metadata_option, key=metadata_key, new_metadata=metadata_value, **virsh_dargs) check_result(result, status_error) # Get metadata for option in metadata_option.split(): if option == "--config": vm.destroy() vm.start() check_result(get_metadata(metadata_option=option), status_error, metadata_value) elif metadata_get: check_result(get_metadata(metadata_option=option), status_error, metadata_value) # Restart libvirtd: if restart_libvirtd: libvirtd = Libvirtd() libvirtd.restart() # Get metadata again for option in metadata_option.split(): check_result(get_metadata(metadata_option=option), status_error, metadata_value) # Remove metadata if metadata_remove: remove_option = metadata_option.replace("--edit", "") remove_option += " --remove" result = virsh.metadata(vm_name, metadata_uri, options=remove_option, key=metadata_key, **virsh_dargs) check_result(result, status_error) # Get metadata again for option in metadata_option.split(): check_result(get_metadata(metadata_option=option), True)
def run(test, params, env):
    """
    Test command: virsh console.

    Covers qemu and lxc guests, optional API-ACL (polkit) execution as an
    unprivileged user, several domain reference forms and guest states.
    """
    os_type = params.get("os_type")
    if os_type == "windows":
        raise error.TestNAError("SKIP:Do not support Windows.")

    # Get parameters for test
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_ref = params.get("virsh_console_vm_ref", "domname")
    vm_state = params.get("virsh_console_vm_state", "running")
    login_user = params.get("console_login_user", "root")
    if login_user == "root":
        login_passwd = params.get("password")
    else:
        login_passwd = params.get("console_password_not_root")
    status_error = "yes" == params.get("status_error", "no")
    domuuid = vm.get_uuid()
    domid = ""
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            # NOTE(review): '******' looks like a redacted placeholder for
            # a real fallback username -- confirm against the original.
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    if vm.is_alive():
        vm.destroy()
    if vm.is_qemu():
        xml_console_config(vm_name)

    try:
        # Guarantee cleanup after config vm console failed.
        if vm.is_qemu():
            vm_console_config(vm)

        # Prepare vm state for test
        if vm_state != "shutoff":
            vm.start(autoconsole=False)
            if vm.is_qemu():
                # LXC cannot login here, because it will use virsh console
                # to login, it will break the console action in next step
                vm.wait_for_login()
            domid = vm.get_id()
        if vm_state == "paused":
            vm.pause()

        # Map the symbolic reference onto a concrete identifier.
        if vm_ref == "domname":
            vm_ref = vm_name
        elif vm_ref == "domid":
            vm_ref = domid
        elif vm_ref == "domuuid":
            vm_ref = domuuid
        elif domid and vm_ref == "hex_id":
            vm_ref = hex(int(domid))

        # Run command
        if params.get('setup_libvirt_polkit') == 'yes':
            # Polkit ACL case: run virsh as the unprivileged user.
            cmd = "virsh -c %s console %s" % (uri, vm_ref)
            command = "su - %s -c '%s'" % (unprivileged_user, cmd)
        else:
            command = "virsh console %s" % vm_ref
        console_session = aexpect.ShellSession(command)

        status = utils_test.libvirt.verify_virsh_console(console_session,
                                                         login_user,
                                                         login_passwd,
                                                         timeout=10,
                                                         debug=True)
        console_session.close()

    finally:
        # Recover state of vm.
        if vm_state == "paused":
            vm.resume()

        # Recover vm
        if vm.is_alive():
            vm.destroy()
        if vm.is_qemu():
            xml_console_recover(vmxml_backup)

    # Check result
    if status_error:
        if status:
            raise error.TestFail("Run successful with wrong command!")
    else:
        if not status:
            raise error.TestFail("Run failed with right command!")
def run(test, params, env):
    """
    Test command: virsh blockcopy.

    This command can copy a disk backing image chain to dest.
    1. Positive testing
        1.1 Copy a disk to a new image file.
        1.2 Reuse existing destination copy.
        1.3 Valid blockcopy timeout and bandwidth test.
    2. Negative testing
        2.1 Copy a disk to a non-exist directory.
        2.2 Copy a disk with invalid options.
        2.3 Do block copy for a persistent domain.

    :param test: test object (provides ``tmpdir``)
    :param params: dict-like test parameters
    :param env: test environment holding the VM object

    NOTE(review): this function appears TRUNCATED in this chunk -- it ends
    with a dangling ``else:`` (the expected-failure branch and the cleanup/
    ``finally`` section are missing). Recover the tail from version control
    before editing logic here.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    target = params.get("target_disk", "")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_source_protocol = params.get("disk_source_protocol")
    disk_type = params.get("disk_type")
    pool_name = params.get("pool_name")
    image_size = params.get("image_size")
    emu_image = params.get("emulated_image")
    copy_to_nfs = "yes" == params.get("copy_to_nfs", "no")
    mnt_path_name = params.get("mnt_path_name")
    # check the source disk
    if not target:
        raise error.TestFail("Require target disk to copy")
    if vm_xml.VMXML.check_disk_exist(vm_name, target):
        logging.debug("Find %s in domain %s.", target, vm_name)
    else:
        raise error.TestFail("Can't find %s in domain %s."
                             % (target, vm_name))
    options = params.get("blockcopy_options", "")
    bandwidth = params.get("blockcopy_bandwidth", "")
    default_timeout = int(params.get("default_timeout", "300"))
    reuse_external = "yes" == params.get("reuse_external", "no")
    persistent_vm = params.get("persistent_vm", "no")
    status_error = "yes" == params.get("status_error", "no")
    active_error = "yes" == params.get("active_error", "no")
    active_snap = "yes" == params.get("active_snap", "no")
    active_save = "yes" == params.get("active_save", "no")
    check_state_lock = "yes" == params.get("check_state_lock", "no")
    with_shallow = "yes" == params.get("with_shallow", "no")
    with_blockdev = "yes" == params.get("with_blockdev", "no")
    bug_url = params.get("bug_url", "")
    timeout = int(params.get("timeout", 1200))
    rerun_flag = 0
    blkdev_n = None
    back_n = 'blockdev-backing-iscsi'
    snapshot_external_disks = []
    original_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    tmp_dir = data_dir.get_tmp_dir()

    # Prepare dest path params
    dest_path = params.get("dest_path", "")
    dest_format = params.get("dest_format", "")
    # Ugh... this piece of chicanery brought to you by the QemuImg which
    # will "add" the 'dest_format' extension during the check_format code.
    # So if we create the file with the extension and then remove it when
    # doing the check_format later, then we avoid erroneous failures.
    dest_extension = ""
    if dest_format != "":
        dest_extension = ".%s" % dest_format

    # Prepare for --reuse-external option
    if reuse_external:
        options += "--reuse-external --wait"
        # Set rerun_flag=1 to do blockcopy twice, and the first time created
        # file can be reused in the second time if no dest_path given
        # This will make sure the image size equal to original disk size
        if dest_path == "/path/non-exist":
            if os.path.exists(dest_path) and not os.path.isdir(dest_path):
                os.remove(dest_path)
        else:
            rerun_flag = 1

    # Prepare other options
    if dest_format == "raw":
        options += " --raw"
    if with_blockdev:
        if not libvirt_version.version_compare(1, 2, 13):
            raise error.TestNAError("--blockdev option not supported in "
                                    "current version")
        options += " --blockdev"
    if len(bandwidth):
        options += " --bandwidth %s" % bandwidth

    # Prepare acl options
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            # NOTE(review): '******' looks like a scrubbed placeholder for
            # a concrete test username -- confirm against original source.
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    if not copy_to_nfs:
        raise error.TestNAError("Bug will not fix:"
                                " https://bugzilla.redhat.com/show_bug."
                                "cgi?id=924151")

    # Common keyword arguments forwarded to every virsh.blockcopy call.
    extra_dict = {'uri': uri, 'unprivileged_user': unprivileged_user,
                  'debug': True, 'ignore_status': True, 'timeout': timeout}

    # Enable verbose libvirtd logging so _blockjob_and_libvirtd_chk can
    # grep the daemon log for the state-change-lock error pattern.
    libvirtd_utl = utils_libvirtd.Libvirtd()
    libvirtd_conf = utils_config.LibvirtdConfig()
    libvirtd_conf["log_filters"] = '"3:json 1:libvirt 1:qemu"'
    libvirtd_log_path = os.path.join(test.tmpdir, "libvirtd.log")
    libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
    logging.debug("the libvirtd config file content is:\n %s" %
                  libvirtd_conf)
    libvirtd_utl.restart()

    def check_format(dest_path, dest_extension, expect):
        """
        Check the image format

        :param dest_path: Path of the copy to create
        :param dest_extension: Extension (".fmt") appended to the path
        :param expect: Expect image format
        :raises error.TestFail: when the actual format differs
        """
        # And now because the QemuImg will add the extension for us
        # we have to remove it here.
        # NOTE(review): str.strip() removes *characters*, not a suffix --
        # this only works by accident for typical paths; verify intent.
        path_noext = dest_path.strip(dest_extension)
        params['image_name'] = path_noext
        params['image_format'] = expect
        image = qemu_storage.QemuImg(params, "/", path_noext)
        if image.get_format() == expect:
            logging.debug("%s format is %s.", dest_path, expect)
        else:
            raise error.TestFail("%s format is not %s." %
                                 (dest_path, expect))

    def _blockjob_and_libvirtd_chk(cmd_result):
        """
        Raise TestFail when blockcopy fail with block-job-complete error or
        blockcopy hang with state change lock.
        """
        bug_url_ = "https://bugzilla.redhat.com/show_bug.cgi?id=1197592"
        err_msg = "internal error: unable to execute QEMU command"
        err_msg += " 'block-job-complete'"
        if err_msg in cmd_result.stderr:
            raise error.TestFail("Hit on bug: %s" % bug_url_)

        err_pattern = "Timed out during operation: cannot acquire"
        err_pattern += " state change lock"
        # chk_libvirtd_log is defined elsewhere in this module.
        ret = chk_libvirtd_log(libvirtd_log_path, err_pattern, "error")
        if ret:
            raise error.TestFail("Hit on bug: %s" % bug_url_)

    def _blockcopy_cmd():
        """
        Run blockcopy command; return the result on success, False on
        failure (suitable as a utils_misc.wait_for predicate).
        """
        cmd_result = virsh.blockcopy(vm_name, target, dest_path,
                                     options, **extra_dict)
        _blockjob_and_libvirtd_chk(cmd_result)
        if cmd_result.exit_status:
            return False
        elif "Copy aborted" in cmd_result.stdout:
            return False
        else:
            return cmd_result

    def _make_snapshot():
        """
        Make external disk snapshot
        """
        snap_xml = snapshot_xml.SnapshotXML()
        snapshot_name = "blockcopy_snap"
        snap_xml.snap_name = snapshot_name
        snap_xml.description = "blockcopy snapshot"

        # Add all disks into xml file.
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        new_disks = []
        src_disk_xml = disks[0]
        disk_xml = snap_xml.SnapDiskXML()
        disk_xml.xmltreefile = src_disk_xml.xmltreefile
        del disk_xml.device
        del disk_xml.address
        disk_xml.snapshot = "external"
        disk_xml.disk_name = disk_xml.target['dev']

        # Only qcow2 works as external snapshot file format, update it
        # here
        driver_attr = disk_xml.driver
        driver_attr.update({'type': 'qcow2'})
        disk_xml.driver = driver_attr

        new_attrs = disk_xml.source.attrs
        if disk_xml.source.attrs.has_key('file'):
            # File-backed source: snapshot into a temp qcow2 file.
            new_file = os.path.join(tmp_dir, "blockcopy_shallow.snap")
            snapshot_external_disks.append(new_file)
            new_attrs.update({'file': new_file})
            hosts = None
        elif (disk_xml.source.attrs.has_key('dev') or
              disk_xml.source.attrs.has_key('name') or
              disk_xml.source.attrs.has_key('pool')):
            if (disk_xml.type_name == 'block' or
                    disk_source_protocol == 'iscsi'):
                disk_xml.type_name = 'block'
                if new_attrs.has_key('name'):
                    del new_attrs['name']
                    del new_attrs['protocol']
                elif new_attrs.has_key('pool'):
                    del new_attrs['pool']
                    del new_attrs['volume']
                    del new_attrs['mode']
                # Back the snapshot by an emulated iSCSI block device.
                back_path = utl.setup_or_cleanup_iscsi(
                    is_setup=True, is_login=True, image_size="1G",
                    emulated_image=back_n)
                cmd = "qemu-img create -f qcow2 %s 1G" % back_path
                utils.system(cmd)
                new_attrs.update({'dev': back_path})
                hosts = None

        new_src_dict = {"attrs": new_attrs}
        if hosts:
            new_src_dict.update({"hosts": hosts})
        disk_xml.source = disk_xml.new_disk_source(**new_src_dict)

        new_disks.append(disk_xml)

        snap_xml.set_disks(new_disks)
        snapshot_xml_path = snap_xml.xml
        logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

        options = "--disk-only --xmlfile %s " % snapshot_xml_path

        snapshot_result = virsh.snapshot_create(
            vm_name, options, debug=True)

        if snapshot_result.exit_status != 0:
            raise error.TestFail(snapshot_result.stderr)

    snap_path = ''
    save_path = ''
    try:
        # Prepare dest_path
        tmp_file = time.strftime("%Y-%m-%d-%H.%M.%S.img")
        tmp_file += dest_extension
        if not dest_path:
            if with_blockdev:
                blkdev_n = 'blockdev-iscsi'
                dest_path = utl.setup_or_cleanup_iscsi(
                    is_setup=True, is_login=True,
                    image_size=image_size,
                    emulated_image=blkdev_n)
            else:
                if copy_to_nfs:
                    tmp_dir = "%s/%s" % (tmp_dir, mnt_path_name)
                dest_path = os.path.join(tmp_dir, tmp_file)

        # Domain disk replacement with desire type
        if replace_vm_disk:
            utl.set_vm_disk(vm, params, tmp_dir, test)
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        if with_shallow:
            options += " --shallow"
            _make_snapshot()

        # Prepare transient/persistent vm
        if persistent_vm == "no" and vm.is_persistent():
            vm.undefine()
        elif persistent_vm == "yes" and not vm.is_persistent():
            # NOTE(review): new_xml is only bound when replace_vm_disk is
            # set -- this branch can raise NameError otherwise; verify.
            new_xml.define()

        # Run blockcopy command
        if rerun_flag == 1:
            # First pass: create the destination file with --wait/--finish
            # so the second (reuse) pass finds an image of matching size.
            options1 = "--wait %s --finish --verbose" % dest_format
            if with_blockdev:
                options1 += " --blockdev"
            if with_shallow:
                options1 += " --shallow"
            cmd_result = virsh.blockcopy(vm_name, target,
                                         dest_path, options1,
                                         **extra_dict)
            status = cmd_result.exit_status
            if status != 0:
                raise error.TestFail("Run blockcopy command fail.")
            elif not os.path.exists(dest_path):
                raise error.TestFail("Cannot find the created copy.")
            # Second pass: retry the real blockcopy until it sticks.
            cmd_result = utils_misc.wait_for(_blockcopy_cmd, 10)
            if not cmd_result:
                raise error.TestFail("Run blockcopy command fail.")
            status = 0
        else:
            cmd_result = virsh.blockcopy(vm_name, target, dest_path,
                                         options, **extra_dict)
            _blockjob_and_libvirtd_chk(cmd_result)
            status = cmd_result.exit_status

        if not libvirtd_utl.is_running():
            raise error.TestFail("Libvirtd service is dead.")

        if not status_error:
            if status == 0:
                # check_xml is defined elsewhere in this module.
                ret = utils_misc.wait_for(
                    lambda: check_xml(vm_name, target, dest_path,
                                      options), 5)
                if not ret:
                    raise error.TestFail("Domain xml not expected after"
                                         " blockcopy")
                if options.count("--bandwidth"):
                    utl.check_blockjob(vm_name, target,
                                       "bandwidth", bandwidth)
                    if check_state_lock:
                        # Run blockjob pivot in subprocess as it will hang
                        # for a while, run blockjob info again to check
                        # job state
                        command = "virsh blockjob %s %s --pivot" % (
                            vm_name, target)
                        session = aexpect.ShellSession(command)
                        ret = virsh.blockjob(vm_name, target, "--info")
                        err_info = "cannot acquire state change lock"
                        if err_info in ret.stderr:
                            raise error.TestFail("Hit on bug: %s" % bug_url)
                        utl.check_exit_status(ret, status_error)
                        session.close()
                val = options.count("--pivot") + options.count("--finish")
                if val == 0:
                    # Neither --pivot nor --finish given: wait for the
                    # mirroring job to reach 100% ourselves.
                    try:
                        finish_job(vm_name, target, default_timeout)
                    except JobTimeout, excpt:
                        raise error.TestFail("Run command failed: %s" %
                                             excpt)
                if options.count("--raw") and not with_blockdev:
                    check_format(dest_path, dest_extension, dest_format)
                if active_snap:
                    snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
                    snap_opt = "--disk-only --atomic --no-metadata "
                    snap_opt += "vda,snapshot=external,file=%s" % snap_path
                    # NOTE(review): "ignore_statues" looks like a typo for
                    # "ignore_status" -- verify against the virsh helper API.
                    ret = virsh.snapshot_create_as(vm_name, snap_opt,
                                                   ignore_statues=True,
                                                   debug=True)
                    utl.check_exit_status(ret, active_error)
                if active_save:
                    save_path = "%s/%s.save" % (tmp_dir, vm_name)
                    ret = virsh.save(vm_name, save_path,
                                     ignore_statues=True,
                                     debug=True)
                    utl.check_exit_status(ret, active_error)
            else:
                raise error.TestFail(cmd_result.stderr)
        else:
            # NOTE(review): chunk truncated here -- the expected-failure
            # branch and the cleanup/finally section are missing.
def run(test, params, env):
    """
    Test interafce xml options.

    1.Prepare test environment,destroy or suspend a VM.
    2.Edit xml and start the domain.
    3.Perform test operation.
    4.Recover test environment.
    5.Confirm the test result.

    :param test: test object
    :param params: dict-like test parameters
    :param env: test environment holding the VM object(s)
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': False}

    def create_iface_xml(iface_mac):
        """
        Create interface xml file

        :param iface_mac: MAC address assigned to the new interface
        :return: Interface device object ready to attach
        """
        iface = Interface(type_name=iface_type)
        source = ast.literal_eval(iface_source)
        if source:
            iface.source = source
        iface.model = iface_model if iface_model else "virtio"
        iface.mac_address = iface_mac
        driver_dict = {}
        driver_host = {}
        driver_guest = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        if iface_driver_host:
            driver_host = ast.literal_eval(iface_driver_host)
        if iface_driver_guest:
            driver_guest = ast.literal_eval(iface_driver_guest)
        iface.driver = iface.new_driver(driver_attr=driver_dict,
                                        driver_host=driver_host,
                                        driver_guest=driver_guest)
        logging.debug("Create new interface xml: %s", iface)
        return iface

    def modify_iface_xml(update, status_error=False):
        """
        Modify interface xml options

        :param update: True -> apply via virsh update-device on a live
                       domain; False -> rewrite and redefine the inactive xml
        :param status_error: expected failure flag for update-device
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        xml_devices = vmxml.devices
        iface_index = xml_devices.index(
            xml_devices.by_device_tag("interface")[0])
        iface = xml_devices[iface_index]
        if iface_model:
            iface.model = iface_model
        else:
            del iface.model
        if iface_type:
            iface.type_name = iface_type
        del iface.source
        source = ast.literal_eval(iface_source)
        if source:
            net_ifs = utils_net.get_net_if(state="UP")
            # Check source device is valid or not,
            # if it's not in host interface list, try to set
            # source device to first active interface of host
            if (iface.type_name == "direct" and
                    source.has_key('dev') and
                    source['dev'] not in net_ifs):
                logging.warn("Source device %s is not a interface"
                             " of host, reset to %s",
                             source['dev'], net_ifs[0])
                source['dev'] = net_ifs[0]
            iface.source = source
        backend = ast.literal_eval(iface_backend)
        if backend:
            iface.backend = backend
        driver_dict = {}
        driver_host = {}
        driver_guest = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        if iface_driver_host:
            driver_host = ast.literal_eval(iface_driver_host)
        if iface_driver_guest:
            driver_guest = ast.literal_eval(iface_driver_guest)
        iface.driver = iface.new_driver(driver_attr=driver_dict,
                                        driver_host=driver_host,
                                        driver_guest=driver_guest)
        if iface.address:
            # Drop the PCI address so libvirt re-allocates it.
            del iface.address
        logging.debug("New interface xml file: %s", iface)
        if unprivileged_user:
            # Create disk image for unprivileged user
            disk_index = xml_devices.index(
                xml_devices.by_device_tag("disk")[0])
            disk_xml = xml_devices[disk_index]
            logging.debug("source: %s", disk_xml.source)
            disk_source = disk_xml.source.attrs["file"]
            cmd = ("cp -fZ {0} {1} && chown {2}:{2} {1}"
                   "".format(disk_source, dst_disk, unprivileged_user))
            utils.run(cmd)
            disk_xml.source = disk_xml.new_disk_source(
                attrs={"file": dst_disk})
            vmxml.devices = xml_devices
            # Remove all channels to avoid of permission problem
            channels = vmxml.get_devices(device_type="channel")
            for channel in channels:
                vmxml.del_device(channel)

            vmxml.xmltreefile.write()
            logging.debug("New VM xml: %s", vmxml)
            utils.run("chmod a+rw %s" % vmxml.xml)
            virsh.define(vmxml.xml, **virsh_dargs)
        # Try to modify interface xml by update-device or edit xml
        elif update:
            iface.xmltreefile.write()
            ret = virsh.update_device(vm_name, iface.xml,
                                      ignore_status=True)
            libvirt.check_exit_status(ret, status_error)
        else:
            vmxml.devices = xml_devices
            vmxml.xmltreefile.write()
            vmxml.sync()

    def check_offloads_option(if_name, driver_options, session=None):
        """
        Check interface offloads by ethtool output

        :param if_name: interface name to query with ethtool
        :param driver_options: dict of expected offload settings
        :param session: guest session; None means run ethtool on the host
        """
        offloads = {"csum": "tx-checksumming",
                    "gso": "generic-segmentation-offload",
                    "tso4": "tcp-segmentation-offload",
                    "tso6": "tx-tcp6-segmentation",
                    "ecn": "tx-tcp-ecn-segmentation",
                    "ufo": "udp-fragmentation-offload"}
        if session:
            ret, output = session.cmd_status_output("ethtool -k %s | head"
                                                    " -18" % if_name)
        else:
            out = utils.run("ethtool -k %s | head -18" % if_name)
            ret, output = out.exit_status, out.stdout
        if ret:
            raise error.TestFail("ethtool return error code")
        logging.debug("ethtool output: %s", output)
        for offload in driver_options.keys():
            if offloads.has_key(offload):
                # Fail only when the feature line is present but its
                # reported value differs from the expected one.
                if (output.count(offloads[offload]) and
                    not output.count("%s: %s" % (
                        offloads[offload], driver_options[offload]))):
                    raise error.TestFail("offloads option %s: %s isn't"
                                         " correct in ethtool output" %
                                         (offloads[offload],
                                          driver_options[offload]))

    def run_xml_test(iface_mac):
        """
        Test for interface options in vm xml

        :param iface_mac: MAC of the interface to inspect
        """
        # Get the interface object according the mac address
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_devices = vmxml.get_devices(device_type="interface")
        iface = None
        for iface_dev in iface_devices:
            if iface_dev.mac_address == iface_mac:
                iface = iface_dev
        if not iface:
            raise error.TestFail("Can't find interface with mac"
                                 " '%s' in vm xml" % iface_mac)
        driver_dict = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        for driver_opt in driver_dict.keys():
            if not driver_dict[driver_opt] == iface.driver.driver_attr[
                    driver_opt]:
                raise error.TestFail("Can't see driver option %s=%s in"
                                     " vm xml" %
                                     (driver_opt, driver_dict[driver_opt]))
        if iface_target:
            if (not iface.target.has_key("dev") or
                    not iface.target["dev"].startswith(iface_target)):
                raise error.TestFail("Can't see device target dev in"
                                     " vm xml")
            # Check macvtap mode by ip link command
            if iface_target == "macvtap" and iface.source.has_key("mode"):
                cmd = "ip -d link show %s" % iface.target["dev"]
                output = utils.run(cmd).stdout
                logging.debug("ip link output: %s", output)
                mode = iface.source["mode"]
                if mode == "passthrough":
                    # 'ip' reports the mode under its abbreviated name.
                    mode = "passthru"
                if not output.count("macvtap mode %s" % mode):
                    raise error.TestFail("Failed to verify macvtap mode")

    def run_cmdline_test(iface_mac):
        """
        Test for qemu-kvm command line options

        :param iface_mac: MAC of the interface to look for in the
                          qemu-kvm process command line
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if test_vhost_net:
            cmd += " | grep 'vhost=on'"
        ret = utils.run(cmd)
        if ret.exit_status:
            raise error.TestFail("Can't parse qemu-kvm command line")

        logging.debug("Command line %s", ret.stdout)
        if iface_model == "virtio":
            model_option = "device virtio-net-pci"
        else:
            model_option = "device rtl8139"
        iface_cmdline = re.findall(r"%s,(.+),mac=%s" %
                                   (model_option, iface_mac), ret.stdout)
        if not iface_cmdline:
            raise error.TestFail("Can't see %s with mac %s in command"
                                 " line" % (model_option, iface_mac))

        cmd_opt = {}
        for opt in iface_cmdline[0].split(','):
            tmp = opt.rsplit("=")
            cmd_opt[tmp[0]] = tmp[1]
        logging.debug("Command line options %s", cmd_opt)

        driver_dict = {}
        # Test <driver> xml options.
        if iface_driver:
            iface_driver_dict = ast.literal_eval(iface_driver)
            for driver_opt in iface_driver_dict.keys():
                if driver_opt == "name":
                    continue
                elif driver_opt == "txmode":
                    # xml "txmode=iothread" maps to qemu "tx=bh".
                    if iface_driver_dict["txmode"] == "iothread":
                        driver_dict["tx"] = "bh"
                    else:
                        driver_dict["tx"] = iface_driver_dict["txmode"]
                elif driver_opt == "queues":
                    # Multiqueue: qemu gets mq=on and vectors = 2*q + 2.
                    driver_dict["mq"] = "on"
                    driver_dict["vectors"] = str(int(
                        iface_driver_dict["queues"]) * 2 + 2)
                else:
                    driver_dict[driver_opt] = iface_driver_dict[driver_opt]
        # Test <driver><host/><driver> xml options.
        if iface_driver_host:
            driver_dict.update(ast.literal_eval(iface_driver_host))
        # Test <driver><guest/><driver> xml options.
        if iface_driver_guest:
            driver_dict.update(ast.literal_eval(iface_driver_guest))

        for driver_opt in driver_dict.keys():
            if (not cmd_opt.has_key(driver_opt) or
                    not cmd_opt[driver_opt] == driver_dict[driver_opt]):
                raise error.TestFail("Can't see option '%s=%s' in qemu-kvm "
                                     " command line" %
                                     (driver_opt, driver_dict[driver_opt]))

        if test_backend:
            guest_pid = ret.stdout.rsplit()[1]
            cmd = "lsof %s | grep %s" % (backend["tap"], guest_pid)
            if utils.system(cmd, ignore_status=True):
                raise error.TestFail("Guest process didn't open backend"
                                     " file" % backend["tap"])
            cmd = "lsof %s | grep %s" % (backend["vhost"], guest_pid)
            if utils.system(cmd, ignore_status=True):
                raise error.TestFail("Guest process didn't open backend"
                                     " file" % backend["tap"])

    def get_guest_ip(session, mac):
        """
        Wrapper function to get guest ip address
        """
        utils_net.restart_guest_network(session, mac)
        # Wait for IP address is ready
        utils_misc.wait_for(
            lambda: utils_net.get_guest_ip_addr(session, mac), 10)
        return utils_net.get_guest_ip_addr(session, mac)

    def check_user_network(session):
        """
        Check user network ip address on guest
        """
        vm_ips = []
        vm_ips.append(get_guest_ip(session, iface_mac_old))
        if attach_device:
            vm_ips.append(get_guest_ip(session, iface_mac))
        logging.debug("IP address on guest: %s", vm_ips)
        if len(vm_ips) != len(set(vm_ips)):
            raise error.TestFail("Duplicated IP address on guest. "
                                 "Check bug: https://bugzilla.redhat."
                                 "com/show_bug.cgi?id=1147238")
        for vm_ip in vm_ips:
            # User-mode (SLIRP) network hands out 10.0.2.x addresses.
            if vm_ip is None or not vm_ip.startswith("10.0.2."):
                raise error.TestFail("Found wrong IP address"
                                     " on guest")
        # Check gateway address
        gateway = utils_net.get_net_gateway(session.cmd_output)
        if gateway != "10.0.2.2":
            raise error.TestFail("The gateway on guest is not"
                                 " right")
        # Check dns server address
        ns_list = utils_net.get_net_nameserver(session.cmd_output)
        if "10.0.2.3" not in ns_list:
            raise error.TestFail("The dns server can't be found"
                                 " on guest")

    def check_mcast_network(session):
        """
        Check multicast ip address on guests
        """
        src_addr = ast.literal_eval(iface_source)['address']
        add_session = additional_vm.wait_for_serial_login()
        vms_sess_dict = {vm_name: session,
                         additional_vm.name: add_session}

        # Check mcast address on host
        cmd = "netstat -g | grep %s" % src_addr
        if utils.run(cmd, ignore_status=True).exit_status:
            raise error.TestFail("Can't find multicast ip address"
                                 " on host")
        vms_ip_dict = {}
        # Get ip address on each guest
        for vms in vms_sess_dict.keys():
            vm_mac = vm_xml.VMXML.get_first_mac_by_name(vms)
            vm_ip = get_guest_ip(vms_sess_dict[vms], vm_mac)
            if not vm_ip:
                raise error.TestFail("Can't get multicast ip"
                                     " address on guest")
            vms_ip_dict.update({vms: vm_ip})
        if len(set(vms_ip_dict.values())) != len(vms_sess_dict):
            raise error.TestFail("Got duplicated multicast ip address")
        logging.debug("Found ips on guest: %s", vms_ip_dict)

        # Run omping server on host
        if not utils_misc.yum_install(["omping"]):
            raise error.TestError("Failed to install omping"
                                  " on host")
        cmd = ("iptables -F;omping -m %s %s" %
               (src_addr, "192.168.122.1 %s" %
                ' '.join(vms_ip_dict.values())))
        # Run a backgroup job waiting for connection of client
        bgjob = utils.AsyncJob(cmd)

        # Run omping client on guests
        for vms in vms_sess_dict.keys():
            # omping should be installed first
            if not utils_misc.yum_install(["omping"], vms_sess_dict[vms]):
                raise error.TestError("Failed to install omping"
                                      " on guest")
            cmd = ("iptables -F; omping -c 5 -T 5 -m %s %s" %
                   (src_addr, "192.168.122.1 %s" %
                    vms_ip_dict[vms]))
            ret, output = vms_sess_dict[vms].cmd_status_output(cmd)
            logging.debug("omping ret: %s, output: %s", ret, output)
            if (not output.count('multicast, xmt/rcv/%loss = 5/5/0%') or
                    not output.count('unicast, xmt/rcv/%loss = 5/5/0%')):
                raise error.TestFail("omping failed on guest")
        # Kill the backgroup job
        bgjob.kill_func()

    status_error = "yes" == params.get("status_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    unprivileged_user = params.get("unprivileged_user")

    # Interface specific attributes.
    iface_type = params.get("iface_type", "network")
    iface_source = params.get("iface_source", "{}")
    iface_driver = params.get("iface_driver")
    iface_model = params.get("iface_model")
    iface_target = params.get("iface_target")
    iface_backend = params.get("iface_backend", "{}")
    iface_driver_host = params.get("iface_driver_host")
    iface_driver_guest = params.get("iface_driver_guest")
    attach_device = params.get("attach_iface_device")
    change_option = "yes" == params.get("change_iface_options", "no")
    update_device = "yes" == params.get("update_iface_device", "no")
    additional_guest = "yes" == params.get("additional_guest", "no")
    # NOTE(review): "******" literals below look like scrubbed
    # placeholders (presumably "yes") -- confirm against original source.
    serial_login = "******" == params.get("serial_login", "no")
    test_option_cmd = "yes" == params.get(
        "test_iface_option_cmd", "no")
    test_option_xml = "yes" == params.get(
        "test_iface_option_xml", "no")
    test_vhost_net = "yes" == params.get(
        "test_vhost_net", "no")
    test_option_offloads = "yes" == params.get(
        "test_option_offloads", "no")
    test_iface_user = "******" == params.get("test_iface_user", "no")
    test_iface_mcast = "yes" == params.get("test_iface_mcast", "no")
    test_libvirtd = "yes" == params.get("test_libvirtd", "no")
    test_guest_ip = "yes" == params.get("test_guest_ip", "no")
    test_backend = "yes" == params.get("test_backend", "no")

    if iface_driver_host or iface_driver_guest or test_backend:
        if not libvirt_version.version_compare(1, 2, 8):
            raise error.TestNAError("Offloading/backend options not "
                                    "supported in this libvirt version")
    if iface_driver and "queues" in ast.literal_eval(iface_driver):
        if not libvirt_version.version_compare(1, 0, 6):
            raise error.TestNAError("Queues options not supported"
                                    " in this libvirt version")

    if unprivileged_user:
        virsh_dargs["unprivileged_user"] = unprivileged_user
        # Create unprivileged user if needed
        cmd = ("grep {0} /etc/passwd || "
               "useradd {0}".format(unprivileged_user))
        utils.run(cmd)
        # Need another disk image for unprivileged user to access
        dst_disk = "/tmp/%s.img" % unprivileged_user

    # Destroy VM first
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    iface_mac_old = vm_xml.VMXML.get_first_mac_by_name(vm_name)
    # iface_mac will update if attach a new interface
    iface_mac = iface_mac_old
    # Additional vm for test
    additional_vm = None
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # Build the xml and run test.
        try:
            # Prepare interface backend files
            if test_backend:
                if not os.path.exists("/dev/vhost-net"):
                    utils.run("modprobe vhost-net")
                backend = ast.literal_eval(iface_backend)
                backend_tap = "/dev/net/tun"
                backend_vhost = "/dev/vhost-net"
                if not backend:
                    backend["tap"] = backend_tap
                    backend["vhost"] = backend_vhost
                if not start_error:
                    # Create backend files for normal test
                    if not os.path.exists(backend["tap"]):
                        os.rename(backend_tap, backend["tap"])
                    if not os.path.exists(backend["vhost"]):
                        os.rename(backend_vhost, backend["vhost"])
            # Edit the interface xml.
            if change_option:
                modify_iface_xml(update=False)

            # Check vhost driver.
            if test_vhost_net:
                if os.path.exists("/dev/vhost-net"):
                    cmd = ("modprobe -r {0}; lsmod | "
                           "grep {0}".format("vhost_net"))
                    if not utils.system(cmd, ignore_status=True):
                        raise error.TestError("Can't remove "
                                              "vhost_net driver")

            # Attach a interface when vm is shutoff
            if attach_device == 'config':
                iface_mac = utils_net.generate_mac_address_simple()
                iface_xml_obj = create_iface_xml(iface_mac)
                iface_xml_obj.xmltreefile.write()
                ret = virsh.attach_device(vm_name, iface_xml_obj.xml,
                                          flagstr="--config",
                                          ignore_status=True)
                libvirt.check_exit_status(ret)

            # Clone additional vm
            if additional_guest:
                guest_name = "%s_%s" % (vm_name, '1')
                # Clone additional guest
                timeout = params.get("clone_timeout", 360)
                utils_libguestfs.virt_clone_cmd(vm_name, guest_name,
                                                True, timeout=timeout)
                additional_vm = vm.clone(guest_name)
                additional_vm.start()
                #additional_vm.wait_for_login()

            # Start the VM.
            if unprivileged_user:
                virsh.start(vm_name, **virsh_dargs)
                cmd = ("su - %s -c 'virsh console %s'"
                       % (unprivileged_user, vm_name))
                session = aexpect.ShellSession(cmd)
                session.sendline()
                remote.handle_prompts(session, params.get("username"),
                                      params.get("password"),
                                      "[\#\$]", 30)
                # Get ip address on guest
                if not get_guest_ip(session, iface_mac):
                    raise error.TestError("Can't get ip address on guest")
            else:
                # Will raise VMStartError exception if start fails
                vm.start()
                if serial_login:
                    session = vm.wait_for_serial_login()
                else:
                    session = vm.wait_for_login()
            if start_error:
                raise error.TestFail("VM started unexpectedly")

            if test_vhost_net:
                if utils.system("lsmod | grep vhost_net",
                                ignore_status=True):
                    raise error.TestFail("vhost_net module can't be"
                                         " loaded automatically")

            # Attach a interface when vm is running
            if attach_device == 'live':
                iface_mac = utils_net.generate_mac_address_simple()
                iface_xml_obj = create_iface_xml(iface_mac)
                iface_xml_obj.xmltreefile.write()
                ret = virsh.attach_device(vm_name, iface_xml_obj.xml,
                                          flagstr="--live",
                                          ignore_status=True)
                libvirt.check_exit_status(ret)
                # Need sleep here for attachment take effect
                time.sleep(5)

            # Update a interface options
            if update_device:
                modify_iface_xml(update=True, status_error=status_error)

            # Run tests for qemu-kvm command line options
            if test_option_cmd:
                run_cmdline_test(iface_mac)
            # Run tests for vm xml
            if test_option_xml:
                run_xml_test(iface_mac)
            # Run tests for offloads options
            if test_option_offloads:
                if iface_driver_host:
                    ifname_guest = utils_net.get_linux_ifname(
                        session, iface_mac)
                    check_offloads_option(
                        ifname_guest, ast.literal_eval(
                            iface_driver_host), session)
                if iface_driver_guest:
                    ifname_host = libvirt.get_ifname_host(vm_name,
                                                          iface_mac)
                    check_offloads_option(
                        ifname_host, ast.literal_eval(
                            iface_driver_guest))

            if test_iface_user:
                # Test user type network
                check_user_network(session)
            if test_iface_mcast:
                # Test mcast type network
                check_mcast_network(session)
            # Check guest ip address
            if test_guest_ip:
                if not get_guest_ip(session, iface_mac):
                    raise error.TestFail("Guest can't get a"
                                         " valid ip address")

            session.close()
            # Restart libvirtd and guest, then test again
            if test_libvirtd:
                libvirtd.restart()
                vm.destroy()
                vm.start()
                if test_option_xml:
                    run_xml_test(iface_mac)

            # Detach hot/cold-plugged interface at last
            if attach_device:
                ret = virsh.detach_device(vm_name, iface_xml_obj.xml,
                                          flagstr="", ignore_status=True)
                libvirt.check_exit_status(ret)

        except virt_vm.VMStartError, e:
            logging.info(str(e))
            if start_error:
                pass
            else:
                raise error.TestFail('VM Failed to start for some reason!')

    finally:
        # Recover VM.
        logging.info("Restoring vm...")
        # Restore interface backend files
        if test_backend:
            if not os.path.exists(backend_tap):
                os.rename(backend["tap"], backend_tap)
            if not os.path.exists(backend_vhost):
                os.rename(backend["vhost"], backend_vhost)
        if unprivileged_user:
            virsh.remove_domain(vm_name, "--remove-all-storage",
                                **virsh_dargs)
        if additional_vm:
            virsh.remove_domain(additional_vm.name,
                                "--remove-all-storage")
            # Kill all omping server process on host
            utils.system("pidof omping && killall omping",
                         ignore_status=True)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
def run(test, params, env):
    """
    Test command: virsh blockcommit <domain> <path>

    1) Prepare test environment.
    2) Commit changes from a snapshot down to its backing image.
    3) Recover test environment.
    4) Check result.
    """

    def make_disk_snapshot(postfix_n):
        """
        Create three external, disk-only snapshots for every VM disk.

        :param postfix_n: name prefix for the snapshots; the external file
                          for each disk is named "<disk>.<postfix_n><count>"
        """
        # Add all disks into commandline.
        disks = vm.get_disk_devices()

        # Make three external snapshots for disks only
        for count in range(1, 4):
            options = "%s_%s %s%s-desc " % (postfix_n, count,
                                            postfix_n, count)
            options += "--disk-only --atomic --no-metadata"
            if needs_agent:
                options += " --quiesce"

            for disk in disks:
                disk_detail = disks[disk]
                basename = os.path.basename(disk_detail['source'])

                # Remove the original suffix if any, appending
                # ".postfix_n[0-9]"
                diskname = basename.split(".")[0]
                snap_name = "%s.%s%s" % (diskname, postfix_n, count)
                disk_external = os.path.join(tmp_dir, snap_name)

                snapshot_external_disks.append(disk_external)
                options += " %s,snapshot=external,file=%s" % (disk,
                                                              disk_external)

            cmd_result = virsh.snapshot_create_as(vm_name, options,
                                                  ignore_status=True,
                                                  debug=True)
            status = cmd_result.exit_status
            if status != 0:
                raise error.TestFail("Failed to make snapshots for disks!")

            # Create a file flag in VM after each snapshot so blockcommit
            # results can be verified from inside the guest afterwards.
            flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                                    dir="/tmp")
            file_path = flag_file.name
            flag_file.close()

            status, output = session.cmd_status_output("touch %s" % file_path)
            if status:
                raise error.TestFail("Touch file in vm failed. %s" % output)

            snapshot_flag_files.append(file_path)

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_state = params.get("vm_state", "running")
    needs_agent = "yes" == params.get("needs_agent", "yes")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    top_inactive = ("yes" == params.get("top_inactive"))
    with_timeout = ("yes" == params.get("with_timeout_option", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    base_option = params.get("base_option", "none")
    middle_base = "yes" == params.get("middle_base", "no")
    pivot_opt = "yes" == params.get("pivot_opt", "no")
    snap_in_mirror = "yes" == params.get("snap_in_mirror", "no")
    snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", "no")
    with_active_commit = "yes" == params.get("with_active_commit", "no")
    multiple_chain = "yes" == params.get("multiple_chain", "no")
    virsh_dargs = {'debug': True}

    # Process domain disk device parameters
    disk_type = params.get("disk_type")
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", 'no')
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    # Active (live) block commit requires libvirt >= 1.2.4
    if not top_inactive:
        if not libvirt_version.version_compare(1, 2, 4):
            raise error.TestNAError("live active block commit is not supported"
                                    " in current libvirt version.")

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Abort the test if there are snapshots already
    existing_snaps = virsh.snapshot_list(vm_name)
    if len(existing_snaps) != 0:
        raise error.TestFail("There are snapshots created for %s already" %
                             vm_name)

    snapshot_external_disks = []
    cmd_session = None
    try:
        if disk_src_protocol == 'iscsi' and disk_type == 'network':
            if not libvirt_version.version_compare(1, 0, 4):
                raise error.TestNAError("'iscsi' disk doesn't support in"
                                        " current libvirt version.")

        # Set vm xml and guest agent
        if replace_vm_disk:
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if needs_agent:
            vm.prepare_guest_agent()

        # The first disk is supposed to include OS
        # We will perform blockcommit operation for it.
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        blk_target = first_disk['target']
        snapshot_flag_files = []

        # get a vm session before snapshot
        session = vm.wait_for_login()
        # do snapshot
        postfix_n = 'snap'
        make_disk_snapshot(postfix_n)

        basename = os.path.basename(blk_source)
        diskname = basename.split(".")[0]
        snap_src_lst = [blk_source]
        if multiple_chain:
            # Restart the domain from the first snapshot image, then build a
            # second snapshot chain on top of it; the images after the new
            # top must be omitted from the expected chain.
            snap_name = "%s.%s1" % (diskname, postfix_n)
            snap_top = os.path.join(tmp_dir, snap_name)
            top_index = snapshot_external_disks.index(snap_top) + 1
            omit_list = snapshot_external_disks[top_index:]
            vm.destroy(gracefully=False)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disk_xml = vmxml.get_devices(device_type="disk")[0]
            vmxml.del_device(disk_xml)
            disk_dict = {'attrs': {'file': snap_top}}
            disk_xml.source = disk_xml.new_disk_source(**disk_dict)
            vmxml.add_device(disk_xml)
            vmxml.sync()
            vm.start()
            session = vm.wait_for_login()
            postfix_n = 'new_snap'
            make_disk_snapshot(postfix_n)
            snap_src_lst = [blk_source]
            snap_src_lst += snapshot_external_disks
            logging.debug("omit list is %s", omit_list)
            for i in omit_list:
                snap_src_lst.remove(i)
        else:
            # snapshot src file list
            snap_src_lst += snapshot_external_disks

        # Render the expected backing chain (top -> base) for the logs
        backing_chain = ''
        for i in reversed(range(4)):
            if i == 0:
                backing_chain += "%s" % snap_src_lst[i]
            else:
                backing_chain += "%s -> " % snap_src_lst[i]

        logging.debug("The backing chain is: %s" % backing_chain)

        # check snapshot disk xml backingStore is expected
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        disk_xml = None
        for disk in disks:
            if disk.target['dev'] != blk_target:
                continue
            else:
                disk_xml = disk.xmltreefile
                logging.debug("the target disk xml after snapshot is %s",
                              disk_xml)
                break

        if not disk_xml:
            raise error.TestFail("Can't find disk xml with target %s" %
                                 blk_target)
        elif libvirt_version.version_compare(1, 2, 4):
            # backingStore element introuduced in 1.2.4
            chain_lst = snap_src_lst[::-1]
            ret = check_chain_xml(disk_xml, chain_lst)
            if not ret:
                raise error.TestFail("Domain image backing chain check failed")

        # set blockcommit_options
        top_image = None
        blockcommit_options = "--wait --verbose"

        if with_timeout:
            blockcommit_options += " --timeout 2"

        if base_option == "shallow":
            blockcommit_options += " --shallow"
        elif base_option == "base":
            if middle_base:
                snap_name = "%s.%s1" % (diskname, postfix_n)
                blk_source = os.path.join(tmp_dir, snap_name)
            blockcommit_options += " --base %s" % blk_source

        if top_inactive:
            snap_name = "%s.%s2" % (diskname, postfix_n)
            top_image = os.path.join(tmp_dir, snap_name)
            blockcommit_options += " --top %s" % top_image
        else:
            blockcommit_options += " --active"
            if pivot_opt:
                blockcommit_options += " --pivot"

        if vm_state == "shut off":
            vm.destroy(gracefully=True)

        if with_active_commit:
            # inactive commit follow active commit will fail with bug 1135339
            cmd = "virsh blockcommit %s %s --active --pivot" % (vm_name,
                                                                blk_target)
            cmd_session = aexpect.ShellSession(cmd)

        # Run test case
        result = virsh.blockcommit(vm_name, blk_target,
                                   blockcommit_options, **virsh_dargs)

        # Check status_error
        libvirt.check_exit_status(result, status_error)
        if result.exit_status and status_error:
            return

        # Poll the domain disk xml until the commit job reaches the expected
        # state, then verify the resulting backing chain.
        while True:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

            disks = vmxml.devices.by_device_tag('disk')
            for disk in disks:
                if disk.target['dev'] != blk_target:
                    continue
                else:
                    disk_xml = disk.xmltreefile
                    break

            if not top_inactive:
                disk_mirror = disk_xml.find('mirror')
                if '--pivot' not in blockcommit_options:
                    if disk_mirror is not None:
                        job_type = disk_mirror.get('job')
                        job_ready = disk_mirror.get('ready')
                        src_element = disk_mirror.find('source')
                        disk_src_file = None
                        for elem in ('file', 'name', 'dev'):
                            elem_val = src_element.get(elem)
                            if elem_val:
                                disk_src_file = elem_val
                                break
                        err_msg = "blockcommit base source "
                        err_msg += "%s not expected" % disk_src_file
                        if '--shallow' in blockcommit_options:
                            if not multiple_chain:
                                if disk_src_file != snap_src_lst[2]:
                                    raise error.TestFail(err_msg)
                            else:
                                if disk_src_file != snap_src_lst[3]:
                                    raise error.TestFail(err_msg)
                        else:
                            if disk_src_file != blk_source:
                                raise error.TestFail(err_msg)
                        if libvirt_version.version_compare(1, 2, 7):
                            # The job attribute mentions which API started the
                            # operation since 1.2.7.
                            if job_type != 'active-commit':
                                raise error.TestFail(
                                    "blockcommit job type '%s'"
                                    " not expected" % job_type)
                            if job_ready != 'yes':
                                # The attribute ready, if present, tracks
                                # progress of the job: yes if the disk is known
                                # to be ready to pivot, or, since 1.2.7, abort
                                # or pivot if the job is in the process of
                                # completing.
                                continue
                            else:
                                logging.debug(
                                    "after active block commit job "
                                    "ready for pivot, the target disk"
                                    " xml is %s", disk_xml)
                                break
                        else:
                            break
                    else:
                        break
                else:
                    if disk_mirror is None:
                        logging.debug(disk_xml)
                        if "--shallow" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            chain_lst.pop(0)
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                raise error.TestFail("Domain image backing "
                                                     "chain check failed")
                        elif "--base" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            base_index = chain_lst.index(blk_source)
                            chain_lst = chain_lst[base_index:]
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                raise error.TestFail("Domain image backing "
                                                     "chain check failed")
                        break
                    else:
                        # wait pivot after commit is synced
                        continue
            else:
                logging.debug("after inactive commit the disk xml is: %s"
                              % disk_xml)
                if libvirt_version.version_compare(1, 2, 4):
                    if "--shallow" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        chain_lst.remove(top_image)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            raise error.TestFail("Domain image backing chain "
                                                 "check failed")
                    elif "--base" in blockcommit_options:
                        # Everything between --top and --base (exclusive of
                        # base) must have been committed away.
                        chain_lst = snap_src_lst[::-1]
                        top_index = chain_lst.index(top_image)
                        base_index = chain_lst.index(blk_source)
                        val_tmp = []
                        for i in range(top_index, base_index):
                            val_tmp.append(chain_lst[i])
                        for i in val_tmp:
                            chain_lst.remove(i)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            raise error.TestFail("Domain image backing chain "
                                                 "check failed")
                    break
                else:
                    break

        # Check flag files
        if not vm_state == "shut off" and not multiple_chain:
            for flag in snapshot_flag_files:
                status, output = session.cmd_status_output("cat %s" % flag)
                if status:
                    raise error.TestFail("blockcommit failed: %s" % output)

        if not pivot_opt and snap_in_mirror:
            # do snapshot during mirror phase
            snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
            snap_opt = "--disk-only --atomic --no-metadata "
            snap_opt += "vda,snapshot=external,file=%s" % snap_path
            snapshot_external_disks.append(snap_path)
            # NOTE: was "ignore_statues=True" — misspelled keyword was
            # silently swallowed by **dargs, so status was not ignored.
            cmd_result = virsh.snapshot_create_as(vm_name, snap_opt,
                                                  ignore_status=True,
                                                  debug=True)
            libvirt.check_exit_status(cmd_result, snap_in_mirror_err)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")

        if cmd_session:
            cmd_session.close()
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)

        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False,
                                           restart_tgtd=restart_tgtd)
        elif disk_src_protocol == 'gluster':
            libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path)
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            restore_selinux = params.get('selinux_status_bak')
            libvirt.setup_or_cleanup_nfs(is_setup=False,
                                         restore_selinux=restore_selinux)